# === WarrenWeckesser/numpy :: numpy/ma/extras.py (license: bsd-3-clause) ===
"""
Masked arrays add-ons.
A collection of utilities for `numpy.ma`.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
__all__ = [
'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d',
'atleast_3d', 'average', 'clump_masked', 'clump_unmasked',
'column_stack', 'compress_cols', 'compress_nd', 'compress_rowcols',
'compress_rows', 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot',
'dstack', 'ediff1d', 'flatnotmasked_contiguous', 'flatnotmasked_edges',
'hsplit', 'hstack', 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols',
'mask_rows', 'masked_all', 'masked_all_like', 'median', 'mr_',
'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack',
'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack',
]
import itertools
import warnings
from . import core as ma
from .core import (
MaskedArray, MAError, add, array, asarray, concatenate, filled, count,
getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or,
nomask, ones, sort, zeros, getdata, get_masked_subclass, dot,
mask_rowcols
)
import numpy as np
from numpy import ndarray, array as nxarray
import numpy.core.umath as umath
from numpy.core.multiarray import normalize_axis_index
from numpy.core.numeric import normalize_axis_tuple
from numpy.lib.function_base import _ureduce
from numpy.lib.index_tricks import AxisConcatenator
def issequence(seq):
"""
Is seq a sequence (ndarray, list or tuple)?
"""
return isinstance(seq, (ndarray, tuple, list))
def count_masked(arr, axis=None):
"""
Count the number of masked elements along the given axis.
Parameters
----------
arr : array_like
An array with (possibly) masked elements.
axis : int, optional
Axis along which to count. If None (default), a flattened
version of the array is used.
Returns
-------
count : int, ndarray
The total number of masked elements (axis=None) or the number
of masked elements along each slice of the given axis.
See Also
--------
MaskedArray.count : Count non-masked elements.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(9).reshape((3,3))
>>> a = ma.array(a)
>>> a[1, 0] = ma.masked
>>> a[1, 2] = ma.masked
>>> a[2, 1] = ma.masked
>>> a
masked_array(
data=[[0, 1, 2],
[--, 4, --],
[6, --, 8]],
mask=[[False, False, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
>>> ma.count_masked(a)
3
When the `axis` keyword is used an array is returned.
>>> ma.count_masked(a, axis=0)
array([1, 1, 1])
>>> ma.count_masked(a, axis=1)
array([0, 2, 1])
"""
m = getmaskarray(arr)
return m.sum(axis)
def masked_all(shape, dtype=float):
"""
Empty masked array with all elements masked.
Return an empty masked array of the given shape and dtype, where all the
data are masked.
Parameters
----------
shape : tuple
Shape of the required MaskedArray.
dtype : dtype, optional
Data type of the output.
Returns
-------
a : MaskedArray
A masked array with all data masked.
See Also
--------
masked_all_like : Empty masked array modelled on an existing array.
Examples
--------
>>> import numpy.ma as ma
>>> ma.masked_all((3, 3))
masked_array(
data=[[--, --, --],
[--, --, --],
[--, --, --]],
mask=[[ True, True, True],
[ True, True, True],
[ True, True, True]],
fill_value=1e+20,
dtype=float64)
The `dtype` parameter defines the underlying data type.
>>> a = ma.masked_all((3, 3))
>>> a.dtype
dtype('float64')
>>> a = ma.masked_all((3, 3), dtype=np.int32)
>>> a.dtype
dtype('int32')
"""
a = masked_array(np.empty(shape, dtype),
mask=np.ones(shape, make_mask_descr(dtype)))
return a
def masked_all_like(arr):
"""
Empty masked array with the properties of an existing array.
Return an empty masked array of the same shape and dtype as
the array `arr`, where all the data are masked.
Parameters
----------
arr : ndarray
An array describing the shape and dtype of the required MaskedArray.
Returns
-------
a : MaskedArray
A masked array with all data masked.
Raises
------
AttributeError
If `arr` doesn't have a shape attribute (i.e. not an ndarray)
See Also
--------
masked_all : Empty masked array with all elements masked.
Examples
--------
>>> import numpy.ma as ma
>>> arr = np.zeros((2, 3), dtype=np.float32)
>>> arr
array([[0., 0., 0.],
[0., 0., 0.]], dtype=float32)
>>> ma.masked_all_like(arr)
masked_array(
data=[[--, --, --],
[--, --, --]],
mask=[[ True, True, True],
[ True, True, True]],
fill_value=1e+20,
dtype=float32)
The dtype of the masked array matches the dtype of `arr`.
>>> arr.dtype
dtype('float32')
>>> ma.masked_all_like(arr).dtype
dtype('float32')
"""
a = np.empty_like(arr).view(MaskedArray)
a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype))
return a
#####--------------------------------------------------------------------------
#---- --- Standard functions ---
#####--------------------------------------------------------------------------
class _fromnxfunction:
"""
Defines a wrapper to adapt NumPy functions to masked arrays.
An instance of `_fromnxfunction` can be called with the same parameters
as the wrapped NumPy function. The docstring of `newfunc` is adapted from
the wrapped function as well, see `getdoc`.
This class should not be used directly. Instead, one of its extensions that
provides support for a specific type of input should be used.
Parameters
----------
funcname : str
The name of the function to be adapted. The function should be
in the NumPy namespace (i.e. ``np.funcname``).
"""
def __init__(self, funcname):
self.__name__ = funcname
self.__doc__ = self.getdoc()
def getdoc(self):
"""
Retrieve the docstring and signature from the function.
The ``__doc__`` attribute of the function is used as the docstring for
the new masked array version of the function. A note on application
of the function to the mask is appended.
.. warning::
If the function docstring already contained a Notes section, the
new docstring will have two Notes sections instead of appending a note
to the existing section.
Parameters
----------
None
"""
npfunc = getattr(np, self.__name__, None)
doc = getattr(npfunc, '__doc__', None)
if doc:
sig = self.__name__ + ma.get_object_signature(npfunc)
locdoc = "Notes\n-----\nThe function is applied to both the _data"\
" and the _mask, if any."
return '\n'.join((sig, doc, locdoc))
return
def __call__(self, *args, **params):
pass
class _fromnxfunction_single(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with a single array
argument followed by auxiliary args that are passed verbatim for
both the data and mask calls.
"""
def __call__(self, x, *args, **params):
func = getattr(np, self.__name__)
if isinstance(x, ndarray):
_d = func(x.__array__(), *args, **params)
_m = func(getmaskarray(x), *args, **params)
return masked_array(_d, mask=_m)
else:
_d = func(np.asarray(x), *args, **params)
_m = func(getmaskarray(x), *args, **params)
return masked_array(_d, mask=_m)
class _fromnxfunction_seq(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with a single sequence
of arrays followed by auxiliary args that are passed verbatim for
both the data and mask calls.
"""
def __call__(self, x, *args, **params):
func = getattr(np, self.__name__)
_d = func(tuple([np.asarray(a) for a in x]), *args, **params)
_m = func(tuple([getmaskarray(a) for a in x]), *args, **params)
return masked_array(_d, mask=_m)
class _fromnxfunction_args(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with multiple array
arguments. The first non-array-like input marks the beginning of the
arguments that are passed verbatim for both the data and mask calls.
Array arguments are processed independently and the results are
returned in a list. If only one array is found, the return value is
just the processed array instead of a list.
"""
def __call__(self, *args, **params):
func = getattr(np, self.__name__)
arrays = []
args = list(args)
while len(args) > 0 and issequence(args[0]):
arrays.append(args.pop(0))
res = []
for x in arrays:
_d = func(np.asarray(x), *args, **params)
_m = func(getmaskarray(x), *args, **params)
res.append(masked_array(_d, mask=_m))
if len(arrays) == 1:
return res[0]
return res
class _fromnxfunction_allargs(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with multiple array
arguments. Similar to `_fromnxfunction_args` except that all args
are converted to arrays even if they are not so already. This makes
it possible to process scalars as 1-D arrays. Only keyword arguments
are passed through verbatim for the data and mask calls. Array
arguments are processed independently and the results are returned
in a list. If only one arg is present, the return value is just the
processed array instead of a list.
"""
def __call__(self, *args, **params):
func = getattr(np, self.__name__)
res = []
for x in args:
_d = func(np.asarray(x), **params)
_m = func(getmaskarray(x), **params)
res.append(masked_array(_d, mask=_m))
if len(args) == 1:
return res[0]
return res
atleast_1d = _fromnxfunction_allargs('atleast_1d')
atleast_2d = _fromnxfunction_allargs('atleast_2d')
atleast_3d = _fromnxfunction_allargs('atleast_3d')
vstack = row_stack = _fromnxfunction_seq('vstack')
hstack = _fromnxfunction_seq('hstack')
column_stack = _fromnxfunction_seq('column_stack')
dstack = _fromnxfunction_seq('dstack')
stack = _fromnxfunction_seq('stack')
hsplit = _fromnxfunction_single('hsplit')
diagflat = _fromnxfunction_single('diagflat')
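# Illustrative sketch (not part of the original module): because each wrapper
# applies the NumPy function to the data and to the mask separately, stacking
# masked arrays preserves their masks. The inputs below are hypothetical.
#
#     >>> import numpy as np, numpy.ma as ma
#     >>> ma.vstack((ma.array([1, 2], mask=[0, 1]), np.array([3, 4])))
#     # -> data [[1, --], [3, 4]], mask [[False, True], [False, False]]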
#####--------------------------------------------------------------------------
#----
#####--------------------------------------------------------------------------
def flatten_inplace(seq):
"""Flatten a sequence in place."""
k = 0
while (k != len(seq)):
while hasattr(seq[k], '__iter__'):
seq[k:(k + 1)] = seq[k]
k += 1
return seq
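# Example (hypothetical input, for illustration only): nested sequences are
# expanded in place, one level at a time, until no element is iterable.
#
#     >>> flatten_inplace([1, [2, 3], [4, [5]]])
#     [1, 2, 3, 4, 5]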
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
(This docstring should be overwritten)
"""
arr = array(arr, copy=False, subok=True)
nd = arr.ndim
axis = normalize_axis_index(axis, nd)
ind = [0] * (nd - 1)
i = np.zeros(nd, 'O')
indlist = list(range(nd))
indlist.remove(axis)
i[axis] = slice(None, None)
outshape = np.asarray(arr.shape).take(indlist)
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
# if res is a number, then we have a smaller output array
asscalar = np.isscalar(res)
if not asscalar:
try:
len(res)
except TypeError:
asscalar = True
# Note: we shouldn't set the dtype of the output from the first result
# so we force the type to object, and build a list of dtypes. We'll
# just take the largest, to avoid some downcasting
dtypes = []
if asscalar:
dtypes.append(np.asarray(res).dtype)
outarr = zeros(outshape, object)
outarr[tuple(ind)] = res
Ntot = np.product(outshape)
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= outshape[n]) and (n > (1 - nd)):
ind[n - 1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
outarr[tuple(ind)] = res
dtypes.append(asarray(res).dtype)
k += 1
else:
res = array(res, copy=False, subok=True)
j = i.copy()
j[axis] = ([slice(None, None)] * res.ndim)
j.put(indlist, ind)
Ntot = np.product(outshape)
holdshape = outshape
outshape = list(arr.shape)
outshape[axis] = res.shape
dtypes.append(asarray(res).dtype)
outshape = flatten_inplace(outshape)
outarr = zeros(outshape, object)
outarr[tuple(flatten_inplace(j.tolist()))] = res
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= holdshape[n]) and (n > (1 - nd)):
ind[n - 1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
j.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
outarr[tuple(flatten_inplace(j.tolist()))] = res
dtypes.append(asarray(res).dtype)
k += 1
max_dtypes = np.dtype(np.asarray(dtypes).max())
if not hasattr(arr, '_mask'):
result = np.asarray(outarr, dtype=max_dtypes)
else:
result = asarray(outarr, dtype=max_dtypes)
result.fill_value = ma.default_fill_value(result)
return result
apply_along_axis.__doc__ = np.apply_along_axis.__doc__
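# Usage sketch (illustrative; inputs are assumed): with a mask-aware function
# such as ma.sum, masked entries simply drop out of each 1-D slice.
#
#     >>> import numpy.ma as ma
#     >>> a = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 0, 0]])
#     >>> ma.apply_along_axis(ma.sum, 1, a)
#     # -> masked array with data [4, 15]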
def apply_over_axes(func, a, axes):
"""
(This docstring will be overwritten)
"""
val = asarray(a)
N = a.ndim
if array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = ma.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
return val
if apply_over_axes.__doc__ is not None:
apply_over_axes.__doc__ = np.apply_over_axes.__doc__[
:np.apply_over_axes.__doc__.find('Notes')].rstrip() + \
"""
Examples
--------
>>> a = np.ma.arange(24).reshape(2,3,4)
>>> a[:,0,1] = np.ma.masked
>>> a[:,1,:] = np.ma.masked
>>> a
masked_array(
data=[[[0, --, 2, 3],
[--, --, --, --],
[8, 9, 10, 11]],
[[12, --, 14, 15],
[--, --, --, --],
[20, 21, 22, 23]]],
mask=[[[False, True, False, False],
[ True, True, True, True],
[False, False, False, False]],
[[False, True, False, False],
[ True, True, True, True],
[False, False, False, False]]],
fill_value=999999)
>>> np.ma.apply_over_axes(np.ma.sum, a, [0,2])
masked_array(
data=[[[46],
[--],
[124]]],
mask=[[[False],
[ True],
[False]]],
fill_value=999999)
Tuple axis arguments to ufuncs are equivalent:
>>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1))
masked_array(
data=[[[46],
[--],
[124]]],
mask=[[[False],
[ True],
[False]]],
fill_value=999999)
"""
def average(a, axis=None, weights=None, returned=False):
"""
Return the weighted average of array over the given axis.
Parameters
----------
a : array_like
Data to be averaged.
Masked entries are not taken into account in the computation.
axis : int, optional
Axis along which to average `a`. If None, averaging is done over
the flattened array.
weights : array_like, optional
The importance that each element has in the computation of the average.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If ``weights=None``, then all data in `a` are assumed to have a
weight equal to one. The 1-D calculation is::
avg = sum(a * weights) / sum(weights)
The only constraint on `weights` is that `sum(weights)` must not be 0.
returned : bool, optional
Flag indicating whether a tuple ``(result, sum of weights)``
should be returned as output (True), or just the result (False).
Default is False.
Returns
-------
average, [sum_of_weights] : (tuple of) scalar or MaskedArray
The average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `np.float64`
if `a` is of integer type and floats smaller than `float64`, or the
input data-type, otherwise. If returned, `sum_of_weights` is always
`float64`.
Examples
--------
>>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True])
>>> np.ma.average(a, weights=[3, 1, 0, 0])
1.25
>>> x = np.ma.arange(6.).reshape(3, 2)
>>> x
masked_array(
data=[[0., 1.],
[2., 3.],
[4., 5.]],
mask=False,
fill_value=1e+20)
>>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3],
... returned=True)
>>> avg
masked_array(data=[2.6666666666666665, 3.6666666666666665],
mask=[False, False],
fill_value=1e+20)
"""
a = asarray(a)
m = getmask(a)
# inspired by 'average' in numpy/lib/function_base.py
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.count(axis))
else:
wgt = np.asanyarray(weights)
if issubclass(a.dtype.type, (np.integer, np.bool_)):
result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
else:
result_dtype = np.result_type(a.dtype, wgt.dtype)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape)
wgt = wgt.swapaxes(-1, axis)
if m is not nomask:
wgt = wgt*(~a.mask)
scl = wgt.sum(axis=axis, dtype=result_dtype)
avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl
if returned:
if scl.shape != avg.shape:
scl = np.broadcast_to(scl, avg.shape).copy()
return avg, scl
else:
return avg
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int, optional
Axis along which the medians are computed. The default (None) is
to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. Note that, if `overwrite_input` is True, and the input
is not already an `ndarray`, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
.. versionadded:: 1.10.0
Returns
-------
median : ndarray
A new array holding the result is returned unless out is
specified, in which case a reference to out is returned.
Return data-type is `float64` for integers and floats smaller than
`float64`, or the input data-type, otherwise.
See Also
--------
mean
Notes
-----
Given a vector ``V`` with ``N`` non masked values, the median of ``V``
is the middle value of a sorted copy of ``V`` (``Vs``) - i.e.
``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2``
when ``N`` is even.
Examples
--------
>>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4)
>>> np.ma.median(x)
1.5
>>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
>>> np.ma.median(x)
2.5
>>> np.ma.median(x, axis=-1, overwrite_input=True)
masked_array(data=[2.0, 5.0],
mask=[False, False],
fill_value=1e+20)
"""
if not hasattr(a, 'mask'):
m = np.median(getdata(a, subok=True), axis=axis,
out=out, overwrite_input=overwrite_input,
keepdims=keepdims)
if isinstance(m, np.ndarray) and 1 <= m.ndim:
return masked_array(m, copy=False)
else:
return m
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
# when an unmasked NaN is present return it, so we need to sort the NaN
# values behind the mask
if np.issubdtype(a.dtype, np.inexact):
fill_value = np.inf
else:
fill_value = None
if overwrite_input:
if axis is None:
asorted = a.ravel()
asorted.sort(fill_value=fill_value)
else:
a.sort(axis=axis, fill_value=fill_value)
asorted = a
else:
asorted = sort(a, axis=axis, fill_value=fill_value)
if axis is None:
axis = 0
else:
axis = normalize_axis_index(axis, asorted.ndim)
if asorted.shape[axis] == 0:
# for empty axis integer indices fail so use slicing to get same result
# as median (which is mean of empty slice = nan)
indexer = [slice(None)] * asorted.ndim
indexer[axis] = slice(0, 0)
indexer = tuple(indexer)
return np.ma.mean(asorted[indexer], axis=axis, out=out)
if asorted.ndim == 1:
counts = count(asorted)
idx, odd = divmod(count(asorted), 2)
mid = asorted[idx + odd - 1:idx + 1]
if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0:
# avoid inf / x = masked
s = mid.sum(out=out)
if not odd:
s = np.true_divide(s, 2., casting='safe', out=out)
s = np.lib.utils._median_nancheck(asorted, s, axis, out)
else:
s = mid.mean(out=out)
# if result is masked either the input contained enough
# minimum_fill_value so that it would be the median or all values
# masked
if np.ma.is_masked(s) and not np.all(asorted.mask):
return np.ma.minimum_fill_value(asorted)
return s
counts = count(asorted, axis=axis, keepdims=True)
h = counts // 2
# duplicate high if odd number of elements so mean does nothing
odd = counts % 2 == 1
l = np.where(odd, h, h-1)
lh = np.concatenate([l,h], axis=axis)
# get low and high median
low_high = np.take_along_axis(asorted, lh, axis=axis)
def replace_masked(s):
# Replace masked entries with minimum_fill_value unless all values
# are masked. This is required as the sort order of values equal or
# larger than the fill value is undefined and a valid value placed
# elsewhere, e.g. [4, --, inf], would not sort correctly.
if np.ma.is_masked(s):
rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask
s.data[rep] = np.ma.minimum_fill_value(asorted)
s.mask[rep] = False
replace_masked(low_high)
if np.issubdtype(asorted.dtype, np.inexact):
# avoid inf / x = masked
s = np.ma.sum(low_high, axis=axis, out=out)
np.true_divide(s.data, 2., casting='unsafe', out=s.data)
s = np.lib.utils._median_nancheck(asorted, s, axis, out)
else:
s = np.ma.mean(low_high, axis=axis, out=out)
return s
def compress_nd(x, axis=None):
"""Suppress slices from multiple dimensions which contain masked values.
Parameters
----------
x : array_like, MaskedArray
The array to operate on. If not a MaskedArray instance (or if no array
elements are masked, `x` is interpreted as a MaskedArray with `mask`
set to `nomask`.
axis : tuple of ints or int, optional
Which dimensions to suppress slices from can be configured with this
parameter.
- If axis is a tuple of ints, those are the axes to suppress slices from.
- If axis is an int, then that is the only axis to suppress slices from.
- If axis is None, all axes are selected.
Returns
-------
compress_array : ndarray
The compressed array.
"""
x = asarray(x)
m = getmask(x)
# Set axis to tuple of ints
if axis is None:
axis = tuple(range(x.ndim))
else:
axis = normalize_axis_tuple(axis, x.ndim)
# Nothing is masked: return x
if m is nomask or not m.any():
return x._data
# All is masked: return empty
if m.all():
return nxarray([])
# Filter elements through boolean indexing
data = x._data
for ax in axis:
axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim)))
data = data[(slice(None),)*ax + (~m.any(axis=axes),)]
return data
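# Example sketch (hypothetical input): with the default axis=None, every row
# and every column that contains a masked value is suppressed.
#
#     >>> import numpy as np, numpy.ma as ma
#     >>> x = ma.array(np.arange(9).reshape(3, 3),
#     ...              mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
#     >>> ma.compress_nd(x)  # drops row 0 and column 0
#     array([[4, 5],
#            [7, 8]])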
def compress_rowcols(x, axis=None):
"""
Suppress the rows and/or columns of a 2-D array that contain
masked values.
The suppression behavior is selected with the `axis` parameter.
- If axis is None, both rows and columns are suppressed.
- If axis is 0, only rows are suppressed.
- If axis is 1 or -1, only columns are suppressed.
Parameters
----------
x : array_like, MaskedArray
The array to operate on. If not a MaskedArray instance (or if no array
elements are masked), `x` is interpreted as a MaskedArray with
`mask` set to `nomask`. Must be a 2D array.
axis : int, optional
Axis along which to perform the operation. Default is None.
Returns
-------
compressed_array : ndarray
The compressed array.
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> x
masked_array(
data=[[--, 1, 2],
[--, 4, 5],
[6, 7, 8]],
mask=[[ True, False, False],
[ True, False, False],
[False, False, False]],
fill_value=999999)
>>> np.ma.compress_rowcols(x)
array([[7, 8]])
>>> np.ma.compress_rowcols(x, 0)
array([[6, 7, 8]])
>>> np.ma.compress_rowcols(x, 1)
array([[1, 2],
[4, 5],
[7, 8]])
"""
if asarray(x).ndim != 2:
raise NotImplementedError("compress_rowcols works for 2D arrays only.")
return compress_nd(x, axis=axis)
def compress_rows(a):
"""
Suppress whole rows of a 2-D array that contain masked values.
This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see
`extras.compress_rowcols` for details.
See Also
--------
extras.compress_rowcols
"""
a = asarray(a)
if a.ndim != 2:
raise NotImplementedError("compress_rows works for 2D arrays only.")
return compress_rowcols(a, 0)
def compress_cols(a):
"""
Suppress whole columns of a 2-D array that contain masked values.
This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see
`extras.compress_rowcols` for details.
See Also
--------
extras.compress_rowcols
"""
a = asarray(a)
if a.ndim != 2:
raise NotImplementedError("compress_cols works for 2D arrays only.")
return compress_rowcols(a, 1)
def mask_rows(a, axis=np._NoValue):
"""
Mask rows of a 2D array that contain masked values.
This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0.
See Also
--------
mask_rowcols : Mask rows and/or columns of a 2D array.
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.zeros((3, 3), dtype=int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
masked_array(
data=[[0, 0, 0],
[0, --, 0],
[0, 0, 0]],
mask=[[False, False, False],
[False, True, False],
[False, False, False]],
fill_value=1)
>>> ma.mask_rows(a)
masked_array(
data=[[0, 0, 0],
[--, --, --],
[0, 0, 0]],
mask=[[False, False, False],
[ True, True, True],
[False, False, False]],
fill_value=1)
"""
if axis is not np._NoValue:
# remove the axis argument when this deprecation expires
# NumPy 1.18.0, 2019-11-28
warnings.warn(
"The axis argument has always been ignored, in future passing it "
"will raise TypeError", DeprecationWarning, stacklevel=2)
return mask_rowcols(a, 0)
def mask_cols(a, axis=np._NoValue):
"""
Mask columns of a 2D array that contain masked values.
This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1.
See Also
--------
mask_rowcols : Mask rows and/or columns of a 2D array.
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.zeros((3, 3), dtype=int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
masked_array(
data=[[0, 0, 0],
[0, --, 0],
[0, 0, 0]],
mask=[[False, False, False],
[False, True, False],
[False, False, False]],
fill_value=1)
>>> ma.mask_cols(a)
masked_array(
data=[[0, --, 0],
[0, --, 0],
[0, --, 0]],
mask=[[False, True, False],
[False, True, False],
[False, True, False]],
fill_value=1)
"""
if axis is not np._NoValue:
# remove the axis argument when this deprecation expires
# NumPy 1.18.0, 2019-11-28
warnings.warn(
"The axis argument has always been ignored, in future passing it "
"will raise TypeError", DeprecationWarning, stacklevel=2)
return mask_rowcols(a, 1)
#####--------------------------------------------------------------------------
#---- --- arraysetops ---
#####--------------------------------------------------------------------------
def ediff1d(arr, to_end=None, to_begin=None):
"""
Compute the differences between consecutive elements of an array.
This function is the equivalent of `numpy.ediff1d` that takes masked
values into account, see `numpy.ediff1d` for details.
See Also
--------
numpy.ediff1d : Equivalent function for ndarrays.
"""
arr = ma.asanyarray(arr).flat
ed = arr[1:] - arr[:-1]
arrays = [ed]
#
if to_begin is not None:
arrays.insert(0, to_begin)
if to_end is not None:
arrays.append(to_end)
#
if len(arrays) != 1:
# We'll save ourselves a copy of a potentially large array in the common
# case where neither to_begin nor to_end was given.
ed = hstack(arrays)
#
return ed
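# Usage sketch (illustrative input): differences touching a masked element
# are themselves masked.
#
#     >>> import numpy.ma as ma
#     >>> ma.ediff1d(ma.array([1, 2, 4, 7], mask=[0, 1, 0, 0]))
#     # -> data [--, --, 3], mask [True, True, False]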
def unique(ar1, return_index=False, return_inverse=False):
"""
Finds the unique elements of an array.
Masked values are considered the same element (masked). The output array
is always a masked array. See `numpy.unique` for more details.
See Also
--------
numpy.unique : Equivalent function for ndarrays.
"""
output = np.unique(ar1,
return_index=return_index,
return_inverse=return_inverse)
if isinstance(output, tuple):
output = list(output)
output[0] = output[0].view(MaskedArray)
output = tuple(output)
else:
output = output.view(MaskedArray)
return output
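# Usage sketch (illustrative input): all masked entries collapse into a
# single masked element, which sorts to the end of the result.
#
#     >>> import numpy.ma as ma
#     >>> ma.unique(ma.array([1, 2, 1000, 2, 3], mask=[0, 0, 1, 0, 0]))
#     # -> data [1, 2, 3, --], mask [False, False, False, True]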
def intersect1d(ar1, ar2, assume_unique=False):
"""
Returns the unique elements common to both arrays.
Masked values are considered equal to one another.
The output is always a masked array.
See `numpy.intersect1d` for more details.
See Also
--------
numpy.intersect1d : Equivalent function for ndarrays.
Examples
--------
>>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1])
>>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1])
>>> np.ma.intersect1d(x, y)
masked_array(data=[1, 3, --],
mask=[False, False, True],
fill_value=999999)
"""
if assume_unique:
aux = ma.concatenate((ar1, ar2))
else:
# Might be faster than unique( intersect1d( ar1, ar2 ) )?
aux = ma.concatenate((unique(ar1), unique(ar2)))
aux.sort()
return aux[:-1][aux[1:] == aux[:-1]]
def setxor1d(ar1, ar2, assume_unique=False):
"""
Set exclusive-or of 1-D arrays with unique elements.
The output is always a masked array. See `numpy.setxor1d` for more details.
See Also
--------
numpy.setxor1d : Equivalent function for ndarrays.
"""
if not assume_unique:
ar1 = unique(ar1)
ar2 = unique(ar2)
aux = ma.concatenate((ar1, ar2))
if aux.size == 0:
return aux
aux.sort()
auxf = aux.filled()
# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0
flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True]))
# flag2 = ediff1d( flag ) == 0
flag2 = (flag[1:] == flag[:-1])
return aux[flag2]
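# Usage sketch (illustrative, unmasked inputs shown for simplicity): the
# result is the symmetric difference of the two sets of values.
#
#     >>> import numpy.ma as ma
#     >>> ma.setxor1d(ma.array([1, 2, 3, 2, 4]), ma.array([2, 3, 5, 7, 5]))
#     # -> masked array with data [1, 4, 5, 7]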
def in1d(ar1, ar2, assume_unique=False, invert=False):
"""
Test whether each element of an array is also present in a second
array.
The output is always a masked array. See `numpy.in1d` for more details.
We recommend using :func:`isin` instead of `in1d` for new code.
See Also
--------
isin : Version of this function that preserves the shape of ar1.
numpy.in1d : Equivalent function for ndarrays.
Notes
-----
.. versionadded:: 1.4.0
"""
if not assume_unique:
ar1, rev_idx = unique(ar1, return_inverse=True)
ar2 = unique(ar2)
ar = ma.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = ma.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
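# Usage sketch (illustrative, unmasked input shown for simplicity):
#
#     >>> import numpy.ma as ma
#     >>> ma.in1d(ma.array([1, 2, 3, 4]), [2, 4])
#     # -> masked array with data [False, True, False, True]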
def isin(element, test_elements, assume_unique=False, invert=False):
"""
Calculates `element in test_elements`, broadcasting over
`element` only.
The output is always a masked array of the same shape as `element`.
See `numpy.isin` for more details.
See Also
--------
in1d : Flattened version of this function.
numpy.isin : Equivalent function for ndarrays.
Notes
-----
.. versionadded:: 1.13.0
"""
element = ma.asarray(element)
return in1d(element, test_elements, assume_unique=assume_unique,
invert=invert).reshape(element.shape)
def union1d(ar1, ar2):
"""
Union of two arrays.
The output is always a masked array. See `numpy.union1d` for more details.
See Also
--------
numpy.union1d : Equivalent function for ndarrays.
"""
return unique(ma.concatenate((ar1, ar2), axis=None))
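# Usage sketch (illustrative input): the union keeps a single masked element
# when either input contains masked values.
#
#     >>> import numpy.ma as ma
#     >>> ma.union1d(ma.array([1, 2, 3], mask=[0, 0, 1]), ma.array([3, 4]))
#     # -> data [1, 2, 3, 4, --]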
def setdiff1d(ar1, ar2, assume_unique=False):
"""
Set difference of 1D arrays with unique elements.
The output is always a masked array. See `numpy.setdiff1d` for more
details.
See Also
--------
numpy.setdiff1d : Equivalent function for ndarrays.
Examples
--------
>>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
>>> np.ma.setdiff1d(x, [1, 2])
masked_array(data=[3, --],
mask=[False, True],
fill_value=999999)
"""
if assume_unique:
ar1 = ma.asarray(ar1).ravel()
else:
ar1 = unique(ar1)
ar2 = unique(ar2)
return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
###############################################################################
# Covariance #
###############################################################################
def _covhelper(x, y=None, rowvar=True, allow_masked=True):
"""
Private function for the computation of covariance and correlation
coefficients.
"""
x = ma.array(x, ndmin=2, copy=True, dtype=float)
xmask = ma.getmaskarray(x)
# Quick exit if we can't process masked data
if not allow_masked and xmask.any():
raise ValueError("Cannot process masked data.")
#
if x.shape[0] == 1:
rowvar = True
# Make sure that rowvar is either 0 or 1
rowvar = int(bool(rowvar))
axis = 1 - rowvar
if rowvar:
tup = (slice(None), None)
else:
tup = (None, slice(None))
#
if y is None:
xnotmask = np.logical_not(xmask).astype(int)
else:
y = array(y, copy=False, ndmin=2, dtype=float)
ymask = ma.getmaskarray(y)
if not allow_masked and ymask.any():
raise ValueError("Cannot process masked data.")
if xmask.any() or ymask.any():
if y.shape == x.shape:
# Define some common mask
common_mask = np.logical_or(xmask, ymask)
if common_mask is not nomask:
xmask = x._mask = y._mask = ymask = common_mask
x._sharedmask = False
y._sharedmask = False
x = ma.concatenate((x, y), axis)
xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int)
x -= x.mean(axis=rowvar)[tup]
return (x, xnotmask, rowvar)
def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
"""
Estimate the covariance matrix.
Except for the handling of missing data this function does the same as
`numpy.cov`. For more details and examples, see `numpy.cov`.
By default, masked values are recognized as such. If `x` and `y` have the
same shape, a common mask is allocated: if ``x[i,j]`` is masked, then
``y[i,j]`` will also be masked.
Setting `allow_masked` to False will raise an exception if values are
missing in either of the input arrays.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as `x`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : bool, optional
Default normalization (False) is by ``(N-1)``, where ``N`` is the
number of observations given (unbiased estimate). If `bias` is True,
then normalization is by ``N``. This keyword can be overridden by
the keyword ``ddof`` in numpy versions >= 1.5.
allow_masked : bool, optional
If True, masked values are propagated pair-wise: if a value is masked
in `x`, the corresponding value is masked in `y`.
If False, raises a `ValueError` exception when some values are missing.
ddof : {None, int}, optional
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
.. versionadded:: 1.5
Raises
------
ValueError
Raised if some values are missing and `allow_masked` is False.
See Also
--------
numpy.cov
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be an integer")
# Set up ddof
if ddof is None:
if bias:
ddof = 0
else:
ddof = 1
(x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
if not rowvar:
fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof
result = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
else:
fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof
result = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
return result
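# Usage sketch (illustrative input): masked observations are excluded from
# both the sums and the normalization factor.
#
#     >>> import numpy.ma as ma
#     >>> x = ma.array([1., 2., 3., 4.], mask=[0, 0, 0, 1])
#     >>> ma.cov(x)
#     # -> 1.0, the unbiased variance of the three unmasked points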
def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True,
ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Except for the handling of missing data this function does the same as
`numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
allow_masked : bool, optional
If True, masked values are propagated pair-wise: if a value is masked
in `x`, the corresponding value is masked in `y`.
If False, raises an exception. Because `bias` is deprecated, this
argument needs to be treated as keyword only to avoid a warning.
ddof : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
See Also
--------
numpy.corrcoef : Equivalent function in top-level NumPy module.
cov : Estimate the covariance matrix.
Notes
-----
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
"""
msg = 'bias and ddof have no effect and are deprecated'
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn(msg, DeprecationWarning, stacklevel=2)
# Get the data
(x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
# Compute the covariance matrix
if not rowvar:
fact = np.dot(xnotmask.T, xnotmask) * 1.
c = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
else:
fact = np.dot(xnotmask, xnotmask.T) * 1.
c = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
# Check whether we have a scalar
try:
diag = ma.diagonal(c)
except ValueError:
return 1
#
if xnotmask.all():
_denom = ma.sqrt(ma.multiply.outer(diag, diag))
else:
_denom = diagflat(diag)
_denom._sharedmask = False # We know return is always a copy
n = x.shape[1 - rowvar]
if rowvar:
for i in range(n - 1):
for j in range(i + 1, n):
_x = mask_cols(vstack((x[i], x[j]))).var(axis=1)
_denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
else:
for i in range(n - 1):
for j in range(i + 1, n):
_x = mask_cols(
vstack((x[:, i], x[:, j]))).var(axis=1)
_denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
return c / _denom
#####--------------------------------------------------------------------------
#---- --- Concatenation helpers ---
#####--------------------------------------------------------------------------
class MAxisConcatenator(AxisConcatenator):
"""
Translate slice objects to concatenation along an axis.
For documentation on usage, see `mr_class`.
See Also
--------
mr_class
"""
concatenate = staticmethod(concatenate)
@classmethod
def makemat(cls, arr):
# There used to be a view as np.matrix here, but we may eventually
# deprecate that class. In preparation, we use the unmasked version
# to construct the matrix (with copy=False for backwards compatibility
# with the .view)
data = super(MAxisConcatenator, cls).makemat(arr.data, copy=False)
return array(data, mask=arr.mask)
def __getitem__(self, key):
# matrix builder syntax, like 'a, b; c, d'
if isinstance(key, str):
raise MAError("Unavailable for masked array.")
return super(MAxisConcatenator, self).__getitem__(key)
class mr_class(MAxisConcatenator):
"""
Translate slice objects to concatenation along the first axis.
This is the masked array version of `lib.index_tricks.RClass`.
See Also
--------
lib.index_tricks.RClass
Examples
--------
>>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])]
masked_array(data=[1, 2, 3, ..., 4, 5, 6],
mask=False,
fill_value=999999)
"""
def __init__(self):
MAxisConcatenator.__init__(self, 0)
mr_ = mr_class()
#####--------------------------------------------------------------------------
#---- Find unmasked data ---
#####--------------------------------------------------------------------------
def flatnotmasked_edges(a):
"""
Find the indices of the first and last unmasked values.
Expects a 1-D `MaskedArray`, returns None if all values are masked.
Parameters
----------
a : array_like
Input 1-D `MaskedArray`
Returns
-------
edges : ndarray or None
The indices of first and last non-masked value in the array.
Returns None if all values are masked.
See Also
--------
flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges
clump_masked, clump_unmasked
Notes
-----
Only accepts 1-D arrays.
Examples
--------
>>> a = np.ma.arange(10)
>>> np.ma.flatnotmasked_edges(a)
array([0, 9])
>>> mask = (a < 3) | (a > 8) | (a == 5)
>>> a[mask] = np.ma.masked
>>> np.array(a[~a.mask])
array([3, 4, 6, 7, 8])
>>> np.ma.flatnotmasked_edges(a)
array([3, 8])
>>> a[:] = np.ma.masked
>>> print(np.ma.flatnotmasked_edges(a))
None
"""
m = getmask(a)
if m is nomask or not np.any(m):
return np.array([0, a.size - 1])
unmasked = np.flatnonzero(~m)
if len(unmasked) > 0:
return unmasked[[0, -1]]
else:
return None
def notmasked_edges(a, axis=None):
"""
Find the indices of the first and last unmasked values along an axis.
If all values are masked, return None. Otherwise, return a list
of two tuples, corresponding to the indices of the first and last
unmasked values respectively.
Parameters
----------
a : array_like
The input array.
axis : int, optional
Axis along which to perform the operation.
If None (default), applies to a flattened version of the array.
Returns
-------
edges : ndarray or list
An array of start and end indexes if there are any masked data in
the array. If there are no masked data in the array, `edges` is a
list of the first and last index.
See Also
--------
flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous
clump_masked, clump_unmasked
Examples
--------
>>> a = np.arange(9).reshape((3, 3))
>>> m = np.zeros_like(a)
>>> m[1:, 1:] = 1
>>> am = np.ma.array(a, mask=m)
>>> np.array(am[~am.mask])
array([0, 1, 2, 3, 6])
>>> np.ma.notmasked_edges(am)
array([0, 6])
"""
a = asarray(a)
if axis is None or a.ndim == 1:
return flatnotmasked_edges(a)
m = getmaskarray(a)
idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim))
return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]),
tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ]
def flatnotmasked_contiguous(a):
"""
Find contiguous unmasked data in a masked array along the given axis.
Parameters
----------
a : ndarray
The input array.
Returns
-------
slice_list : list
A sorted sequence of `slice` objects (start index, end index).
.. versionchanged:: 1.15.0
Now returns an empty list instead of None for a fully masked array.
See Also
--------
flatnotmasked_edges, notmasked_contiguous, notmasked_edges
clump_masked, clump_unmasked
Notes
-----
Only accepts 2-D arrays at most.
Examples
--------
>>> a = np.ma.arange(10)
>>> np.ma.flatnotmasked_contiguous(a)
[slice(0, 10, None)]
>>> mask = (a < 3) | (a > 8) | (a == 5)
>>> a[mask] = np.ma.masked
>>> np.array(a[~a.mask])
array([3, 4, 6, 7, 8])
>>> np.ma.flatnotmasked_contiguous(a)
[slice(3, 5, None), slice(6, 9, None)]
>>> a[:] = np.ma.masked
>>> np.ma.flatnotmasked_contiguous(a)
[]
"""
m = getmask(a)
if m is nomask:
return [slice(0, a.size)]
i = 0
result = []
for (k, g) in itertools.groupby(m.ravel()):
n = len(list(g))
if not k:
result.append(slice(i, i + n))
i += n
return result
def notmasked_contiguous(a, axis=None):
"""
Find contiguous unmasked data in a masked array along the given axis.
Parameters
----------
a : array_like
The input array.
axis : int, optional
Axis along which to perform the operation.
If None (default), applies to a flattened version of the array, and this
is the same as `flatnotmasked_contiguous`.
Returns
-------
endpoints : list
A list of slices (start and end indexes) of unmasked indexes
in the array.
If the input is 2d and axis is specified, the result is a list of lists.
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
clump_masked, clump_unmasked
Notes
-----
Only accepts 2-D arrays at most.
Examples
--------
>>> a = np.arange(12).reshape((3, 4))
>>> mask = np.zeros_like(a)
>>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0
>>> ma = np.ma.array(a, mask=mask)
>>> ma
masked_array(
data=[[0, --, 2, 3],
[--, --, --, 7],
[8, --, --, 11]],
mask=[[False, True, False, False],
[ True, True, True, False],
[False, True, True, False]],
fill_value=999999)
>>> np.array(ma[~ma.mask])
array([ 0, 2, 3, 7, 8, 11])
>>> np.ma.notmasked_contiguous(ma)
[slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)]
>>> np.ma.notmasked_contiguous(ma, axis=0)
[[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]]
>>> np.ma.notmasked_contiguous(ma, axis=1)
[[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]]
"""
a = asarray(a)
nd = a.ndim
if nd > 2:
raise NotImplementedError("Currently limited to atmost 2D array.")
if axis is None or nd == 1:
return flatnotmasked_contiguous(a)
#
result = []
#
other = (axis + 1) % 2
idx = [0, 0]
idx[axis] = slice(None, None)
#
for i in range(a.shape[other]):
idx[other] = i
result.append(flatnotmasked_contiguous(a[tuple(idx)]))
return result
def _ezclump(mask):
"""
Finds the clumps (groups of data with the same values) for a 1D bool array.
Returns a series of slices.
"""
if mask.ndim > 1:
mask = mask.ravel()
idx = (mask[1:] ^ mask[:-1]).nonzero()
idx = idx[0] + 1
if mask[0]:
if len(idx) == 0:
return [slice(0, mask.size)]
r = [slice(0, idx[0])]
r.extend((slice(left, right)
for left, right in zip(idx[1:-1:2], idx[2::2])))
else:
if len(idx) == 0:
return []
r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])]
if mask[-1]:
r.append(slice(idx[-1], mask.size))
return r
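# Example (hypothetical input): the returned slices cover the runs of True
# values.
#
#     >>> import numpy as np
#     >>> _ezclump(np.array([True, True, False, False, True]))
#     [slice(0, 2, None), slice(4, 5, None)]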
def clump_unmasked(a):
"""
Return list of slices corresponding to the unmasked clumps of a 1-D array.
(A "clump" is defined as a contiguous region of the array).
Parameters
----------
a : ndarray
A one-dimensional masked array.
Returns
-------
slices : list of slice
The list of slices, one for each continuous region of unmasked
elements in `a`.
Notes
-----
.. versionadded:: 1.4.0
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
notmasked_contiguous, clump_masked
Examples
--------
>>> a = np.ma.masked_array(np.arange(10))
>>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
>>> np.ma.clump_unmasked(a)
[slice(3, 6, None), slice(7, 8, None)]
"""
mask = getattr(a, '_mask', nomask)
if mask is nomask:
return [slice(0, a.size)]
return _ezclump(~mask)
def clump_masked(a):
"""
Returns a list of slices corresponding to the masked clumps of a 1-D array.
(A "clump" is defined as a contiguous region of the array).
Parameters
----------
a : ndarray
A one-dimensional masked array.
Returns
-------
slices : list of slice
The list of slices, one for each continuous region of masked elements
in `a`.
Notes
-----
.. versionadded:: 1.4.0
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
notmasked_contiguous, clump_unmasked
Examples
--------
>>> a = np.ma.masked_array(np.arange(10))
>>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
>>> np.ma.clump_masked(a)
[slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)]
"""
mask = ma.getmask(a)
if mask is nomask:
return []
return _ezclump(mask)
###############################################################################
# Polynomial fit #
###############################################################################
def vander(x, n=None):
"""
Masked values in the input array result in rows of zeros.
"""
_vander = np.vander(x, n)
m = getmask(x)
if m is not nomask:
_vander[m] = 0
return _vander
vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__)
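# Usage sketch (illustrative input): the row built from the masked element is
# zeroed out rather than propagating the mask.
#
#     >>> import numpy.ma as ma
#     >>> ma.vander(ma.array([1, 2, 3], mask=[0, 1, 0]), 3)
#     array([[1, 1, 1],
#            [0, 0, 0],
#            [9, 3, 1]])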
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Any masked values in x are propagated in y, and vice-versa.
"""
x = asarray(x)
y = asarray(y)
m = getmask(x)
if y.ndim == 1:
m = mask_or(m, getmask(y))
elif y.ndim == 2:
my = getmask(mask_rows(y))
if my is not nomask:
m = mask_or(m, my[:, 0])
else:
raise TypeError("Expected a 1D or 2D array for y!")
if w is not None:
w = asarray(w)
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
m = mask_or(m, getmask(w))
if m is not nomask:
not_m = ~m
if w is not None:
w = w[not_m]
return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov)
else:
return np.polyfit(x, y, deg, rcond, full, w, cov)
polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)
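# Usage sketch (illustrative inputs): the fit is computed from the unmasked
# points only; a masked x-value drops the corresponding y-value as well.
#
#     >>> import numpy as np, numpy.ma as ma
#     >>> x = ma.array([0., 1., 2., 3.], mask=[0, 0, 1, 0])
#     >>> y = np.array([0., 1., 4., 9.])
#     >>> coeffs = ma.polyfit(x, y, 1)  # uses (0, 0), (1, 1) and (3, 9)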
# === wearpants/osf.io :: website/addons/mendeley/tests/test_models.py (license: apache-2.0) ===
# -*- coding: utf-8 -*-
import mock
from mendeley.exception import MendeleyApiException
from tests.base import OsfTestCase
from website.addons.base.testing.models import (
CitationAddonProviderTestSuiteMixin,
OAuthAddonUserSettingTestSuiteMixin,
OAuthCitationsNodeSettingsTestSuiteMixin,
)
from website.addons.mendeley.model import (
Mendeley, MendeleyNodeSettings,
)
from website.addons.mendeley.provider import MendeleyCitationsProvider
from website.addons.mendeley.tests.factories import (
MendeleyAccountFactory,
MendeleyUserSettingsFactory,
MendeleyNodeSettingsFactory,
)
class MendeleyProviderTestCase(CitationAddonProviderTestSuiteMixin, OsfTestCase):
short_name = 'mendeley'
full_name = 'Mendeley'
ExternalAccountFactory = MendeleyAccountFactory
ProviderClass = MendeleyCitationsProvider
OAuthProviderClass = Mendeley
ApiExceptionClass = MendeleyApiException
@mock.patch('website.addons.mendeley.model.Mendeley._get_client')
def test_handle_callback(self, mock_get_client):
# Must return provider_id and display_name
mock_client = mock.Mock()
mock_client.profiles.me = mock.Mock(id='testid', display_name='testdisplay')
mock_get_client.return_value = mock_client
res = self.provider.handle_callback('testresponse')
mock_get_client.assert_called_with(credentials='testresponse')
assert(res.get('provider_id') == 'testid')
assert(res.get('display_name') == 'testdisplay')
class MendeleyNodeSettingsTestCase(OAuthCitationsNodeSettingsTestSuiteMixin, OsfTestCase):
short_name = 'mendeley'
full_name = 'Mendeley'
ExternalAccountFactory = MendeleyAccountFactory
ProviderClass = MendeleyCitationsProvider
OAuthProviderClass = Mendeley
NodeSettingsFactory = MendeleyNodeSettingsFactory
NodeSettingsClass = MendeleyNodeSettings
UserSettingsFactory = MendeleyUserSettingsFactory
class MendeleyUserSettingsTestCase(OAuthAddonUserSettingTestSuiteMixin, OsfTestCase):
short_name = 'mendeley'
full_name = 'Mendeley'
ExternalAccountFactory = MendeleyAccountFactory
# === goldenbull/grpc :: test/distrib/python/distribtest.py (license: bsd-3-clause) ===
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from grpc.beta import implementations
# This code doesn't do much but makes sure the native extension is loaded
# which is what we are testing here.
channel = implementations.insecure_channel('localhost', 1000)
del channel
print 'Success!'
# === jazzband/django-discover-jenkins :: tests/tests/test_runner.py (license: bsd-3-clause) ===
from unittest import skipIf
import django
from discover_jenkins import runner, tasks
from django.test import TestCase
try:
from unittest.mock import MagicMock, patch
except ImportError:
from mock import MagicMock, patch
class FakeTestRunner(object):
"""
A fake object to stub out the base methods that the mixin's super() calls
require.
"""
def setup_test_environment(self):
pass
def teardown_test_environment(self):
pass
class Runner(runner.CIRunner, FakeTestRunner):
"""CIRunner is a mixin, so use the FakeTestRunner as a base"""
pass
class TestCIRunner(TestCase):
def test_get_tasks(self):
"""
Make sure the correct tasks are imported based on the
test_project.settings.
"""
self.assertEqual(runner.get_tasks(),
[tasks.with_coverage.CoverageTask,
tasks.run_pylint.PyLintTask,
tasks.run_flake8.Flake8Task,
tasks.run_jshint.JSHintTask,
tasks.run_sloccount.SlocCountTask])
@skipIf(django.VERSION >= (1, 8), "optparse is not used on Django 1.8+")
def test_get_task_options(self):
"""
For now, just do a simple test to make sure the right number of options
are gleaned from the tasks.
"""
self.assertEqual(len(runner.get_task_options()), 20)
def test_setup_test_environment(self):
"""
Make sure the setup_test_environment method on a task is triggered by
the runner.
"""
mock_task = MagicMock()
with patch.object(Runner, '__init__') as mock_init:
mock_init.return_value = None
cirun = Runner()
cirun.jenkins = True
cirun.tasks = [mock_task]
cirun.setup_test_environment()
self.assertTrue(mock_task.setup_test_environment.called)
def test_teardown_test_environment(self):
"""
Make sure the teardown_test_environment method on a task is triggered by
the runner.
"""
mock_task = MagicMock()
with patch.object(Runner, '__init__') as mock_init:
mock_init.return_value = None
cirun = Runner()
cirun.jenkins = True
cirun.tasks = [mock_task]
cirun.teardown_test_environment()
self.assertTrue(mock_task.teardown_test_environment.called)
# === xzturn/tensorflow :: xzturn/wechat/train.py (license: apache-2.0) ===
# -*- coding: utf-8 -*-
from __future__ import print_function
import tensorflow as tf
import argparse
import time
import os
from six.moves import cPickle
from utils import WechatLoader
from model import Model
def train(args):
data_loader = WechatLoader(args.corpus_file, args.batch_size, args.seq_length)
args.vocab_size = data_loader.vocab_size
# check compatibility if training is continued from previously saved model
if args.init_from is not None:
# check if all necessary files exist
assert os.path.isdir(args.init_from), " %s must be a path" % args.init_from
assert os.path.isfile(os.path.join(args.init_from, "config.pkl")),\
"config.pkl file does not exist in path %s" % args.init_from
assert os.path.isfile(os.path.join(args.init_from, "chars_vocab.pkl")),\
"chars_vocab.pkl.pkl file does not exist in path %s" % args.init_from
ckpt = tf.train.get_checkpoint_state(args.init_from)
assert ckpt, "No checkpoint found"
assert ckpt.model_checkpoint_path, "No model path found in checkpoint"
# open old config and check if models are compatible
with open(os.path.join(args.init_from, 'config.pkl'), 'rb') as f:
saved_model_args = cPickle.load(f)
need_be_same = ["model", "rnn_size", "num_layers", "seq_length"]
for checkme in need_be_same:
assert vars(saved_model_args)[checkme] == vars(args)[checkme],\
"Command line argument and saved model disagree on '%s' " % checkme
# open saved vocab/dict and check if vocabs/dicts are compatible
with open(os.path.join(args.init_from, 'chars_vocab.pkl'), 'rb') as f:
saved_chars, saved_vocab = cPickle.load(f)
assert saved_chars == data_loader.chars, "Data and loaded model disagree on character set!"
assert saved_vocab == data_loader.vocab, "Data and loaded model disagree on dictionary mappings!"
with open(os.path.join(args.save_dir, 'config.pkl'), 'wb') as f:
cPickle.dump(args, f)
with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'wb') as f:
cPickle.dump((data_loader.chars, data_loader.vocab), f)
model = Model(args)
t_start = time.time()
with tf.Session() as sess:
# init variables
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables())
# restore model
if args.init_from is not None:
saver.restore(sess, ckpt.model_checkpoint_path)
for e in range(args.num_epochs):
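            # exponential learning-rate decay: lr_e = learning_rate * decay_rate ** e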
sess.run(tf.assign(model.lr, args.learning_rate * (args.decay_rate ** e)))
data_loader.reset_batch_pointer()
state = sess.run(model.initial_state)
for b in range(data_loader.num_batches):
start = time.time()
x, y = data_loader.next_batch()
feed = {model.input_data: x, model.targets: y}
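                # feed the previous batch's final state so the RNN state flows across batches within the epoch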
for i, (c, h) in enumerate(model.initial_state):
feed[c] = state[i].c
feed[h] = state[i].h
train_loss, state, _ = sess.run([model.cost, model.final_state, model.train_op], feed)
end = time.time()
print("{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}"
.format(e * data_loader.num_batches + b,
args.num_epochs * data_loader.num_batches,
e, train_loss, end - start))
if (e * data_loader.num_batches + b) % args.save_every == 0 \
or (e == args.num_epochs - 1 and b == data_loader.num_batches - 1): # save for the last result
checkpoint_path = os.path.join(args.save_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=e * data_loader.num_batches + b)
print("model saved to {}".format(checkpoint_path))
t_end = time.time()
print("Total train time: {:.3f}".format(t_end - t_start))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--corpus_file', type=str, default='wechat.dat',
help='the corpus (json) file to learn from')
parser.add_argument('--save_dir', type=str, default='save',
help='directory to store checkpointed models')
parser.add_argument('--rnn_size', type=int, default=128,
help='size of RNN hidden state')
parser.add_argument('--num_layers', type=int, default=2,
help='number of layers in the RNN')
parser.add_argument('--model', type=str, default='lstm',
help='rnn, gru, or lstm')
parser.add_argument('--batch_size', type=int, default=50,
help='minibatch size')
parser.add_argument('--seq_length', type=int, default=50,
help='RNN sequence length')
parser.add_argument('--num_epochs', type=int, default=50,
help='number of epochs')
parser.add_argument('--save_every', type=int, default=1000,
help='save frequency')
parser.add_argument('--grad_clip', type=float, default=5.,
help='clip gradients at this value')
parser.add_argument('--learning_rate', type=float, default=0.002,
help='learning rate')
parser.add_argument('--decay_rate', type=float, default=0.97,
help='decay rate for rmsprop')
parser.add_argument('--init_from', type=str, default=None,
help="""continue training from saved model at this path. Path must contain files saved by previous training process:
'config.pkl' : configuration;
'chars_vocab.pkl' : vocabulary definitions;
'checkpoint' : paths to model file(s) (created by tf).
Note: this file contains absolute paths, be careful when moving files around;
'model.ckpt-*' : file(s) with model definition (created by tf)
""")
train(parser.parse_args())
| apache-2.0 | -8,568,529,064,496,981,000 | 50.909091 | 140 | 0.575068 | false |
dikshant2210/Neural-Machine-Translation | utils.py | 1 | 1309 | import unicodedata
import re
import time
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
MAX_LENGTH = 10
eng_prefixes = (
"i am", " i m ",
"he is", "he s ",
"she is", "she s",
"you are", "you re ",
"we are", "we re ",
"they are", "they re "
)
def unicode_to_ascii(s):
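    # Strip accents: decompose to NFD and drop combining marks ('Mn'),
    # e.g. u'caf\xe9' -> 'cafe'.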
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
def normalize_string(s):
s = unicode_to_ascii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-z.!?]", r" ", s)
return s
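# Keep only short sentence pairs whose second sentence (the English side)
# starts with one of the prefixes above.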
def filter_pair(p):
return len(p[0].split(' ')) < MAX_LENGTH and \
len(p[1].split(' ')) < MAX_LENGTH and \
p[1].startswith(eng_prefixes)
def filter_pairs(pairs):
return [pair for pair in pairs if filter_pair(pair)]
def as_minutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def time_since(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (as_minutes(s), as_minutes(rs))
def show_plot(points):
plt.figure()
fig, ax = plt.subplots()
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
| mit | -8,415,463,011,659,523,000 | 19.453125 | 56 | 0.563025 | false |
arrdem/pixie | pixie/vm/persistent_hash_set.py | 8 | 2777 | py_object = object
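# py_object keeps a handle on the builtin object; the next import shadows the name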
import pixie.vm.object as object
from pixie.vm.primitives import nil, true, false
import pixie.vm.persistent_hash_map as persistent_hash_map
import pixie.vm.stdlib as proto
from pixie.vm.code import extend, as_var, intern_var
import pixie.vm.rt as rt
VAR_KEY = intern_var(u"pixie.stdlib", u"key")
class PersistentHashSet(object.Object):
_type = object.Type(u"pixie.stdlib.PersistentHashSet")
def type(self):
return PersistentHashSet._type
def __init__(self, meta, m):
self._meta = meta
self._map = m
def conj(self, v):
return PersistentHashSet(self._meta, self._map.assoc(v, v))
def disj(self, k):
return PersistentHashSet(self._meta, self._map.without(k))
def meta(self):
return self._meta
def with_meta(self, meta):
return PersistentHashSet(meta, self._map)
EMPTY = PersistentHashSet(nil, persistent_hash_map.EMPTY)
@as_var("set")
def _create(coll):
ret = EMPTY
coll = rt._seq(coll)
while coll is not nil:
ret = ret.conj(rt._first(coll))
coll = rt._seq(rt._next(coll))
return ret
@extend(proto._count, PersistentHashSet)
def _count(self):
assert isinstance(self, PersistentHashSet)
return rt._count(self._map)
@extend(proto._val_at, PersistentHashSet)
def _val_at(self, key, not_found):
assert isinstance(self, PersistentHashSet)
return rt._val_at(self._map, key, not_found)
@extend(proto._contains_key, PersistentHashSet)
def _contains_key(self, key):
assert isinstance(self, PersistentHashSet)
return rt._contains_key(self._map, key)
@extend(proto._eq, PersistentHashSet)
def _eq(self, obj):
assert isinstance(self, PersistentHashSet)
if self is obj:
return true
if not isinstance(obj, PersistentHashSet):
return false
if self._map._cnt != obj._map._cnt:
return false
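    # same size: the sets are equal iff every element of obj is also in self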
seq = rt.seq(obj)
while seq is not nil:
if rt._contains_key(self, rt.first(seq)) is false:
return false
seq = rt.next(seq)
return true
@extend(proto._conj, PersistentHashSet)
def _conj(self, v):
assert isinstance(self, PersistentHashSet)
return self.conj(v)
@extend(proto._disj, PersistentHashSet)
def _disj(self, v):
assert isinstance(self, PersistentHashSet)
return self.disj(v)
@extend(proto._reduce, PersistentHashSet)
def _reduce(self, f, init):
assert isinstance(self, PersistentHashSet)
return rt._reduce(rt.keys(self._map), f, init)
@extend(proto._meta, PersistentHashSet)
def _meta(self):
assert isinstance(self, PersistentHashSet)
return self.meta()
@extend(proto._with_meta, PersistentHashSet)
def _with_meta(self, meta):
assert isinstance(self, PersistentHashSet)
return self.with_meta(meta)
| gpl-3.0 | -3,826,382,706,349,838,300 | 26.77 | 67 | 0.681311 | false |
koomik/CouchPotatoServer | couchpotato/core/media/_base/providers/base.py | 1 | 9912 | from urlparse import urlparse
import json
import re
import time
import traceback
import xml.etree.ElementTree as XMLTree
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import ss
from couchpotato.core.helpers.variable import tryFloat, mergeDicts, md5, \
possibleTitles
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
log = CPLog(__name__)
class MultiProvider(Plugin):
def __init__(self):
self._classes = []
for Type in self.getTypes():
klass = Type()
# Overwrite name so logger knows what we're talking about
klass.setName('%s:%s' % (self.getName(), klass.getName()))
self._classes.append(klass)
def getTypes(self):
return []
def getClasses(self):
return self._classes
class Provider(Plugin):
type = None # movie, show, subtitle, trailer, ...
http_time_between_calls = 10 # Default timeout for url requests
last_available_check = {}
is_available = {}
def isAvailable(self, test_url):
if Env.get('dev'): return True
now = time.time()
host = urlparse(test_url).hostname
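        # probe each host at most once every 15 minutes (900 seconds)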
if self.last_available_check.get(host) < now - 900:
self.last_available_check[host] = now
try:
self.urlopen(test_url, 30)
self.is_available[host] = True
except:
log.error('"%s" unavailable, trying again in an 15 minutes.', host)
self.is_available[host] = False
return self.is_available.get(host, False)
def getJsonData(self, url, decode_from = None, **kwargs):
cache_key = md5(url)
data = self.getCache(cache_key, url, **kwargs)
if data:
try:
data = data.strip()
if decode_from:
data = data.decode(decode_from)
return json.loads(data)
except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
return []
def getRSSData(self, url, item_path = 'channel/item', **kwargs):
cache_key = md5(url)
data = self.getCache(cache_key, url, **kwargs)
if data and len(data) > 0:
try:
data = XMLTree.fromstring(ss(data))
return self.getElements(data, item_path)
except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
return []
def getHTMLData(self, url, **kwargs):
cache_key = md5(url)
return self.getCache(cache_key, url, **kwargs)
class YarrProvider(Provider):
protocol = None # nzb, torrent, torrent_magnet
cat_ids = {}
cat_backup_id = None
size_gb = ['gb', 'gib','go']
size_mb = ['mb', 'mib','mo']
size_kb = ['kb', 'kib','ko']
last_login_check = None
def __init__(self):
addEvent('provider.enabled_protocols', self.getEnabledProtocol)
addEvent('provider.belongs_to', self.belongsTo)
addEvent('provider.search.%s.%s' % (self.protocol, self.type), self.search)
def getEnabledProtocol(self):
if self.isEnabled():
return self.protocol
else:
return []
def login(self):
# Check if we are still logged in every hour
now = time.time()
if self.last_login_check and self.last_login_check < (now - 3600):
try:
output = self.urlopen(self.urls['login_check'])
if self.loginCheckSuccess(output):
self.last_login_check = now
return True
except: pass
self.last_login_check = None
if self.last_login_check:
return True
try:
output = self.urlopen(self.urls['login'], data = self.getLoginParams())
if self.loginSuccess(output):
self.last_login_check = now
return True
error = 'unknown'
except:
error = traceback.format_exc()
self.last_login_check = None
log.error('Failed to login %s: %s', (self.getName(), error))
return False
def loginSuccess(self, output):
return True
def loginCheckSuccess(self, output):
return True
def loginDownload(self, url = '', nzb_id = ''):
try:
if not self.login():
log.error('Failed downloading from %s', self.getName())
return self.urlopen(url)
except:
log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc()))
def getLoginParams(self):
return {}
def download(self, url = '', nzb_id = ''):
try:
return self.urlopen(url, headers = {'User-Agent': Env.getIdentifier()}, show_error = False)
except:
log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc()))
return 'try_next'
def search(self, media, quality):
if self.isDisabled():
return []
# Login if needed
if self.urls.get('login') and not self.login():
log.error('Failed to login to: %s', self.getName())
return []
# Create result container
imdb_results = hasattr(self, '_search')
results = ResultList(self, media, quality, imdb_results = imdb_results)
# Do search based on imdb id
if imdb_results:
self._search(media, quality, results)
# Search possible titles
else:
media_title = fireEvent('library.query', media, include_year = False, single = True)
for title in possibleTitles(media_title):
self._searchOnTitle(title, media, quality, results)
return results
def belongsTo(self, url, provider = None, host = None):
try:
if provider and provider == self.getName():
return self
hostname = urlparse(url).hostname
if host and hostname in host:
return self
else:
for url_type in self.urls:
download_url = self.urls[url_type]
if hostname in download_url:
return self
except:
log.debug('Url %s doesn\'t belong to %s', (url, self.getName()))
return
def parseSize(self, size):
size_raw = size.lower()
size = tryFloat(re.sub(r'[^0-9.]', '', size).strip())
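        # normalize the size string to megabytes (GB * 1024, MB as-is, KB / 1024)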
for s in self.size_gb:
if s in size_raw:
return size * 1024
for s in self.size_mb:
if s in size_raw:
return size
for s in self.size_kb:
if s in size_raw:
return size / 1024
return 0
def getCatId(self, quality = None):
if not quality: quality = {}
identifier = quality.get('identifier')
want_3d = False
if quality.get('custom'):
want_3d = quality['custom'].get('3d')
for ids, qualities in self.cat_ids:
if identifier in qualities or (want_3d and '3d' in qualities):
return ids
if self.cat_backup_id:
return [self.cat_backup_id]
return []
class ResultList(list):
result_ids = None
provider = None
media = None
quality = None
def __init__(self, provider, media, quality, **kwargs):
self.result_ids = []
self.provider = provider
self.media = media
self.quality = quality
self.kwargs = kwargs
super(ResultList, self).__init__()
def extend(self, results):
for r in results:
self.append(r)
def append(self, result):
new_result = self.fillResult(result)
is_correct = fireEvent('searcher.correct_release', new_result, self.media, self.quality,
imdb_results = self.kwargs.get('imdb_results', False), single = True)
if is_correct and new_result['id'] not in self.result_ids:
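            # scale the calculated score by the match weight returned by searcher.correct_release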
is_correct_weight = float(is_correct)
new_result['score'] += fireEvent('score.calculate', new_result, self.media, single = True)
old_score = new_result['score']
new_result['score'] = int(old_score * is_correct_weight)
log.info('Found correct release with weight %.02f, old_score(%d) now scaled to score(%d)', (
is_correct_weight,
old_score,
new_result['score']
))
self.found(new_result)
self.result_ids.append(result['id'])
super(ResultList, self).append(new_result)
def fillResult(self, result):
defaults = {
'id': 0,
'protocol': self.provider.protocol,
'type': self.provider.type,
'provider': self.provider.getName(),
'download': self.provider.loginDownload if self.provider.urls.get('login') else self.provider.download,
'seed_ratio': Env.setting('seed_ratio', section = self.provider.getName().lower(), default = ''),
'seed_time': Env.setting('seed_time', section = self.provider.getName().lower(), default = ''),
'url': '',
'name': '',
'age': 0,
'size': 0,
'description': '',
'score': 0
}
return mergeDicts(defaults, result)
def found(self, new_result):
if not new_result.get('provider_extra'):
new_result['provider_extra'] = ''
else:
new_result['provider_extra'] = ', %s' % new_result['provider_extra']
log.info('Found: score(%(score)s) on %(provider)s%(provider_extra)s: %(name)s', new_result)
| gpl-3.0 | -7,051,217,317,254,229,000 | 28.325444 | 115 | 0.55226 | false |
bittner/django-media-tree | media_tree/admin/actions/core_actions.py | 1 | 6256 | from media_tree.models import FileNode
from media_tree.admin.actions.utils import get_actions_context
from media_tree.admin.actions.forms import FileNodeActionsWithUserForm, MoveSelectedForm, CopySelectedForm, ChangeMetadataForSelectedForm
from media_tree.forms import MetadataForm
from media_tree.utils.filenode import get_nested_filenode_list
from django import forms
from django.contrib import messages
from django.utils.translation import ungettext, ugettext as _
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.contrib.admin import helpers
from django.http import HttpResponse, HttpResponseRedirect
def get_current_node(form):
selected_nodes = form.get_selected_nodes()
if len(selected_nodes) > 0:
current_node = selected_nodes[0].parent
if not current_node:
return FileNode.get_top_node()
else:
return current_node
def filenode_admin_action(modeladmin, request, queryset, form_class, extra_context, success_messages, form_initial=None, is_recursive=True):
"""
"""
execute = request.POST.get('execute')
current_node = None
if execute:
if not issubclass(form_class, FileNodeActionsWithUserForm):
form = form_class(queryset, request.POST)
else:
form = form_class(queryset, request.user, request.POST)
if form.is_valid():
form.save()
redirect_node = form.cleaned_data.get('target_node', None)
if not redirect_node:
redirect_node = get_current_node(form)
messages.success(request, message=ungettext(success_messages[0], success_messages[1], form.success_count) % {
'count': form.success_count,
'verbose_name': FileNode._meta.verbose_name,
'verbose_name_plural': FileNode._meta.verbose_name_plural
})
return HttpResponseRedirect(reverse('admin:media_tree_filenode_changelist'))
#return HttpResponseRedirect(reverse('admin:media_tree_filenode_folder_expand', args=(redirect_node.pk,)))
#return HttpResponseRedirect(redirect_node.get_admin_url())
if not execute:
if not issubclass(form_class, FileNodeActionsWithUserForm):
form = form_class(queryset, initial=form_initial)
else:
form = form_class(queryset, request.user, initial=form_initial)
context = get_actions_context(modeladmin)
context.update(extra_context)
context.update({
'breadcrumbs_title': context['title'],
'form': form,
'node': get_current_node(form)
})
if not 'node_list' in context:
if is_recursive:
max_depth = None
else:
max_depth = 1
context['node_list'] = get_nested_filenode_list(form.selected_nodes,
processors=[FileNode.get_admin_link], max_depth=max_depth)
return render_to_response('admin/media_tree/filenode/actions_form.html', context, context_instance=RequestContext(request))
def move_selected(modeladmin, request, queryset):
success_messages = ['%(count)i %(verbose_name)s moved.', '%(count)i %(verbose_name_plural)s moved.']
extra_context = ({
'title': _('Move media objects'),
'submit_label': _('Move'),
})
return filenode_admin_action(modeladmin, request, queryset, MoveSelectedForm, extra_context, success_messages)
move_selected.short_description = _('Move selected %(verbose_name_plural)s')
def copy_selected(modeladmin, request, queryset):
success_messages = ['%(count)i %(verbose_name)s copied.', '%(count)i %(verbose_name_plural)s copied.']
extra_context = ({
'title': _('Copy media objects'),
'submit_label': _('Copy'),
})
return filenode_admin_action(modeladmin, request, queryset, CopySelectedForm, extra_context, success_messages)
copy_selected.short_description = _('Copy selected %(verbose_name_plural)s')
def expand_selected(modeladmin, request, queryset):
expanded_folders_pk = modeladmin.get_expanded_folders_pk(request)
add_pks = [obj.pk for obj in queryset.filter(node_type=FileNode.FOLDER)]
expanded_folders_pk.extend(add_pks)
response = HttpResponseRedirect('')
modeladmin.set_expanded_folders_pk(response, expanded_folders_pk)
return response
expand_selected.short_description = _('Expand selected %(verbose_name_plural)s') % {
'verbose_name_plural': _('folders')}
def collapse_selected(modeladmin, request, queryset):
expanded_folders_pk = modeladmin.get_expanded_folders_pk(request)
remove_pks = [obj.pk for obj in queryset.filter(node_type=FileNode.FOLDER)]
expanded_folders_pk = set(expanded_folders_pk).difference(set(remove_pks))
response = HttpResponseRedirect('')
modeladmin.set_expanded_folders_pk(response, expanded_folders_pk)
return response
collapse_selected.short_description = _('Collapse selected %(verbose_name_plural)s') % {
'verbose_name_plural': _('folders')}
def change_metadata_for_selected(modeladmin, request, queryset):
# TODO Use AdminDateTimeWidget etc
# TODO Should be able to leave required fields blank if confirmation not checked
# Compare all nodes in queryset in order to display initial values
# in form that have an identical value for all nodes
initial = {}
for node in queryset:
for field in node._meta.fields:
if field.editable:
value = getattr(node, field.name)
if not field.name in initial:
initial[field.name] = value
elif value != initial[field.name]:
initial[field.name] = None
success_messages = ['%(count)i %(verbose_name)s changed.', '%(count)i %(verbose_name_plural)s changed.']
extra_context = ({
'title': _('Change metadata for several media objects'),
'submit_label': _('Overwrite selected fields'),
})
return filenode_admin_action(modeladmin, request, queryset,
ChangeMetadataForSelectedForm, extra_context, success_messages, form_initial=initial)
change_metadata_for_selected.short_description = _('Change metadata for selected %(verbose_name_plural)s')
| bsd-3-clause | -9,033,927,466,320,195,000 | 46.037594 | 140 | 0.683664 | false |
sjb3/python_koans | python2/koans/about_regex.py | 79 | 5171 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import re
class AboutRegex(Koan):
"""
These koans are based on Ben's book: Regular Expressions in 10
minutes. I found this book very useful, so I decided to write
a koan file in order to practice everything it taught me.
http://www.forta.com/books/0672325667/
"""
def test_matching_literal_text(self):
"""
Lesson 1 Matching Literal String
"""
string = "Hello, my name is Felix and these koans are based " + \
"on Ben's book: Regular Expressions in 10 minutes."
m = re.search(__, string)
self.assertTrue(
m and m.group(0) and
m.group(0) == 'Felix',
"I want my name")
def test_matching_literal_text_how_many(self):
"""
Lesson 1 -- How many matches?
The default behaviour of most regular expression engines is
to return just the first match. In python you have the
following options:
match() --> Determine if the RE matches at the
beginning of the string.
search() --> Scan through a string, looking for any
location where this RE matches.
findall() --> Find all substrings where the RE
matches, and return them as a list.
finditer() --> Find all substrings where the RE
matches, and return them as an iterator.
"""
string = ("Hello, my name is Felix and these koans are based " +
"on Ben's book: Regular Expressions in 10 minutes. " +
"Repeat My name is Felix")
m = re.match('Felix', string) # TIP: match may not be the best option
# I want to know how many times my name appears
self.assertEqual(m, __)
def test_matching_literal_text_not_case_sensitivity(self):
"""
        Lesson 1 -- Matching literal strings without case sensitivity.
Most regex implementations also support matches that are not
case sensitive. In python you can use re.IGNORECASE, in
Javascript you can specify the optional i flag. In Ben's
book you can see more languages.
"""
string = "Hello, my name is Felix or felix and this koan " + \
"is based on Ben's book: Regular Expressions in 10 minutes."
self.assertEqual(re.findall("felix", string), __)
self.assertEqual(re.findall("felix", string, re.IGNORECASE), __)
def test_matching_any_character(self):
"""
Lesson 1: Matching any character
`.` matches any character: alphabetic characters, digits,
and punctuation.
"""
string = "pecks.xlx\n" \
+ "orders1.xls\n" \
+ "apec1.xls\n" \
+ "na1.xls\n" \
+ "na2.xls\n" \
+ "sa1.xls"
        # I want to find the na and sa file names
change_this_search_string = 'a..xlx'
self.assertEquals(
len(re.findall(change_this_search_string, string)),
3)
def test_matching_set_character(self):
"""
Lesson 2 -- Matching sets of characters
A set of characters is defined using the metacharacters
`[` and `]`. Everything between them is part of the set, and
any single one of the set members will match.
"""
string = "sales.xlx\n" \
+ "sales1.xls\n" \
+ "orders3.xls\n" \
+ "apac1.xls\n" \
+ "sales2.xls\n" \
+ "na1.xls\n" \
+ "na2.xls\n" \
+ "sa1.xls\n" \
+ "ca1.xls"
# I want to find all files for North America(na) or South
        # America(sa), but not (ca). TIP: you could use the pattern .a.,
        # which matched in the above test, but here it matches more than
        # you want
change_this_search_string = '[nsc]a[2-9].xls'
self.assertEquals(
len(re.findall(change_this_search_string, string)),
3)
def test_anything_but_matching(self):
"""
Lesson 2 -- Using character set ranges
Occasionally, you'll have a list of characters that you don't
want to match. Character sets can be negated using the ^
metacharacter.
"""
string = "sales.xlx\n" \
+ "sales1.xls\n" \
+ "orders3.xls\n" \
+ "apac1.xls\n" \
+ "sales2.xls\n" \
+ "sales3.xls\n" \
+ "europe2.xls\n" \
+ "sam.xls\n" \
+ "na1.xls\n" \
+ "na2.xls\n" \
+ "sa1.xls\n" \
+ "ca1.xls"
# I want to find the name 'sam'
change_this_search_string = '[^nc]am'
self.assertEquals(
re.findall(change_this_search_string, string),
['sam.xls'])
| mit | 3,334,287,433,442,224,000 | 35.935714 | 78 | 0.514601 | false |
qiyuangong/leetcode | python/962_Maximum_Width_Ramp.py | 2 | 1385 | class Solution(object):
# def maxWidthRamp(self, A):
# """
# :type A: List[int]
# :rtype: int
# """
# # TLE
# if not A or len(A) == 0:
# return 0
# for ans in range(len(A) - 1, 0, -1):
# for i in range(len(A)):
# if i + ans > len(A) - 1:
# break
# if (A[i + ans] >= A[i]):
# return ans
# return 0
def maxWidthRamp(self, A):
ans = 0
m = float('inf')
        # Visit indices in increasing order of A[i]; m is the smallest index
        # seen so far, so A[m] <= A[i] and i - m is always a valid ramp.
for i in sorted(range(len(A)), key=A.__getitem__):
ans = max(ans, i - m)
m = min(m, i)
return ans
# def maxWidthRamp(self, A):
# N = len(A)
# ans = 0
# candidates = [(A[N - 1], N - 1)]
# # candidates: i's decreasing, by increasing value of A[i]
# for i in xrange(N - 2, -1, -1):
# # Find largest j in candidates with A[j] >= A[i]
# jx = bisect.bisect(candidates, (A[i],))
# if jx < len(candidates):
# ans = max(ans, candidates[jx][1] - i)
# else:
# candidates.append((A[i], i))
# return ans
if __name__ == '__main__':
s = Solution()
print s.maxWidthRamp([6, 0, 8, 2, 1, 5])
print s.maxWidthRamp([9, 8, 1, 0, 1, 9, 4, 0, 4, 1])
| mit | 246,707,342,427,156,130 | 29.108696 | 67 | 0.41444 | false |
smirnovpaul/flussonic-api-py | api/http.py | 1 | 2788 | # -*- coding: utf-8 -*-
import requests
from .log import LOGGER
from requests.auth import HTTPBasicAuth as Auth
class HttpApi(object):
"""
    User and password are the credentials configured on the Flussonic
    server (see edit_auth, view_auth).
HTTP Basic auth.
"""
def __init__(self, user, password, url):
self.auth = Auth(user, password)
self.message = None
self.url = 'http://{}/flussonic/api/'.format(url)
self.api = None
@property
def _connect(self):
try:
r = requests.get(''.join((self.url, self.api)), auth=self.auth)
except (requests.RequestException, requests.Timeout) as e:
LOGGER.error('Error request {}: {}'.format(self.message, e))
return None
try:
# TODO for stream_health
if r.status_code == 424:
# stream is dead
return False
r.raise_for_status()
except requests.HTTPError as e:
LOGGER.error('Error request {}: {}'.format(self.message, e))
return None
try:
response = r.json()
except ValueError as e:
LOGGER.error('Error request {}: {}'.format(self.message, e))
return None
else:
return response
def simple_method(self, api, message):
"""
        Generic basic method for the API.
        Useful if you need to add a new call quickly.
"""
self.api = api
self.message = message
return self._connect
def dvr_status(self, year, month, day, stream_name):
self.api = 'dvr_status/{}/{}/{}/{}'.format(year, month, day, stream_name)
self.message = 'Recording map over the past day {}/{}/{}'.format(year, month, day)
return self._connect
def media_info(self, stream_name):
self.api = 'media_info/{}'.format(stream_name)
self.message = 'Stream information'
return self._connect
@property
def server(self):
self.api = 'server'
self.message = 'Server info in JSON format.'
return self._connect
@property
def sessions(self):
self.api = 'sessions'
self.message = 'Number of open sessions'
return self._connect
def sessions_stream(self, stream_name):
self.api = 'sessions?name={}'.format(stream_name)
self.message = 'List of open sessions for a specific stream'
return self._connect
def stream_health(self, stream_name):
self.api = 'stream_health/{}'.format(stream_name)
self.message = 'Stream quality'
return self._connect
@property
def streams(self):
self.api = 'streams'
        self.message = 'List of streams, clients and the state of these streams'
return self._connect
| mit | 5,657,737,442,988,486,000 | 28.978495 | 90 | 0.574605 | false |
muccg/rdrf | scripts/run_yaml_cde_calculation_tests.py | 1 | 6126 | #!/usr/bin/env python
"""
Runs CDE calculation test cases from CSV files against a registry YAML definition.
"""
from __future__ import print_function
from collections import namedtuple
import io
import os.path
import subprocess
import sys
import yaml
import csv
import argparse
import tempfile
import json
def main():
parser = argparse.ArgumentParser(description="Tests CDE calculations in YAML.")
parser.add_argument("registry_yaml", metavar="YAML", nargs=1,
type=argparse.FileType("r"),
help="File containing YAML definition of registry")
parser.add_argument("test_csvs", metavar="CSV", nargs="+",
type=argparse.FileType("r"),
help="CSV files containing test definitions")
parser.add_argument("--outfile", metavar="FILE", type=argparse.FileType("w"),
help="Output file")
parser.add_argument("--verbose", "-v", action="count",
help="More info for debugging tests")
args = parser.parse_args()
if not args.outfile:
args.outfile = sys.stdout
registry = load_yaml(args.registry_yaml[0])
success = True
for infile in args.test_csvs:
success = run_tests(registry, infile, args) and success
return 0 if success else 1
Registry = namedtuple("Registry", ("names", "calculations"))
def load_yaml(file_obj):
calculations = {}
names = {}
data = yaml.load(file_obj)
for cde in data.get("cdes") or []:
if cde.get("code"):
calc = cde.get("calculation")
if calc:
calculations[cde["code"]] = calc
names[cde["code"]] = cde.get("name")
return Registry(names, calculations)
TestResult = namedtuple("TestResult", ("test", "expected", "actual", "error", "output"))
TestCase = namedtuple("TestCase", ("file", "number", "check_code", "params", "desc"))
def run_tests(registry, csv_file, opts):
num_tests = 0
num_success = 0
num_errors = 0
for index, row in enumerate(csv.DictReader(csv_file)):
num_tests += 1
t = setup_test(row, index + 1, csv_file.name)
res = run_test(registry, t)
if res.error:
print_error(registry, res, t.params, opts)
num_errors += 1
elif res.expected == res.actual:
print_success(registry, res, t.params, opts)
num_success += 1
else:
print_failure(registry, res, t.params, opts)
return num_tests == num_success
def setup_test(cols, num, filename):
params = {code: val for code, val in cols.items()
if code not in ("check", "testcase")}
desc = cols.get("testcase", "")
return TestCase(filename, num, cols["check"], params, desc)
def load_adsafe_js():
adsafe_path = "../rdrf/rdrf/static/js/vendor/adsafe-min.js"
return io.open(os.path.join(os.path.dirname(__file__), adsafe_path)).read()
def run_test(registry, test):
context = {code: val for code, val in test.params.items()
if code in registry.names}
patient = {code: val for code, val in test.params.items()
if code not in registry.names}
script = registry.calculations[test.check_code]
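    # Wrap the CDE calculation in a Node.js sandbox: patient and context are
    # injected as JSON, and context.result is printed as the last output line.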
script = u"""
"use strict";
var document = {};
var window = { console: console };
%s
var patient = %s;
var context = %s;
var RDRF = ADSAFE;
%s
console.log(context.result);
""" % (load_adsafe_js(), json.dumps(patient), json.dumps(context), script)
success, output = exec_script(script)
if success:
context_result, output = parse_output(output)
return TestResult(test, test.params[test.check_code].strip(), context_result,
False, output)
else:
return TestResult(test, None, None, True, output)
def parse_output(output):
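    # The calculation prints context.result last; earlier lines are debug output.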
lines = [o.strip() for o in output.split("\n")]
non_empty = filter(bool, lines)
result = non_empty[-1] if non_empty else None
return (result, "\n".join(lines[0:-1]) + "\n")
def exec_script(script):
with tempfile.NamedTemporaryFile(mode="w+", suffix=".js", prefix="registry_test_") as js:
js.write(script)
js.flush()
try:
p = subprocess.Popen(["node", js.name],
stdin=None, close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, _ = p.communicate()
output = output.decode("utf-8", errors="replace")
if p.returncode != 0:
return False, output
return True, output
except OSError as e:
return False, "Couldn't execute %s: %s" % (script, e)
return False, "Test bug"
def print_success(registry, result, params, opts):
if opts.verbose > 1:
log(opts, u"PASS: %s:%s %s\n" % (result.test.file, result.test.number, result.test.desc))
def print_error(registry, result, params, opts):
log(opts, u"ERROR: %s\n" % str(result.output))
def print_failure(registry, result, params, opts):
log(opts,
u"FAIL %s:%s: %s (%s) was \"%s\", expected \"%s\".\n" % (result.test.file,
result.test.number,
result.test.check_code,
registry.names[result.test.check_code],
result.actual,
result.expected))
if opts.verbose:
for param in sorted(params):
if param in registry.names:
log(opts, u" %s (%s) = \"%s\"\n" %
(param, registry.names[param], params[param]))
else:
log(opts, u" Patient %s = \"%s\"\n" % (param, params[param]))
if result.output:
log(opts, result.output)
def log(opts, text):
opts.outfile.write(text)
if __name__ == '__main__':
sys.exit(main())
| agpl-3.0 | -5,159,092,810,754,563,000 | 31.242105 | 104 | 0.547502 | false |
tgavankar/unifolio | vendor-local/lib/python/pygments/lexers/agile.py | 14 | 80954 | # -*- coding: utf-8 -*-
"""
pygments.lexers.agile
~~~~~~~~~~~~~~~~~~~~~
Lexers for agile languages.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, \
LexerContext, include, combined, do_insertions, bygroups, using
from pygments.token import Error, Text, Other, \
Comment, Operator, Keyword, Name, String, Number, Generic, Punctuation
from pygments.util import get_bool_opt, get_list_opt, shebang_matches
from pygments import unistring as uni
__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
'RubyLexer', 'RubyConsoleLexer', 'PerlLexer', 'LuaLexer',
'MiniDLexer', 'IoLexer', 'TclLexer', 'ClojureLexer',
'Python3Lexer', 'Python3TracebackLexer', 'FactorLexer', 'IokeLexer']
# b/w compatibility
from pygments.lexers.functional import SchemeLexer
line_re = re.compile('.*?\n')
class PythonLexer(RegexLexer):
"""
For `Python <http://www.python.org>`_ source code.
"""
name = 'Python'
aliases = ['python', 'py']
filenames = ['*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac']
mimetypes = ['text/x-python', 'application/x-python']
tokens = {
'root': [
(r'\n', Text),
(r'^(\s*)([rRuU]{,2}"""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
(r"^(\s*)([rRuU]{,2}'''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
(r'[^\S\n]+', Text),
(r'#.*$', Comment),
(r'[]{}:(),;[]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
include('keywords'),
(r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
(r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), 'fromimport'),
(r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), 'import'),
include('builtins'),
include('backtick'),
('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
('[uU]?"""', String, combined('stringescape', 'tdqs')),
("[uU]?'''", String, combined('stringescape', 'tsqs')),
('[uU]?"', String, combined('stringescape', 'dqs')),
("[uU]?'", String, combined('stringescape', 'sqs')),
include('name'),
include('numbers'),
],
'keywords': [
(r'(assert|break|continue|del|elif|else|except|exec|'
r'finally|for|global|if|lambda|pass|print|raise|'
r'return|try|while|yield|as|with)\b', Keyword),
],
'builtins': [
(r'(?<!\.)(__import__|abs|all|any|apply|basestring|bin|bool|buffer|'
r'bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|'
r'complex|delattr|dict|dir|divmod|enumerate|eval|execfile|exit|'
r'file|filter|float|frozenset|getattr|globals|hasattr|hash|hex|id|'
r'input|int|intern|isinstance|issubclass|iter|len|list|locals|'
r'long|map|max|min|next|object|oct|open|ord|pow|property|range|'
r'raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|'
r'sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|'
r'vars|xrange|zip)\b', Name.Builtin),
(r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True'
r')\b', Name.Builtin.Pseudo),
(r'(?<!\.)(ArithmeticError|AssertionError|AttributeError|'
r'BaseException|DeprecationWarning|EOFError|EnvironmentError|'
r'Exception|FloatingPointError|FutureWarning|GeneratorExit|IOError|'
r'ImportError|ImportWarning|IndentationError|IndexError|KeyError|'
r'KeyboardInterrupt|LookupError|MemoryError|NameError|'
r'NotImplemented|NotImplementedError|OSError|OverflowError|'
r'OverflowWarning|PendingDeprecationWarning|ReferenceError|'
r'RuntimeError|RuntimeWarning|StandardError|StopIteration|'
r'SyntaxError|SyntaxWarning|SystemError|SystemExit|TabError|'
r'TypeError|UnboundLocalError|UnicodeDecodeError|'
r'UnicodeEncodeError|UnicodeError|UnicodeTranslateError|'
r'UnicodeWarning|UserWarning|ValueError|VMSError|Warning|'
r'WindowsError|ZeroDivisionError)\b', Name.Exception),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'0[0-7]+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'backtick': [
('`.*?`', String.Backtick),
],
'name': [
(r'@[a-zA-Z0-9_.]+', Name.Decorator),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'funcname': [
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
],
'classname': [
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'import': [
(r'((?:\s|\\\s)+)(as)((?:\s|\\\s)+)',
bygroups(Text, Keyword.Namespace, Text)),
(r'[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace),
(r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
(r'', Text, '#pop') # all else: go back
],
'fromimport': [
(r'((?:\s|\\\s)+)(import)\b', bygroups(Text, Keyword.Namespace), '#pop'),
(r'[a-zA-Z_.][a-zA-Z0-9_.]*', Name.Namespace),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings': [
(r'%(\([a-zA-Z0-9_]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
],
'nl': [
(r'\n', String)
],
'dqs': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here again for raw strings
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here again for raw strings
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
include('nl')
],
'tsqs': [
(r"'''", String, '#pop'),
include('strings'),
include('nl')
],
}
def analyse_text(text):
return shebang_matches(text, r'pythonw?(2\.\d)?')
class Python3Lexer(RegexLexer):
"""
For `Python <http://www.python.org>`_ source code (version 3.0).
*New in Pygments 0.10.*
"""
name = 'Python 3'
aliases = ['python3', 'py3']
filenames = [] # Nothing until Python 3 gets widespread
mimetypes = ['text/x-python3', 'application/x-python3']
flags = re.MULTILINE | re.UNICODE
uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue)
tokens = PythonLexer.tokens.copy()
tokens['keywords'] = [
(r'(assert|break|continue|del|elif|else|except|'
r'finally|for|global|if|lambda|pass|raise|'
r'return|try|while|yield|as|with|True|False|None)\b', Keyword),
]
tokens['builtins'] = [
(r'(?<!\.)(__import__|abs|all|any|bin|bool|bytearray|bytes|'
r'chr|classmethod|cmp|compile|complex|delattr|dict|dir|'
r'divmod|enumerate|eval|filter|float|format|frozenset|getattr|'
r'globals|hasattr|hash|hex|id|input|int|isinstance|issubclass|'
r'iter|len|list|locals|map|max|memoryview|min|next|object|oct|'
r'open|ord|pow|print|property|range|repr|reversed|round|'
r'set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|'
r'vars|zip)\b', Name.Builtin),
(r'(?<!\.)(self|Ellipsis|NotImplemented)\b', Name.Builtin.Pseudo),
(r'(?<!\.)(ArithmeticError|AssertionError|AttributeError|'
r'BaseException|BufferError|BytesWarning|DeprecationWarning|'
r'EOFError|EnvironmentError|Exception|FloatingPointError|'
r'FutureWarning|GeneratorExit|IOError|ImportError|'
r'ImportWarning|IndentationError|IndexError|KeyError|'
r'KeyboardInterrupt|LookupError|MemoryError|NameError|'
r'NotImplementedError|OSError|OverflowError|'
r'PendingDeprecationWarning|ReferenceError|'
r'RuntimeError|RuntimeWarning|StopIteration|'
r'SyntaxError|SyntaxWarning|SystemError|SystemExit|TabError|'
r'TypeError|UnboundLocalError|UnicodeDecodeError|'
r'UnicodeEncodeError|UnicodeError|UnicodeTranslateError|'
r'UnicodeWarning|UserWarning|ValueError|VMSError|Warning|'
r'WindowsError|ZeroDivisionError)\b', Name.Exception),
]
tokens['numbers'] = [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'0[oO][0-7]+', Number.Oct),
(r'0[bB][01]+', Number.Bin),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+', Number.Integer)
]
tokens['backtick'] = []
tokens['name'] = [
(r'@[a-zA-Z0-9_]+', Name.Decorator),
(uni_name, Name),
]
tokens['funcname'] = [
(uni_name, Name.Function, '#pop')
]
tokens['classname'] = [
(uni_name, Name.Class, '#pop')
]
tokens['import'] = [
(r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
(r'\.', Name.Namespace),
(uni_name, Name.Namespace),
(r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
(r'', Text, '#pop') # all else: go back
]
tokens['fromimport'] = [
(r'(\s+)(import)\b', bygroups(Text, Keyword), '#pop'),
(r'\.', Name.Namespace),
(uni_name, Name.Namespace),
]
# don't highlight "%s" substitutions
tokens['strings'] = [
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
]
def analyse_text(text):
return shebang_matches(text, r'pythonw?3(\.\d)?')
class PythonConsoleLexer(Lexer):
"""
For Python console output or doctests, such as:
.. sourcecode:: pycon
>>> a = 'foo'
>>> print a
foo
>>> 1 / 0
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ZeroDivisionError: integer division or modulo by zero
Additional options:
`python3`
Use Python 3 lexer for code. Default is ``False``.
*New in Pygments 1.0.*
"""
name = 'Python console session'
aliases = ['pycon']
mimetypes = ['text/x-python-doctest']
def __init__(self, **options):
self.python3 = get_bool_opt(options, 'python3', False)
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
if self.python3:
pylexer = Python3Lexer(**self.options)
tblexer = Python3TracebackLexer(**self.options)
else:
pylexer = PythonLexer(**self.options)
tblexer = PythonTracebackLexer(**self.options)
curcode = ''
insertions = []
curtb = ''
tbindex = 0
tb = 0
for match in line_re.finditer(text):
line = match.group()
if line.startswith(u'>>> ') or line.startswith(u'... '):
tb = 0
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:4])]))
curcode += line[4:]
elif line.rstrip() == u'...' and not tb:
# only a new >>> prompt can end an exception block
# otherwise an ellipsis in place of the traceback frames
# will be mishandled
insertions.append((len(curcode),
[(0, Generic.Prompt, u'...')]))
curcode += line[3:]
else:
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
if (line.startswith(u'Traceback (most recent call last):') or
re.match(ur' File "[^"]+", line \d+\n$', line)):
tb = 1
curtb = line
tbindex = match.start()
elif line == 'KeyboardInterrupt\n':
yield match.start(), Name.Class, line
elif tb:
curtb += line
if not (line.startswith(' ') or line.strip() == u'...'):
tb = 0
for i, t, v in tblexer.get_tokens_unprocessed(curtb):
yield tbindex+i, t, v
else:
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
class PythonTracebackLexer(RegexLexer):
"""
For Python tracebacks.
*New in Pygments 0.7.*
"""
name = 'Python Traceback'
aliases = ['pytb']
filenames = ['*.pytb']
mimetypes = ['text/x-python-traceback']
tokens = {
'root': [
(r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
# SyntaxError starts with this.
(r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
(r'^.*\n', Other),
],
'intb': [
(r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)),
(r'^( File )("[^"]+")(, line )(\d+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text)),
(r'^( )(.+)(\n)',
bygroups(Text, using(PythonLexer), Text)),
(r'^([ \t]*)(...)(\n)',
bygroups(Text, Comment, Text)), # for doctests...
(r'^(.+)(: )(.+)(\n)',
bygroups(Generic.Error, Text, Name, Text), '#pop'),
(r'^([a-zA-Z_][a-zA-Z0-9_]*)(:?\n)',
bygroups(Generic.Error, Text), '#pop')
],
}
class Python3TracebackLexer(RegexLexer):
"""
For Python 3.0 tracebacks, with support for chained exceptions.
*New in Pygments 1.0.*
"""
name = 'Python 3.0 Traceback'
aliases = ['py3tb']
filenames = ['*.py3tb']
mimetypes = ['text/x-python3-traceback']
tokens = {
'root': [
(r'\n', Text),
(r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
(r'^During handling of the above exception, another '
r'exception occurred:\n\n', Generic.Traceback),
(r'^The above exception was the direct cause of the '
r'following exception:\n\n', Generic.Traceback),
],
'intb': [
(r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)),
(r'^( )(.+)(\n)',
bygroups(Text, using(Python3Lexer), Text)),
(r'^([ \t]*)(...)(\n)',
bygroups(Text, Comment, Text)), # for doctests...
(r'^(.+)(: )(.+)(\n)',
bygroups(Generic.Error, Text, Name, Text), '#pop'),
(r'^([a-zA-Z_][a-zA-Z0-9_]*)(:?\n)',
bygroups(Generic.Error, Text), '#pop')
],
}
class RubyLexer(ExtendedRegexLexer):
"""
For `Ruby <http://www.ruby-lang.org>`_ source code.
"""
name = 'Ruby'
aliases = ['rb', 'ruby', 'duby']
filenames = ['*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec',
'*.rbx', '*.duby']
mimetypes = ['text/x-ruby', 'application/x-ruby']
flags = re.DOTALL | re.MULTILINE
def heredoc_callback(self, match, ctx):
# okay, this is the hardest part of parsing Ruby...
# match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line
start = match.start(1)
yield start, Operator, match.group(1) # <<-?
yield match.start(2), String.Heredoc, match.group(2) # quote ", ', `
yield match.start(3), Name.Constant, match.group(3) # heredoc name
yield match.start(4), String.Heredoc, match.group(4) # quote again
heredocstack = ctx.__dict__.setdefault('heredocstack', [])
outermost = not bool(heredocstack)
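        # record (tolerant?, name); '<<-' heredocs allow an indented terminator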
heredocstack.append((match.group(1) == '<<-', match.group(3)))
ctx.pos = match.start(5)
ctx.end = match.end(5)
# this may find other heredocs
for i, t, v in self.get_tokens_unprocessed(context=ctx):
yield i, t, v
ctx.pos = match.end()
if outermost:
# this is the outer heredoc again, now we can process them all
for tolerant, hdname in heredocstack:
lines = []
for match in line_re.finditer(ctx.text, ctx.pos):
if tolerant:
check = match.group().strip()
else:
check = match.group().rstrip()
if check == hdname:
for amatch in lines:
yield amatch.start(), String.Heredoc, amatch.group()
yield match.start(), Name.Constant, match.group()
ctx.pos = match.end()
break
else:
lines.append(match)
else:
# end of heredoc not found -- error!
for amatch in lines:
yield amatch.start(), Error, amatch.group()
ctx.end = len(ctx.text)
del heredocstack[:]
def gen_rubystrings_rules():
def intp_regex_callback(self, match, ctx):
yield match.start(1), String.Regex, match.group(1) # begin
nctx = LexerContext(match.group(3), 0, ['interpolated-regex'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Regex, match.group(4) # end[mixounse]*
ctx.pos = match.end()
def intp_string_callback(self, match, ctx):
yield match.start(1), String.Other, match.group(1)
nctx = LexerContext(match.group(3), 0, ['interpolated-string'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Other, match.group(4) # end
ctx.pos = match.end()
states = {}
states['strings'] = [
# easy ones
(r'\:([a-zA-Z_][\w_]*[\!\?]?|\*\*?|[-+]@?|'
r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)', String.Symbol),
(r":'(\\\\|\\'|[^'])*'", String.Symbol),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r':"', String.Symbol, 'simple-sym'),
(r'"', String.Double, 'simple-string'),
(r'(?<!\.)`', String.Backtick, 'simple-backtick'),
]
# double-quoted string and symbol
for name, ttype, end in ('string', String.Double, '"'), \
('sym', String.Symbol, '"'), \
('backtick', String.Backtick, '`'):
states['simple-'+name] = [
include('string-intp-escaped'),
(r'[^\\%s#]+' % end, ttype),
(r'[\\#]', ttype),
(end, ttype, '#pop'),
]
# braced quoted strings
for lbrace, rbrace, name in ('\\{', '\\}', 'cb'), \
('\\[', '\\]', 'sb'), \
('\\(', '\\)', 'pa'), \
('<', '>', 'ab'):
states[name+'-intp-string'] = [
(r'\\[\\' + lbrace + rbrace + ']', String.Other),
(r'(?<!\\)' + lbrace, String.Other, '#push'),
(r'(?<!\\)' + rbrace, String.Other, '#pop'),
include('string-intp-escaped'),
(r'[\\#' + lbrace + rbrace + ']', String.Other),
(r'[^\\#' + lbrace + rbrace + ']+', String.Other),
]
states['strings'].append((r'%[QWx]?' + lbrace, String.Other,
name+'-intp-string'))
states[name+'-string'] = [
(r'\\[\\' + lbrace + rbrace + ']', String.Other),
(r'(?<!\\)' + lbrace, String.Other, '#push'),
(r'(?<!\\)' + rbrace, String.Other, '#pop'),
(r'[\\#' + lbrace + rbrace + ']', String.Other),
(r'[^\\#' + lbrace + rbrace + ']+', String.Other),
]
states['strings'].append((r'%[qsw]' + lbrace, String.Other,
name+'-string'))
states[name+'-regex'] = [
(r'\\[\\' + lbrace + rbrace + ']', String.Regex),
(r'(?<!\\)' + lbrace, String.Regex, '#push'),
(r'(?<!\\)' + rbrace + '[mixounse]*', String.Regex, '#pop'),
include('string-intp'),
(r'[\\#' + lbrace + rbrace + ']', String.Regex),
(r'[^\\#' + lbrace + rbrace + ']+', String.Regex),
]
states['strings'].append((r'%r' + lbrace, String.Regex,
name+'-regex'))
# these must come after %<brace>!
states['strings'] += [
# %r regex
(r'(%r([^a-zA-Z0-9]))((?:\\\2|(?!\2).)*)(\2[mixounse]*)',
intp_regex_callback),
# regular fancy strings with qsw
(r'%[qsw]([^a-zA-Z0-9])((?:\\\1|(?!\1).)*)\1', String.Other),
(r'(%[QWx]([^a-zA-Z0-9]))((?:\\\2|(?!\2).)*)(\2)',
intp_string_callback),
# special forms of fancy strings after operators or
# in method calls with braces
(r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
bygroups(Text, String.Other, None)),
# and because of fixed width lookbehinds the whole thing a
# second time for line startings...
(r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
bygroups(Text, String.Other, None)),
# all regular fancy strings without qsw
(r'(%([^a-zA-Z0-9\s]))((?:\\\2|(?!\2).)*)(\2)',
intp_string_callback),
]
return states
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'=begin\s.*?\n=end', Comment.Multiline),
# keywords
(r'(BEGIN|END|alias|begin|break|case|defined\?|'
r'do|else|elsif|end|ensure|for|if|in|next|redo|'
r'rescue|raise|retry|return|super|then|undef|unless|until|when|'
r'while|yield)\b', Keyword),
# start of function, class and module names
(r'(module)(\s+)([a-zA-Z_][a-zA-Z0-9_]*(::[a-zA-Z_][a-zA-Z0-9_]*)*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
# special methods
(r'(initialize|new|loop|include|extend|raise|attr_reader|'
r'attr_writer|attr_accessor|attr|catch|throw|private|'
r'module_function|public|protected|true|false|nil)\b', Keyword.Pseudo),
(r'(not|and|or)\b', Operator.Word),
(r'(autoload|block_given|const_defined|eql|equal|frozen|include|'
r'instance_of|is_a|iterator|kind_of|method_defined|nil|'
r'private_method_defined|protected_method_defined|'
r'public_method_defined|respond_to|tainted)\?', Name.Builtin),
(r'(chomp|chop|exit|gsub|sub)!', Name.Builtin),
(r'(?<!\.)(Array|Float|Integer|String|__id__|__send__|abort|ancestors|'
r'at_exit|autoload|binding|callcc|caller|'
r'catch|chomp|chop|class_eval|class_variables|'
r'clone|const_defined\?|const_get|const_missing|const_set|constants|'
r'display|dup|eval|exec|exit|extend|fail|fork|'
r'format|freeze|getc|gets|global_variables|gsub|'
r'hash|id|included_modules|inspect|instance_eval|'
r'instance_method|instance_methods|'
r'instance_variable_get|instance_variable_set|instance_variables|'
r'lambda|load|local_variables|loop|'
r'method|method_missing|methods|module_eval|name|'
r'object_id|open|p|print|printf|private_class_method|'
r'private_instance_methods|'
r'private_methods|proc|protected_instance_methods|'
r'protected_methods|public_class_method|'
r'public_instance_methods|public_methods|'
r'putc|puts|raise|rand|readline|readlines|require|'
r'scan|select|self|send|set_trace_func|singleton_methods|sleep|'
r'split|sprintf|srand|sub|syscall|system|taint|'
r'test|throw|to_a|to_s|trace_var|trap|untaint|untrace_var|'
r'warn)\b', Name.Builtin),
(r'__(FILE|LINE)__\b', Name.Builtin.Pseudo),
# normal heredocs
(r'(?<!\w)(<<-?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)', heredoc_callback),
# empty string heredocs
(r'(<<-?)("|\')()(\2)(.*?\n)', heredoc_callback),
(r'__END__', Comment.Preproc, 'end-part'),
# multiline regex (after keywords or assignments)
(r'(?:^|(?<=[=<>~!])|'
r'(?<=(?:\s|;)when\s)|'
r'(?<=(?:\s|;)or\s)|'
r'(?<=(?:\s|;)and\s)|'
r'(?<=(?:\s|;|\.)index\s)|'
r'(?<=(?:\s|;|\.)scan\s)|'
r'(?<=(?:\s|;|\.)sub\s)|'
r'(?<=(?:\s|;|\.)sub!\s)|'
r'(?<=(?:\s|;|\.)gsub\s)|'
r'(?<=(?:\s|;|\.)gsub!\s)|'
r'(?<=(?:\s|;|\.)match\s)|'
r'(?<=(?:\s|;)if\s)|'
r'(?<=(?:\s|;)elsif\s)|'
r'(?<=^when\s)|'
r'(?<=^index\s)|'
r'(?<=^scan\s)|'
r'(?<=^sub\s)|'
r'(?<=^gsub\s)|'
r'(?<=^sub!\s)|'
r'(?<=^gsub!\s)|'
r'(?<=^match\s)|'
r'(?<=^if\s)|'
r'(?<=^elsif\s)'
r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'),
# multiline regex (in method calls)
(r'(?<=\(|,)/', String.Regex, 'multiline-regex'),
# multiline regex (this time the funny no whitespace rule)
(r'(\s+)(/[^\s=])', String.Regex, 'multiline-regex'),
# lex numbers and ignore following regular expressions which
# are division operators in fact (grrrr. i hate that. any
# better ideas?)
# since pygments 0.7 we also eat a "?" operator after numbers
# so that the char operator does not work. Chars are not allowed
# there so that you can use the ternary operator.
# stupid example:
# x>=0?n[x]:""
(r'(0_?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
bygroups(Number.Oct, Text, Operator)),
(r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
bygroups(Number.Hex, Text, Operator)),
(r'(0b[01]+(?:_[01]+)*)(\s*)([/?])?',
bygroups(Number.Bin, Text, Operator)),
(r'([\d]+(?:_\d+)*)(\s*)([/?])?',
bygroups(Number.Integer, Text, Operator)),
# Names
(r'@@[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable.Class),
(r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable.Instance),
(r'\$[a-zA-Z0-9_]+', Name.Variable.Global),
(r'\$[!@&`\'+~=/\\,;.<>_*$?:"]', Name.Variable.Global),
(r'\$-[0adFiIlpvw]', Name.Variable.Global),
(r'::', Operator),
include('strings'),
# chars
(r'\?(\\[MC]-)*' # modifiers
r'(\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)'
r'(?!\w)',
String.Char),
(r'[A-Z][a-zA-Z0-9_]+', Name.Constant),
# this is needed because ruby attributes can look
# like keywords (class) or like this: ` ?!?
(r'(\.|::)([a-zA-Z_]\w*[\!\?]?|[*%&^`~+-/\[<>=])',
bygroups(Operator, Name)),
(r'[a-zA-Z_][\w_]*[\!\?]?', Name),
(r'(\[|\]|\*\*|<<?|>>?|>=|<=|<=>|=~|={3}|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&!^|~]=?', Operator),
(r'[(){};,/?:\\]', Punctuation),
(r'\s+', Text)
],
'funcname': [
(r'\(', Punctuation, 'defexpr'),
(r'(?:([a-zA-Z_][a-zA-Z0-9_]*)(\.))?'
r'([a-zA-Z_][\w_]*[\!\?]?|\*\*?|[-+]@?|'
r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)',
bygroups(Name.Class, Operator, Name.Function), '#pop'),
(r'', Text, '#pop')
],
'classname': [
(r'\(', Punctuation, 'defexpr'),
(r'<<', Operator, '#pop'),
(r'[A-Z_][\w_]*', Name.Class, '#pop'),
(r'', Text, '#pop')
],
'defexpr': [
(r'(\))(\.|::)?', bygroups(Punctuation, Operator), '#pop'),
(r'\(', Operator, '#push'),
include('root')
],
'in-intp': [
('}', String.Interpol, '#pop'),
include('root'),
],
'string-intp': [
(r'#{', String.Interpol, 'in-intp'),
(r'#@@?[a-zA-Z_][a-zA-Z0-9_]*', String.Interpol),
(r'#\$[a-zA-Z_][a-zA-Z0-9_]*', String.Interpol)
],
'string-intp-escaped': [
include('string-intp'),
(r'\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})', String.Escape)
],
'interpolated-regex': [
include('string-intp'),
(r'[\\#]', String.Regex),
(r'[^\\#]+', String.Regex),
],
'interpolated-string': [
include('string-intp'),
(r'[\\#]', String.Other),
(r'[^\\#]+', String.Other),
],
'multiline-regex': [
include('string-intp'),
(r'\\\\', String.Regex),
(r'\\/', String.Regex),
(r'[\\#]', String.Regex),
(r'[^\\/#]+', String.Regex),
(r'/[mixounse]*', String.Regex, '#pop'),
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
]
}
tokens.update(gen_rubystrings_rules())
def analyse_text(text):
return shebang_matches(text, r'ruby(1\.\d)?')
class RubyConsoleLexer(Lexer):
"""
For Ruby interactive console (**irb**) output like:
.. sourcecode:: rbcon
irb(main):001:0> a = 1
=> 1
irb(main):002:0> puts a
1
=> nil
"""
name = 'Ruby irb session'
aliases = ['rbcon', 'irb']
mimetypes = ['text/x-ruby-shellsession']
    _prompt_re = re.compile(r'irb\([a-zA-Z_][a-zA-Z0-9_]*\):\d{3}:\d+[>*"\'] '
                            r'|>> |\?> ')
def get_tokens_unprocessed(self, text):
rblexer = RubyLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(insertions,
rblexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
rblexer.get_tokens_unprocessed(curcode)):
yield item
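# A minimal usage sketch (the session text is only an example; highlight()
# and TerminalFormatter are standard Pygments APIs):
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     session = 'irb(main):001:0> 1 + 1\n=> 2\n'
#     print highlight(session, RubyConsoleLexer(), TerminalFormatter())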
class PerlLexer(RegexLexer):
"""
For `Perl <http://www.perl.org>`_ source code.
"""
name = 'Perl'
aliases = ['perl', 'pl']
filenames = ['*.pl', '*.pm']
mimetypes = ['text/x-perl', 'application/x-perl']
flags = re.DOTALL | re.MULTILINE
    # TODO: have someone who really knows Perl review and fix this grammar
tokens = {
'balanced-regex': [
(r'/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex, '#pop'),
(r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'),
(r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'),
(r'{(\\\\|\\}|[^}])*}[egimosx]*', String.Regex, '#pop'),
(r'<(\\\\|\\>|[^>])*>[egimosx]*', String.Regex, '#pop'),
(r'\[(\\\\|\\\]|[^\]])*\][egimosx]*', String.Regex, '#pop'),
(r'\((\\\\|\\\)|[^\)])*\)[egimosx]*', String.Regex, '#pop'),
(r'@(\\\\|\\\@|[^\@])*@[egimosx]*', String.Regex, '#pop'),
(r'%(\\\\|\\\%|[^\%])*%[egimosx]*', String.Regex, '#pop'),
(r'\$(\\\\|\\\$|[^\$])*\$[egimosx]*', String.Regex, '#pop'),
],
'root': [
(r'\#.*?$', Comment.Single),
(r'^=[a-zA-Z0-9]+\s+.*?\n=cut', Comment.Multiline),
(r'(case|continue|do|else|elsif|for|foreach|if|last|my|'
r'next|our|redo|reset|then|unless|until|while|use|'
r'print|new|BEGIN|CHECK|INIT|END|return)\b', Keyword),
(r'(format)(\s+)([a-zA-Z0-9_]+)(\s*)(=)(\s*\n)',
bygroups(Keyword, Text, Name, Text, Punctuation, Text), 'format'),
(r'(eq|lt|gt|le|ge|ne|not|and|or|cmp)\b', Operator.Word),
# common delimiters
(r's/(\\\\|\\/|[^/])*/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex),
(r's!(\\\\|\\!|[^!])*!(\\\\|\\!|[^!])*![egimosx]*', String.Regex),
(r's\\(\\\\|[^\\])*\\(\\\\|[^\\])*\\[egimosx]*', String.Regex),
(r's@(\\\\|\\@|[^@])*@(\\\\|\\@|[^@])*@[egimosx]*', String.Regex),
(r's%(\\\\|\\%|[^%])*%(\\\\|\\%|[^%])*%[egimosx]*', String.Regex),
# balanced delimiters
(r's{(\\\\|\\}|[^}])*}\s*', String.Regex, 'balanced-regex'),
(r's<(\\\\|\\>|[^>])*>\s*', String.Regex, 'balanced-regex'),
(r's\[(\\\\|\\\]|[^\]])*\]\s*', String.Regex, 'balanced-regex'),
(r's\((\\\\|\\\)|[^\)])*\)\s*', String.Regex, 'balanced-regex'),
(r'm?/(\\\\|\\/|[^/\n])*/[gcimosx]*', String.Regex),
(r'm(?=[/!\\{<\[\(@%\$])', String.Regex, 'balanced-regex'),
(r'((?<==~)|(?<=\())\s*/(\\\\|\\/|[^/])*/[gcimosx]*', String.Regex),
(r'\s+', Text),
(r'(abs|accept|alarm|atan2|bind|binmode|bless|caller|chdir|'
r'chmod|chomp|chop|chown|chr|chroot|close|closedir|connect|'
r'continue|cos|crypt|dbmclose|dbmopen|defined|delete|die|'
r'dump|each|endgrent|endhostent|endnetent|endprotoent|'
r'endpwent|endservent|eof|eval|exec|exists|exit|exp|fcntl|'
r'fileno|flock|fork|format|formline|getc|getgrent|getgrgid|'
r'getgrnam|gethostbyaddr|gethostbyname|gethostent|getlogin|'
r'getnetbyaddr|getnetbyname|getnetent|getpeername|getpgrp|'
r'getppid|getpriority|getprotobyname|getprotobynumber|'
r'getprotoent|getpwent|getpwnam|getpwuid|getservbyname|'
r'getservbyport|getservent|getsockname|getsockopt|glob|gmtime|'
r'goto|grep|hex|import|index|int|ioctl|join|keys|kill|last|'
r'lc|lcfirst|length|link|listen|local|localtime|log|lstat|'
r'map|mkdir|msgctl|msgget|msgrcv|msgsnd|my|next|no|oct|open|'
r'opendir|ord|our|pack|package|pipe|pop|pos|printf|'
r'prototype|push|quotemeta|rand|read|readdir|'
r'readline|readlink|readpipe|recv|redo|ref|rename|require|'
r'reverse|rewinddir|rindex|rmdir|scalar|seek|seekdir|'
r'select|semctl|semget|semop|send|setgrent|sethostent|setnetent|'
r'setpgrp|setpriority|setprotoent|setpwent|setservent|'
r'setsockopt|shift|shmctl|shmget|shmread|shmwrite|shutdown|'
r'sin|sleep|socket|socketpair|sort|splice|split|sprintf|sqrt|'
r'srand|stat|study|substr|symlink|syscall|sysopen|sysread|'
r'sysseek|system|syswrite|tell|telldir|tie|tied|time|times|tr|'
r'truncate|uc|ucfirst|umask|undef|unlink|unpack|unshift|untie|'
r'utime|values|vec|wait|waitpid|wantarray|warn|write'
r')\b', Name.Builtin),
(r'((__(DATA|DIE|WARN)__)|(STD(IN|OUT|ERR)))\b', Name.Builtin.Pseudo),
(r'<<([\'"]?)([a-zA-Z_][a-zA-Z0-9_]*)\1;?\n.*?\n\2\n', String),
(r'__END__', Comment.Preproc, 'end-part'),
(r'\$\^[ADEFHILMOPSTWX]', Name.Variable.Global),
(r"\$[\\\"\[\]'&`+*.,;=%~?@$!<>(^|/-](?!\w)", Name.Variable.Global),
(r'[$@%#]+', Name.Variable, 'varname'),
(r'0_?[0-7]+(_[0-7]+)*', Number.Oct),
(r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex),
(r'0b[01]+(_[01]+)*', Number.Bin),
(r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?',
Number.Float),
(r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float),
(r'\d+(_\d+)*', Number.Integer),
(r"'(\\\\|\\'|[^'])*'", String),
(r'"(\\\\|\\"|[^"])*"', String),
(r'`(\\\\|\\`|[^`])*`', String.Backtick),
(r'<([^\s>]+)>', String.Regex),
(r'(q|qq|qw|qr|qx)\{', String.Other, 'cb-string'),
(r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'),
(r'(q|qq|qw|qr|qx)\[', String.Other, 'sb-string'),
(r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'),
(r'(q|qq|qw|qr|qx)([^a-zA-Z0-9])(.|\n)*?\2', String.Other),
(r'package\s+', Keyword, 'modulename'),
(r'sub\s+', Keyword, 'funcname'),
(r'(\[\]|\*\*|::|<<|>>|>=|<=|<=>|={3}|!=|=~|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&^|!\\~]=?', Operator),
(r'[\(\)\[\]:;,<>/\?\{\}]', Punctuation), # yes, there's no shortage
# of punctuation in Perl!
(r'(?=\w)', Name, 'name'),
],
'format': [
(r'\.\n', String.Interpol, '#pop'),
(r'[^\n]*\n', String.Interpol),
],
'varname': [
(r'\s+', Text),
(r'\{', Punctuation, '#pop'), # hash syntax?
(r'\)|,', Punctuation, '#pop'), # argument specifier
(r'[a-zA-Z0-9_]+::', Name.Namespace),
(r'[a-zA-Z0-9_:]+', Name.Variable, '#pop'),
],
'name': [
(r'[a-zA-Z0-9_]+::', Name.Namespace),
(r'[a-zA-Z0-9_:]+', Name, '#pop'),
(r'[A-Z_]+(?=[^a-zA-Z0-9_])', Name.Constant, '#pop'),
(r'(?=[^a-zA-Z0-9_])', Text, '#pop'),
],
'modulename': [
(r'[a-zA-Z_][\w_]*', Name.Namespace, '#pop')
],
'funcname': [
(r'[a-zA-Z_][\w_]*[\!\?]?', Name.Function),
(r'\s+', Text),
# argument declaration
(r'(\([$@%]*\))(\s*)', bygroups(Punctuation, Text)),
(r'.*?{', Punctuation, '#pop'),
(r';', Punctuation, '#pop'),
],
'cb-string': [
(r'\\[\{\}\\]', String.Other),
(r'\\', String.Other),
(r'\{', String.Other, 'cb-string'),
(r'\}', String.Other, '#pop'),
(r'[^\{\}\\]+', String.Other)
],
'rb-string': [
(r'\\[\(\)\\]', String.Other),
(r'\\', String.Other),
(r'\(', String.Other, 'rb-string'),
(r'\)', String.Other, '#pop'),
(r'[^\(\)]+', String.Other)
],
'sb-string': [
(r'\\[\[\]\\]', String.Other),
(r'\\', String.Other),
(r'\[', String.Other, 'sb-string'),
(r'\]', String.Other, '#pop'),
(r'[^\[\]]+', String.Other)
],
'lt-string': [
(r'\\[\<\>\\]', String.Other),
(r'\\', String.Other),
(r'\<', String.Other, 'lt-string'),
(r'\>', String.Other, '#pop'),
(r'[^\<\>]+', String.Other)
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
]
}
def analyse_text(text):
if shebang_matches(text, r'perl'):
return True
if 'my $' in text:
return 0.9
return 0.1 # who knows, might still be perl!
class LuaLexer(RegexLexer):
"""
For `Lua <http://www.lua.org>`_ source code.
Additional options accepted:
`func_name_highlighting`
If given and ``True``, highlight builtin function names
(default: ``True``).
`disabled_modules`
If given, must be a list of module names whose function names
should not be highlighted. By default all modules are highlighted.
To get a list of allowed modules have a look into the
`_luabuiltins` module:
.. sourcecode:: pycon
>>> from pygments.lexers._luabuiltins import MODULES
>>> MODULES.keys()
['string', 'coroutine', 'modules', 'io', 'basic', ...]
"""
name = 'Lua'
aliases = ['lua']
filenames = ['*.lua', '*.wlua']
mimetypes = ['text/x-lua', 'application/x-lua']
tokens = {
'root': [
# lua allows a file to start with a shebang
(r'#!(.*?)$', Comment.Preproc),
(r'', Text, 'base'),
],
'base': [
(r'(?s)--\[(=*)\[.*?\]\1\]', Comment.Multiline),
('--.*$', Comment.Single),
(r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float),
(r'(?i)\d+e[+-]?\d+', Number.Float),
('(?i)0x[0-9a-f]*', Number.Hex),
(r'\d+', Number.Integer),
(r'\n', Text),
(r'[^\S\n]', Text),
# multiline strings
(r'(?s)\[(=*)\[.*?\]\1\]', String),
(r'(==|~=|<=|>=|\.\.|\.\.\.|[=+\-*/%^<>#])', Operator),
(r'[\[\]\{\}\(\)\.,:;]', Punctuation),
(r'(and|or|not)\b', Operator.Word),
('(break|do|else|elseif|end|for|if|in|repeat|return|then|until|'
r'while)\b', Keyword),
(r'(local)\b', Keyword.Declaration),
(r'(true|false|nil)\b', Keyword.Constant),
(r'(function)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)?', Name),
("'", String.Single, combined('stringescape', 'sqs')),
('"', String.Double, combined('stringescape', 'dqs'))
],
'funcname': [
('(?:([A-Za-z_][A-Za-z0-9_]*)(\.))?([A-Za-z_][A-Za-z0-9_]*)',
bygroups(Name.Class, Punctuation, Name.Function), '#pop'),
# inline function
('\(', Punctuation, '#pop'),
],
'classname': [
('[A-Za-z_][A-Za-z0-9_]*', Name.Class, '#pop')
],
        # every character is valid inside a Lua string, so this state only
        # exists so that 'sqs' and 'dqs' can layer their own rules on top
'string': [
('.', String)
],
'stringescape': [
(r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape)
],
'sqs': [
("'", String, '#pop'),
include('string')
],
'dqs': [
('"', String, '#pop'),
include('string')
]
}
def __init__(self, **options):
self.func_name_highlighting = get_bool_opt(
options, 'func_name_highlighting', True)
self.disabled_modules = get_list_opt(options, 'disabled_modules', [])
self._functions = set()
if self.func_name_highlighting:
from pygments.lexers._luabuiltins import MODULES
for mod, func in MODULES.iteritems():
if mod not in self.disabled_modules:
self._functions.update(func)
RegexLexer.__init__(self, **options)
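    # Usage sketch for the options documented in the class docstring, e.g.
    # to skip highlighting the functions of the 'io' module:
    #
    #     LuaLexer(func_name_highlighting=True, disabled_modules=['io'])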
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
if value in self._functions:
yield index, Name.Builtin, value
continue
elif '.' in value:
a, b = value.split('.')
yield index, Name, a
yield index + len(a), Punctuation, u'.'
yield index + len(a) + 1, Name, b
continue
yield index, token, value
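    # Illustration of the override above: with default options a bare name
    # like "print" becomes Name.Builtin, while a dotted name such as
    # "foo.bar" is re-emitted as Name ('foo'), Punctuation ('.') and
    # Name ('bar') at the matching offsets; all other tokens pass through.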
class MiniDLexer(RegexLexer):
"""
For `MiniD <http://www.dsource.org/projects/minid>`_ (a D-like scripting
language) source.
"""
name = 'MiniD'
filenames = ['*.md']
aliases = ['minid']
mimetypes = ['text/x-minidsrc']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r'//(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'/\+', Comment.Multiline, 'nestedcomment'),
# Keywords
(r'(as|assert|break|case|catch|class|continue|coroutine|default'
r'|do|else|finally|for|foreach|function|global|namespace'
r'|if|import|in|is|local|module|return|super|switch'
r'|this|throw|try|vararg|while|with|yield)\b', Keyword),
(r'(false|true|null)\b', Keyword.Constant),
# FloatLiteral
(r'([0-9][0-9_]*)?\.[0-9_]+([eE][+\-]?[0-9_]+)?', Number.Float),
# IntegerLiteral
# -- Binary
(r'0[Bb][01_]+', Number),
# -- Octal
(r'0[Cc][0-7_]+', Number.Oct),
# -- Hexadecimal
(r'0[xX][0-9a-fA-F_]+', Number.Hex),
# -- Decimal
(r'(0|[1-9][0-9_]*)', Number.Integer),
# CharacterLiteral
(r"""'(\\['"?\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-9]{1,3}"""
r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""",
String.Char
),
# StringLiteral
# -- WysiwygString
(r'@"(""|.)*"', String),
# -- AlternateWysiwygString
(r'`(``|.)*`', String),
# -- DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"', String),
# Tokens
(
r'(~=|\^=|%=|\*=|==|!=|>>>=|>>>|>>=|>>|>=|<=>|\?=|-\>'
r'|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.|/=)'
r'|[-/.&$@|\+<>!()\[\]{}?,;:=*%^~#\\]', Punctuation
),
# Identifier
(r'[a-zA-Z_]\w*', Name),
],
'nestedcomment': [
(r'[^+/]+', Comment.Multiline),
(r'/\+', Comment.Multiline, '#push'),
(r'\+/', Comment.Multiline, '#pop'),
(r'[+/]', Comment.Multiline),
],
}
class IoLexer(RegexLexer):
"""
For `Io <http://iolanguage.com/>`_ (a small, prototype-based
programming language) source.
*New in Pygments 0.10.*
"""
name = 'Io'
filenames = ['*.io']
aliases = ['io']
mimetypes = ['text/x-iosrc']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r'//(.*?)\n', Comment.Single),
(r'#(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'/\+', Comment.Multiline, 'nestedcomment'),
# DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"', String),
# Operators
(r'::=|:=|=|\(|\)|;|,|\*|-|\+|>|<|@|!|/|\||\^|\.|%|&|\[|\]|\{|\}',
Operator),
# keywords
(r'(clone|do|doFile|doString|method|for|if|else|elseif|then)\b',
Keyword),
# constants
(r'(nil|false|true)\b', Name.Constant),
# names
            (r'(Object|list|List|Map|args|Sequence|Coroutine|File)\b',
             Name.Builtin),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
# numbers
(r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer)
],
'nestedcomment': [
(r'[^+/]+', Comment.Multiline),
(r'/\+', Comment.Multiline, '#push'),
(r'\+/', Comment.Multiline, '#pop'),
(r'[+/]', Comment.Multiline),
]
}
class TclLexer(RegexLexer):
"""
For Tcl source code.
*New in Pygments 0.10.*
"""
keyword_cmds_re = (
r'\b(after|apply|array|break|catch|continue|elseif|else|error|'
r'eval|expr|for|foreach|global|if|namespace|proc|rename|return|'
r'set|switch|then|trace|unset|update|uplevel|upvar|variable|'
r'vwait|while)\b'
)
builtin_cmds_re = (
r'\b(append|bgerror|binary|cd|chan|clock|close|concat|dde|dict|'
r'encoding|eof|exec|exit|fblocked|fconfigure|fcopy|file|'
r'fileevent|flush|format|gets|glob|history|http|incr|info|interp|'
r'join|lappend|lassign|lindex|linsert|list|llength|load|loadTk|'
r'lrange|lrepeat|lreplace|lreverse|lsearch|lset|lsort|mathfunc|'
r'mathop|memory|msgcat|open|package|pid|pkg::create|pkg_mkIndex|'
r'platform|platform::shell|puts|pwd|re_syntax|read|refchan|'
r'regexp|registry|regsub|scan|seek|socket|source|split|string|'
r'subst|tell|time|tm|unknown|unload)\b'
)
name = 'Tcl'
aliases = ['tcl']
filenames = ['*.tcl']
mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl']
def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""):
return [
(keyword_cmds_re, Keyword, 'params' + context),
(builtin_cmds_re, Name.Builtin, 'params' + context),
(r'([\w\.\-]+)', Name.Variable, 'params' + context),
(r'#', Comment, 'comment'),
]
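    # e.g. _gen_command_rules(keyword_cmds_re, builtin_cmds_re, "-in-brace")
    # routes commands into the 'params-in-brace' state, whose closing '}'
    # pops two levels so both the params and the brace state are left.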
tokens = {
'root': [
include('command'),
include('basic'),
include('data'),
(r'}', Keyword), # HACK: somehow we miscounted our braces
],
'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re),
'command-in-brace': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-brace"),
'command-in-bracket': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-bracket"),
'command-in-paren': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-paren"),
'basic': [
(r'\(', Keyword, 'paren'),
(r'\[', Keyword, 'bracket'),
(r'\{', Keyword, 'brace'),
(r'"', String.Double, 'string'),
(r'(eq|ne|in|ni)\b', Operator.Word),
(r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator),
],
'data': [
(r'\s+', Text),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'0[0-7]+', Number.Oct),
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\$([\w\.\-\:]+)', Name.Variable),
(r'([\w\.\-\:]+)', Text),
],
'params': [
(r';', Keyword, '#pop'),
(r'\n', Text, '#pop'),
(r'(else|elseif|then)', Keyword),
include('basic'),
include('data'),
],
'params-in-brace': [
(r'}', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-paren': [
(r'\)', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-bracket': [
(r'\]', Keyword, ('#pop', '#pop')),
include('params')
],
'string': [
(r'\[', String.Double, 'string-square'),
(r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\])', String.Double),
(r'"', String.Double, '#pop')
],
'string-square': [
(r'\[', String.Double, 'string-square'),
(r'(?s)(\\\\|\\[0-7]+|\\.|\\\n|[^\]\\])', String.Double),
(r'\]', String.Double, '#pop')
],
'brace': [
(r'}', Keyword, '#pop'),
include('command-in-brace'),
include('basic'),
include('data'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('command-in-paren'),
include('basic'),
include('data'),
],
'bracket': [
(r'\]', Keyword, '#pop'),
include('command-in-bracket'),
include('basic'),
include('data'),
],
'comment': [
(r'.*[^\\]\n', Comment, '#pop'),
(r'.*\\\n', Comment),
],
}
def analyse_text(text):
return shebang_matches(text, r'(tcl)')
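# A minimal sketch of driving the lexer directly (the input string is only
# an example; get_tokens() is the standard Pygments entry point):
#
#     for tok, val in TclLexer().get_tokens(u'set x 1\n'):
#         print tok, repr(val)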
class ClojureLexer(RegexLexer):
"""
Lexer for `Clojure <http://clojure.org/>`_ source code.
*New in Pygments 0.11.*
"""
name = 'Clojure'
aliases = ['clojure', 'clj']
filenames = ['*.clj']
mimetypes = ['text/x-clojure', 'application/x-clojure']
keywords = [
'fn', 'def', 'defn', 'defmacro', 'defmethod', 'defmulti', 'defn-',
'defstruct',
'if', 'cond',
'let', 'for'
]
builtins = [
'.', '..',
'*', '+', '-', '->', '..', '/', '<', '<=', '=', '==', '>', '>=',
'accessor', 'agent', 'agent-errors', 'aget', 'alength', 'all-ns',
'alter', 'and', 'append-child', 'apply', 'array-map', 'aset',
'aset-boolean', 'aset-byte', 'aset-char', 'aset-double', 'aset-float',
'aset-int', 'aset-long', 'aset-short', 'assert', 'assoc', 'await',
'await-for', 'bean', 'binding', 'bit-and', 'bit-not', 'bit-or',
'bit-shift-left', 'bit-shift-right', 'bit-xor', 'boolean', 'branch?',
'butlast', 'byte', 'cast', 'char', 'children', 'class',
'clear-agent-errors', 'comment', 'commute', 'comp', 'comparator',
'complement', 'concat', 'conj', 'cons', 'constantly',
'construct-proxy', 'contains?', 'count', 'create-ns', 'create-struct',
'cycle', 'dec', 'deref', 'difference', 'disj', 'dissoc', 'distinct',
'doall', 'doc', 'dorun', 'doseq', 'dosync', 'dotimes', 'doto',
'double', 'down', 'drop', 'drop-while', 'edit', 'end?', 'ensure',
'eval', 'every?', 'false?', 'ffirst', 'file-seq', 'filter', 'find',
'find-doc', 'find-ns', 'find-var', 'first', 'float', 'flush',
'fnseq', 'frest', 'gensym', 'get', 'get-proxy-class',
'hash-map', 'hash-set', 'identical?', 'identity', 'if-let', 'import',
'in-ns', 'inc', 'index', 'insert-child', 'insert-left', 'insert-right',
'inspect-table', 'inspect-tree', 'instance?', 'int', 'interleave',
'intersection', 'into', 'into-array', 'iterate', 'join', 'key', 'keys',
'keyword', 'keyword?', 'last', 'lazy-cat', 'lazy-cons', 'left',
'lefts', 'line-seq', 'list', 'list*', 'load', 'load-file',
'locking', 'long', 'loop', 'macroexpand', 'macroexpand-1',
'make-array', 'make-node', 'map', 'map-invert', 'map?', 'mapcat',
'max', 'max-key', 'memfn', 'merge', 'merge-with', 'meta', 'min',
'min-key', 'name', 'namespace', 'neg?', 'new', 'newline', 'next',
'nil?', 'node', 'not', 'not-any?', 'not-every?', 'not=', 'ns-imports',
'ns-interns', 'ns-map', 'ns-name', 'ns-publics', 'ns-refers',
'ns-resolve', 'ns-unmap', 'nth', 'nthrest', 'or', 'parse', 'partial',
'path', 'peek', 'pop', 'pos?', 'pr', 'pr-str', 'print', 'print-str',
'println', 'println-str', 'prn', 'prn-str', 'project', 'proxy',
'proxy-mappings', 'quot', 'rand', 'rand-int', 'range', 're-find',
're-groups', 're-matcher', 're-matches', 're-pattern', 're-seq',
'read', 'read-line', 'reduce', 'ref', 'ref-set', 'refer', 'rem',
'remove', 'remove-method', 'remove-ns', 'rename', 'rename-keys',
'repeat', 'replace', 'replicate', 'resolve', 'rest', 'resultset-seq',
'reverse', 'rfirst', 'right', 'rights', 'root', 'rrest', 'rseq',
'second', 'select', 'select-keys', 'send', 'send-off', 'seq',
'seq-zip', 'seq?', 'set', 'short', 'slurp', 'some', 'sort',
'sort-by', 'sorted-map', 'sorted-map-by', 'sorted-set',
'special-symbol?', 'split-at', 'split-with', 'str', 'string?',
'struct', 'struct-map', 'subs', 'subvec', 'symbol', 'symbol?',
'sync', 'take', 'take-nth', 'take-while', 'test', 'time', 'to-array',
'to-array-2d', 'tree-seq', 'true?', 'union', 'up', 'update-proxy',
'val', 'vals', 'var-get', 'var-set', 'var?', 'vector', 'vector-zip',
'vector?', 'when', 'when-first', 'when-let', 'when-not',
'with-local-vars', 'with-meta', 'with-open', 'with-out-str',
'xml-seq', 'xml-zip', 'zero?', 'zipmap', 'zipper']
    # valid names for identifiers
    # strictly, a name just cannot consist entirely of digits,
    # but this approximation is good enough for now
valid_name = r'[a-zA-Z0-9!$%&*+,/:<=>?@^_~-]+'
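    # e.g. valid_name matches tokens such as "conj", "nil?", "with-meta"
    # and "->"; purely numeric tokens are caught first by the number rules
    # in 'root' below.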
tokens = {
'root' : [
# the comments - always starting with semicolon
# and going to the end of the line
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'\s+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
# support for uncommon kinds of numbers -
# have to figure out what the characters mean
#(r'(#e|#i|#b|#o|#d|#x)[\d.]+', Number),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"\\([()/'\".'_!§$%& ?;=#+-]{1}|[a-zA-Z0-9]+)", String.Char),
# constants
(r'(#t|#f)', Name.Constant),
# special operators
(r"('|#|`|,@|,|\.)", Operator),
# highlight the keywords
('(%s)' % '|'.join([
re.escape(entry) + ' ' for entry in keywords]),
Keyword
),
# first variable in a quoted string like
# '(this is syntactic sugar)
(r"(?<='\()" + valid_name, Name.Variable),
(r"(?<=#\()" + valid_name, Name.Variable),
# highlight the builtins
("(?<=\()(%s)" % '|'.join([
re.escape(entry) + ' ' for entry in builtins]),
Name.Builtin
),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# Clojure accepts vector notation
(r'(\[|\])', Punctuation),
# Clojure accepts map notation
(r'(\{|\})', Punctuation),
# the famous parentheses!
(r'(\(|\))', Punctuation),
],
}
class FactorLexer(RegexLexer):
"""
Lexer for the `Factor <http://factorcode.org>`_ language.
*New in Pygments 1.4.*
"""
name = 'Factor'
aliases = ['factor']
filenames = ['*.factor']
mimetypes = ['text/x-factor']
flags = re.MULTILINE | re.UNICODE
builtin_kernel = (
r'(?:or|2bi|2tri|while|wrapper|nip|4dip|wrapper\\?|bi\\*|'
r'callstack>array|both\\?|hashcode|die|dupd|callstack|'
r'callstack\\?|3dup|tri@|pick|curry|build|\\?execute|3bi|'
r'prepose|>boolean|\\?if|clone|eq\\?|tri\\*|\\?|=|swapd|'
r'2over|2keep|3keep|clear|2dup|when|not|tuple\\?|dup|2bi\\*|'
r'2tri\\*|call|tri-curry|object|bi@|do|unless\\*|if\\*|loop|'
r'bi-curry\\*|drop|when\\*|assert=|retainstack|assert\\?|-rot|'
r'execute|2bi@|2tri@|boa|with|either\\?|3drop|bi|curry\\?|'
r'datastack|until|3dip|over|3curry|tri-curry\\*|tri-curry@|swap|'
r'and|2nip|throw|bi-curry|\\(clone\\)|hashcode\\*|compose|2dip|if|3tri|'
r'unless|compose\\?|tuple|keep|2curry|equal\\?|assert|tri|2drop|'
r'most|<wrapper>|boolean\\?|identity-hashcode|identity-tuple\\?|'
r'null|new|dip|bi-curry@|rot|xor|identity-tuple|boolean)\s'
)
builtin_assocs = (
r'(?:\\?at|assoc\\?|assoc-clone-like|assoc=|delete-at\\*|'
r'assoc-partition|extract-keys|new-assoc|value\\?|assoc-size|'
r'map>assoc|push-at|assoc-like|key\\?|assoc-intersect|'
r'assoc-refine|update|assoc-union|assoc-combine|at\\*|'
r'assoc-empty\\?|at\\+|set-at|assoc-all\\?|assoc-subset\\?|'
r'assoc-hashcode|change-at|assoc-each|assoc-diff|zip|values|'
r'value-at|rename-at|inc-at|enum\\?|at|cache|assoc>map|<enum>|'
r'assoc|assoc-map|enum|value-at\\*|assoc-map-as|>alist|'
r'assoc-filter-as|clear-assoc|assoc-stack|maybe-set-at|'
r'substitute|assoc-filter|2cache|delete-at|assoc-find|keys|'
r'assoc-any\\?|unzip)\s'
)
builtin_combinators = (
r'(?:case|execute-effect|no-cond|no-case\\?|3cleave>quot|2cleave|'
r'cond>quot|wrong-values\\?|no-cond\\?|cleave>quot|no-case|'
r'case>quot|3cleave|wrong-values|to-fixed-point|alist>quot|'
r'case-find|cond|cleave|call-effect|2cleave>quot|recursive-hashcode|'
r'linear-case-quot|spread|spread>quot)\s'
)
builtin_math = (
r'(?:number=|if-zero|next-power-of-2|each-integer|\\?1\\+|'
r'fp-special\\?|imaginary-part|unless-zero|float>bits|number\\?|'
r'fp-infinity\\?|bignum\\?|fp-snan\\?|denominator|fp-bitwise=|\\*|'
r'\\+|power-of-2\\?|-|u>=|/|>=|bitand|log2-expects-positive|<|'
r'log2|>|integer\\?|number|bits>double|2/|zero\\?|(find-integer)|'
r'bits>float|float\\?|shift|ratio\\?|even\\?|ratio|fp-sign|bitnot|'
r'>fixnum|complex\\?|/i|/f|byte-array>bignum|when-zero|sgn|>bignum|'
r'next-float|u<|u>|mod|recip|rational|find-last-integer|>float|'
r'(all-integers\\?)|2^|times|integer|fixnum\\?|neg|fixnum|sq|'
r'bignum|(each-integer)|bit\\?|fp-qnan\\?|find-integer|complex|'
r'<fp-nan>|real|double>bits|bitor|rem|fp-nan-payload|all-integers\\?|'
r'real-part|log2-expects-positive\\?|prev-float|align|unordered\\?|'
r'float|fp-nan\\?|abs|bitxor|u<=|odd\\?|<=|/mod|rational\\?|>integer|'
r'real\\?|numerator)\s'
)
builtin_sequences = (
r'(?:member-eq\\?|append|assert-sequence=|find-last-from|trim-head-slice|'
r'clone-like|3sequence|assert-sequence\\?|map-as|last-index-from|'
r'reversed|index-from|cut\\*|pad-tail|remove-eq!|concat-as|'
r'but-last|snip|trim-tail|nths|nth|2selector|sequence|slice\\?|'
r'<slice>|partition|remove-nth|tail-slice|empty\\?|tail\\*|'
r'if-empty|find-from|virtual-sequence\\?|member\\?|set-length|'
r'drop-prefix|unclip|unclip-last-slice|iota|map-sum|'
r'bounds-error\\?|sequence-hashcode-step|selector-for|'
r'accumulate-as|map|start|midpoint@|\\(accumulate\\)|rest-slice|'
r'prepend|fourth|sift|accumulate!|new-sequence|follow|map!|'
r'like|first4|1sequence|reverse|slice|unless-empty|padding|'
r'virtual@|repetition\\?|set-last|index|4sequence|max-length|'
r'set-second|immutable-sequence|first2|first3|replicate-as|'
r'reduce-index|unclip-slice|supremum|suffix!|insert-nth|'
r'trim-tail-slice|tail|3append|short|count|suffix|concat|'
r'flip|filter|sum|immutable\\?|reverse!|2sequence|map-integers|'
r'delete-all|start\\*|indices|snip-slice|check-slice|sequence\\?|'
r'head|map-find|filter!|append-as|reduce|sequence=|halves|'
r'collapse-slice|interleave|2map|filter-as|binary-reduce|'
r'slice-error\\?|product|bounds-check\\?|bounds-check|harvest|'
r'immutable|virtual-exemplar|find|produce|remove|pad-head|last|'
r'replicate|set-fourth|remove-eq|shorten|reversed\\?|'
r'map-find-last|3map-as|2unclip-slice|shorter\\?|3map|find-last|'
r'head-slice|pop\\*|2map-as|tail-slice\\*|but-last-slice|'
r'2map-reduce|iota\\?|collector-for|accumulate|each|selector|'
r'append!|new-resizable|cut-slice|each-index|head-slice\\*|'
r'2reverse-each|sequence-hashcode|pop|set-nth|\\?nth|'
r'<flat-slice>|second|join|when-empty|collector|'
r'immutable-sequence\\?|<reversed>|all\\?|3append-as|'
r'virtual-sequence|subseq\\?|remove-nth!|push-either|new-like|'
r'length|last-index|push-if|2all\\?|lengthen|assert-sequence|'
r'copy|map-reduce|move|third|first|3each|tail\\?|set-first|'
r'prefix|bounds-error|any\\?|<repetition>|trim-slice|exchange|'
r'surround|2reduce|cut|change-nth|min-length|set-third|produce-as|'
r'push-all|head\\?|delete-slice|rest|sum-lengths|2each|head\\*|'
r'infimum|remove!|glue|slice-error|subseq|trim|replace-slice|'
r'push|repetition|map-index|trim-head|unclip-last|mismatch)\s'
)
builtin_namespaces = (
r'(?:global|\\+@|change|set-namestack|change-global|init-namespaces|'
r'on|off|set-global|namespace|set|with-scope|bind|with-variable|'
r'inc|dec|counter|initialize|namestack|get|get-global|make-assoc)\s'
)
builtin_arrays = (
r'(?:<array>|2array|3array|pair|>array|1array|4array|pair\\?|'
r'array|resize-array|array\\?)\s'
)
builtin_io = (
r'(?:\\+character\\+|bad-seek-type\\?|readln|each-morsel|stream-seek|'
r'read|print|with-output-stream|contents|write1|stream-write1|'
r'stream-copy|stream-element-type|with-input-stream|'
r'stream-print|stream-read|stream-contents|stream-tell|'
r'tell-output|bl|seek-output|bad-seek-type|nl|stream-nl|write|'
r'flush|stream-lines|\\+byte\\+|stream-flush|read1|'
r'seek-absolute\\?|stream-read1|lines|stream-readln|'
r'stream-read-until|each-line|seek-end|with-output-stream\\*|'
r'seek-absolute|with-streams|seek-input|seek-relative\\?|'
r'input-stream|stream-write|read-partial|seek-end\\?|'
r'seek-relative|error-stream|read-until|with-input-stream\\*|'
r'with-streams\\*|tell-input|each-block|output-stream|'
r'stream-read-partial|each-stream-block|each-stream-line)\s'
)
builtin_strings = (
r'(?:resize-string|>string|<string>|1string|string|string\\?)\s'
)
builtin_vectors = (
r'(?:vector\\?|<vector>|\\?push|vector|>vector|1vector)\s'
)
builtin_continuations = (
r'(?:with-return|restarts|return-continuation|with-datastack|'
r'recover|rethrow-restarts|<restart>|ifcc|set-catchstack|'
r'>continuation<|cleanup|ignore-errors|restart\\?|'
r'compute-restarts|attempt-all-error|error-thread|continue|'
r'<continuation>|attempt-all-error\\?|condition\\?|'
r'<condition>|throw-restarts|error|catchstack|continue-with|'
r'thread-error-hook|continuation|rethrow|callcc1|'
r'error-continuation|callcc0|attempt-all|condition|'
r'continuation\\?|restart|return)\s'
)
tokens = {
'root': [
# TODO: (( inputs -- outputs ))
# TODO: << ... >>
# defining words
(r'(\s*)(:|::|MACRO:|MEMO:)(\s+)(\S+)',
bygroups(Text, Keyword, Text, Name.Function)),
(r'(\s*)(M:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Text, Keyword, Text, Name.Class, Text, Name.Function)),
(r'(\s*)(GENERIC:)(\s+)(\S+)',
bygroups(Text, Keyword, Text, Name.Function)),
(r'(\s*)(HOOK:|GENERIC#)(\s+)(\S+)(\s+)(\S+)',
bygroups(Text, Keyword, Text, Name.Function, Text, Name.Function)),
(r'(\()(\s+)', bygroups(Name.Function, Text), 'stackeffect'),
(r'\;\s', Keyword),
# imports and namespaces
(r'(USING:)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'(USE:)(\s+)(\S+)', bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'(UNUSE:)(\s+)(\S+)', bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'(QUALIFIED:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'(QUALIFIED-WITH:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'(FROM:|EXCLUDE:)(\s+)(\S+)(\s+)(=>)',
bygroups(Keyword.Namespace, Text, Name.Namespace, Text, Text)),
(r'(IN:)(\s+)(\S+)', bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'(?:ALIAS|DEFER|FORGET|POSTPONE):', Keyword.Namespace),
# tuples and classes
(r'(TUPLE:)(\s+)(\S+)(\s+<\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Class), 'slots'),
(r'(TUPLE:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class), 'slots'),
(r'(UNION:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)),
(r'(INTERSECTION:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)),
(r'(PREDICATE:)(\s+)(\S+)(\s+<\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Class)),
(r'(C:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function, Text, Name.Class)),
(r'INSTANCE:', Keyword),
(r'SLOT:', Keyword),
(r'MIXIN:', Keyword),
(r'(?:SINGLETON|SINGLETONS):', Keyword),
# other syntax
(r'CONSTANT:', Keyword),
(r'(?:SYMBOL|SYMBOLS):', Keyword),
(r'ERROR:', Keyword),
(r'SYNTAX:', Keyword),
(r'(HELP:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Function)),
(r'(MAIN:)(\s+)(\S+)', bygroups(Keyword.Namespace, Text, Name.Function)),
(r'(?:ALIEN|TYPEDEF|FUNCTION|STRUCT):', Keyword),
# vocab.private
# TODO: words inside vocab.private should have red names?
(r'(?:<PRIVATE|PRIVATE>)', Keyword.Namespace),
# strings
(r'"""\s+(?:.|\n)*?\s+"""', String),
(r'"(?:\\\\|\\"|[^"])*"', String),
(r'CHAR:\s+(\\[\\abfnrstv]*|\S)\s', String.Char),
# comments
(r'\!\s+.*$', Comment),
(r'#\!\s+.*$', Comment),
# boolean constants
(r'(t|f)\s', Name.Constant),
# numbers
(r'-?\d+\.\d+\s', Number.Float),
(r'-?\d+\s', Number.Integer),
(r'HEX:\s+[a-fA-F\d]+\s', Number.Hex),
(r'BIN:\s+[01]+\s', Number.Integer),
(r'OCT:\s+[0-7]+\s', Number.Oct),
# operators
(r'[-+/*=<>^]\s', Operator),
# keywords
(r'(?:deprecated|final|foldable|flushable|inline|recursive)\s', Keyword),
# builtins
(builtin_kernel, Name.Builtin),
(builtin_assocs, Name.Builtin),
(builtin_combinators, Name.Builtin),
(builtin_math, Name.Builtin),
(builtin_sequences, Name.Builtin),
(builtin_namespaces, Name.Builtin),
(builtin_arrays, Name.Builtin),
(builtin_io, Name.Builtin),
(builtin_strings, Name.Builtin),
(builtin_vectors, Name.Builtin),
(builtin_continuations, Name.Builtin),
# whitespaces - usually not relevant
(r'\s+', Text),
# everything else is text
(r'\S+', Text),
],
'stackeffect': [
(r'\s*\(', Name.Function, 'stackeffect'),
(r'\)', Name.Function, '#pop'),
(r'\-\-', Name.Function),
(r'\s+', Text),
(r'\S+', Name.Variable),
],
'slots': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'\S+', Name.Variable),
],
'import': [
(r';', Keyword, '#pop'),
(r'\S+', Name.Namespace),
(r'\s+', Text),
],
}
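    # e.g. in ": double ( x -- y ) 2 * ;" the rules above tag ":" as
    # Keyword and "double" as Name.Function, lex "( x -- y )" in the
    # 'stackeffect' state, and mark the trailing "; " as Keyword.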
class IokeLexer(RegexLexer):
"""
For `Ioke <http://ioke.org/>`_ (a strongly typed, dynamic,
prototype based programming language) source.
*New in Pygments 1.4.*
"""
name = 'Ioke'
filenames = ['*.ik']
aliases = ['ioke', 'ik']
mimetypes = ['text/x-iokesrc']
tokens = {
'interpolatableText': [
(r'(\\b|\\e|\\t|\\n|\\f|\\r|\\"|\\\\|\\#|\\\Z|\\u[0-9a-fA-F]{1,4}'
r'|\\[0-3]?[0-7]?[0-7])', String.Escape),
(r'#{', Punctuation, 'textInterpolationRoot')
],
'text': [
(r'(?<!\\)"', String, '#pop'),
include('interpolatableText'),
(r'[^"]', String)
],
'documentation': [
(r'(?<!\\)"', String.Doc, '#pop'),
include('interpolatableText'),
(r'[^"]', String.Doc)
],
'textInterpolationRoot': [
(r'}', Punctuation, '#pop'),
include('root')
],
'slashRegexp': [
(r'(?<!\\)/[oxpniums]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\/', String.Regex),
(r'[^/]', String.Regex)
],
'squareRegexp': [
(r'(?<!\\)][oxpniums]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\]', String.Regex),
(r'[^\]]', String.Regex)
],
'squareText': [
(r'(?<!\\)]', String, '#pop'),
include('interpolatableText'),
(r'[^\]]', String)
],
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r';(.*?)\n', Comment),
(r'\A#!(.*?)\n', Comment),
#Regexps
(r'#/', String.Regex, 'slashRegexp'),
(r'#r\[', String.Regex, 'squareRegexp'),
#Symbols
(r':[a-zA-Z0-9_!:?]+', String.Symbol),
(r'[a-zA-Z0-9_!:?]+:(?![a-zA-Z0-9_!?])', String.Other),
(r':"(\\\\|\\"|[^"])*"', String.Symbol),
#Documentation
(r'((?<=fn\()|(?<=fnx\()|(?<=method\()|(?<=macro\()|(?<=lecro\()'
r'|(?<=syntax\()|(?<=dmacro\()|(?<=dlecro\()|(?<=dlecrox\()'
r'|(?<=dsyntax\())[\s\n\r]*"', String.Doc, 'documentation'),
#Text
(r'"', String, 'text'),
(r'#\[', String, 'squareText'),
#Mimic
(r'[a-zA-Z0-9_][a-zA-Z0-9!?_:]+(?=\s*=.*mimic\s)', Name.Entity),
#Assignment
(r'[a-zA-Z_][a-zA-Z0-9_!:?]*(?=[\s]*[+*/-]?=[^=].*($|\.))', Name.Variable),
# keywords
(r'(break|cond|continue|do|ensure|for|for:dict|for:set|if|let|'
r'loop|p:for|p:for:dict|p:for:set|return|unless|until|while|'
r'with)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
# Origin
(r'(eval|mimic|print|println)(?![a-zA-Z0-9!:_?])', Keyword),
# Base
(r'(cell\?|cellNames|cellOwner\?|cellOwner|cells|cell|'
r'documentation|hash|identity|mimic|removeCell\!|undefineCell\!)'
r'(?![a-zA-Z0-9!:_?])', Keyword),
# Ground
(r'(stackTraceAsText)(?![a-zA-Z0-9!:_?])', Keyword),
#DefaultBehaviour Literals
(r'(dict|list|message|set)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
#DefaultBehaviour Case
(r'(case|case:and|case:else|case:nand|case:nor|case:not|case:or|'
r'case:otherwise|case:xor)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
#DefaultBehaviour Reflection
(r'(asText|become\!|derive|freeze\!|frozen\?|in\?|is\?|kind\?|'
r'mimic\!|mimics|mimics\?|prependMimic\!|removeAllMimics\!|'
r'removeMimic\!|same\?|send|thaw\!|uniqueHexId)'
r'(?![a-zA-Z0-9!:_?])', Keyword),
#DefaultBehaviour Aspects
(r'(after|around|before)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
# DefaultBehaviour
(r'(kind|cellDescriptionDict|cellSummary|genSym|inspect|notice)'
r'(?![a-zA-Z0-9!:_?])', Keyword),
(r'(use|destructuring)', Keyword.Reserved),
#DefaultBehavior BaseBehavior
(r'(cell\?|cellOwner\?|cellOwner|cellNames|cells|cell|'
r'documentation|identity|removeCell!|undefineCell)'
r'(?![a-zA-Z0-9!:_?])', Keyword),
#DefaultBehavior Internal
(r'(internal:compositeRegexp|internal:concatenateText|'
r'internal:createDecimal|internal:createNumber|'
r'internal:createRegexp|internal:createText)'
r'(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
#DefaultBehaviour Conditions
(r'(availableRestarts|bind|error\!|findRestart|handle|'
r'invokeRestart|rescue|restart|signal\!|warn\!)'
r'(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
# constants
(r'(nil|false|true)(?![a-zA-Z0-9!:_?])', Name.Constant),
# names
(r'(Arity|Base|Call|Condition|DateTime|Aspects|Pointcut|'
r'Assignment|BaseBehavior|Boolean|Case|AndCombiner|Else|'
r'NAndCombiner|NOrCombiner|NotCombiner|OrCombiner|XOrCombiner|'
r'Conditions|Definitions|FlowControl|Internal|Literals|'
r'Reflection|DefaultMacro|DefaultMethod|DefaultSyntax|Dict|'
r'FileSystem|Ground|Handler|Hook|IO|IokeGround|Struct|'
r'LexicalBlock|LexicalMacro|List|Message|Method|Mixins|'
r'NativeMethod|Number|Origin|Pair|Range|Reflector|Regexp Match|'
r'Regexp|Rescue|Restart|Runtime|Sequence|Set|Symbol|'
r'System|Text|Tuple)(?![a-zA-Z0-9!:_?])', Name.Builtin),
# functions
(ur'(generateMatchMethod|aliasMethod|\u03bb|\u028E|fnx|fn|method|'
ur'dmacro|dlecro|syntax|macro|dlecrox|lecrox|lecro|syntax)'
ur'(?![a-zA-Z0-9!:_?])', Name.Function),
# Numbers
(r'-?0[xX][0-9a-fA-F]+', Number.Hex),
(r'-?(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'-?\d+', Number.Integer),
(r'#\(', Punctuation),
# Operators
(ur'(&&>>|\|\|>>|\*\*>>|:::|::|\.\.\.|===|\*\*>|\*\*=|&&>|&&=|'
ur'\|\|>|\|\|=|\->>|\+>>|!>>|<>>>|<>>|&>>|%>>|#>>|@>>|/>>|\*>>|'
ur'\?>>|\|>>|\^>>|~>>|\$>>|=>>|<<=|>>=|<=>|<\->|=~|!~|=>|\+\+|'
ur'\-\-|<=|>=|==|!=|&&|\.\.|\+=|\-=|\*=|\/=|%=|&=|\^=|\|=|<\-|'
ur'\+>|!>|<>|&>|%>|#>|\@>|\/>|\*>|\?>|\|>|\^>|~>|\$>|<\->|\->|'
ur'<<|>>|\*\*|\?\||\?&|\|\||>|<|\*|\/|%|\+|\-|&|\^|\||=|\$|!|~|'
ur'\?|#|\u2260|\u2218|\u2208|\u2209)', Operator),
(r'(and|nand|or|xor|nor|return|import)(?![a-zA-Z0-9_!?])',
Operator),
# Punctuation
(r'(\`\`|\`|\'\'|\'|\.|\,|@|@@|\[|\]|\(|\)|{|})', Punctuation),
#kinds
(r'[A-Z][a-zA-Z0-9_!:?]*', Name.Class),
#default cellnames
(r'[a-z_][a-zA-Z0-9_!:?]*', Name)
]
}
| bsd-3-clause | -6,039,682,143,026,706,000 | 40.323124 | 87 | 0.471848 | false |
cedrick-f/pySequence | src/pyXorga.py | 1 | 65552 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
##This file is part of pyXorga
#############################################################################
#############################################################################
## ##
## pyXorga ##
## ##
#############################################################################
#############################################################################
## Copyright (C) 2014 Cédrick FAURY
# pyXorga is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# pyXorga is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyXorga; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
pyXorga.py
Organize your files with XMind
*************
Copyright (C) 2014 - 2015
@author: Cedrick FAURY
"""
import version
####################################################################################
#
# Import required modules
#
####################################################################################
# GUI wxpython
import wx
# sdk
import xmind
from xmind.core import workbook,saver, loader
from xmind.core.markerref import MarkerId, MarkerRefElement
from xmind.core.topic import TopicElement
from xmind import utils
import Images
import subprocess
# mekk
#from mekk.xmind import XMindDocument
import sys
import os
import os.path
import glob
import ConfigParser
import shutil
import codecs
print "defaultencoding", sys.getdefaultencoding()
print "stdin, stdout", sys.stdin.encoding,sys.stdout.encoding
if hasattr(sys, 'setdefaultencoding'):
sys.setdefaultencoding('utf8')
else:
reload(sys) # Reload does the trick!
sys.setdefaultencoding('utf-8')
FILE_ENCODING = sys.getfilesystemencoding() #'cp1252'#
SYSTEM_ENCODING = sys.getdefaultencoding()#sys.stdout.encoding#
print "FILE_ENCODING", FILE_ENCODING
print "SYSTEM_ENCODING", SYSTEM_ENCODING
######################################################################################
def toSystemEncoding(path):
# try:
path = path.decode(FILE_ENCODING)
path = path.encode(SYSTEM_ENCODING)
return path
# except:
# return self.path
######################################################################################
def toFileEncoding(path):
# try:
path = path.decode(SYSTEM_ENCODING)
return path.encode(FILE_ENCODING)
# except:
# return path
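# Hedged note: the two helpers above are inverses whenever both encodings
# can represent the path, e.g. toFileEncoding(toSystemEncoding(p)) == p for
# any pure-ASCII p.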
from util_path import *
# Original CFG files (as shipped)
FICHIER_TYPES_ORG = os.path.join(PATH, toFileEncoding("Types.cfg"))
# CFG files ("shared" or "per-user", depending on the choice made at install time)
FICHIER_TYPES = os.path.join(APP_DATA_PATH, toFileEncoding("Types.cfg"))
## Per-user CFG files
FICHIER_CFG_USER = os.path.join(APP_DATA_PATH_USER, toFileEncoding("pyXorga.cfg"))
#FICHIER_TYPES_USER = os.path.join(APP_DATA_PATH_USER, toFileEncoding("Types.cfg"))
#######################################################################################################
# Default settings
#######################################################################################################
EXCLURE_DIR = [u""]
INCLURE_DIR = [u"*"]
INCLURE_FIC = [u"*"]
EXCLURE_FIC = [u""]
INCLURE_TYP = [u"C"]
EXCLURE_TYP = [u"*"]
FILTRER_TYP = True
DOSSIER = u""
FICHIER = u""
EXCLURE_DOSSIERS_VIDE = True
STRUCTURE = 'structure-class="org.xmind.ui.logic.right"'
MARQUEUR_DOSSIER = "Dossier"
#MarqueurIDDossier = MarkerId(MARQUEUR_DOSSIER)
MarqueurDossier = MarkerRefElement()
MarqueurDossier.setMarkerId(MARQUEUR_DOSSIER)
def utf8decode(s):
s = s.encode("iso-8859-1")
return s.decode("utf-8")
def listdirectory2(path):
fichier=[]
for root, dirs, files in os.walk(path):
for i in files:
fichier.append(os.path.join(root, i))
return fichier
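# e.g. listdirectory2(u'/tmp') walks the tree and returns a flat list of
# full file paths; directories themselves are not listed.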
#
def GetTypeNom(nFich):
""" Renvoie le type et le nom du document
nFich : String encodé en SYSTEM_ENCODING
"""
parties = nFich.split(SEPARATEUR)
if len(parties) > 1:
for t in TYPES.keys():
if parties[0] == t:
return t, parties[1]
return None, nFich
def GetType(nFich):
""" Renvoie le type du document
nFich : String encodé en SYSTEM_ENCODING
"""
parties = nFich.split(SEPARATEUR)
if len(parties) > 1:
for t in TYPES.keys():
if parties[0] == t:
return t
return None
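# Hedged example (SEPARATEUR and TYPES come from util_path / Types.cfg, so
# the exact values are configuration-dependent): with SEPARATEUR == u'_'
# and u'C' in TYPES, GetTypeNom(u'C_rapport.pdf') returns
# (u'C', u'rapport.pdf') and GetType(u'C_rapport.pdf') returns u'C'.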
def GetNomSimple(file, typ):
return os.path.splitext(file[len(TYPES[typ][0]):])[0]
#################################################################################################
#
# Configuration file handling
#
#################################################################################################
SECTION_FICH = u"FichierDossiers"
SECTION_FILTRE = u"Filtres"
#listdirectory(INPUT_DIR, root_topic)
#
#root_topic.add_subtopic(u"First item")
#root_topic.add_subtopic(u"Second item")
#t = root_topic.add_subtopic(u"Third item")
#t.add_subtopic(u"Second level - 1")
#t.add_subtopic(u"Second level - 2")
#root_topic.add_subtopic(u"Detached topic", detached = True)
#t.add_subtopic(u"Another detached", detached = True)
#t.add_marker("flag-red")
#root_topic.add_subtopic(u"Link example").set_link("http://mekk.waw.pl")
##root_topic.add_subtopic(u"Attachment example").set_attachment(
## file("map_creator.py").read(), ".txt")
#root_topic.add_subtopic(u"With note").set_note(u"""This is just some dummy note.""")
#MARKER_CODE = "40g6170ftul9bo17p1r31nqk2a"
#XMP = "../../py_mekk_nozbe2xmind/src/mekk/nozbe2xmind/NozbeIconsMarkerPackage.xmp"
#root_topic.add_subtopic(u"With non-standard marker").add_marker(MARKER_CODE)
#
#xmind.embed_markers(XMP)
#
#xmind.pretty_print()
####################################################################################
#
# Class defining the application
# --> retrieves the parameters passed on the command line
#
####################################################################################
#from asyncore import dispatcher, loop
#import sys, time, socket, threading
#class SeqApp(wx.App):
# def OnInit(self):
# wx.Log.SetLogLevel(0) # ?? to avoid the wxPython 3.0 crash with Win XP Pro ???
#
##########################################################################################
def fcount(path):
count1 = 0
for root, dirs, files in os.walk(path):
count1 += len(dirs)
return count1
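# NB: despite its name, fcount() counts the *directories* below path, not
# the files; callers use it to advance the progress dialog over skipped
# subtrees.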
##########################################################################################
def getListNomGlob(path, liste):
""" Renvoie la liste des fichiers
"""
os.chdir(path)
l = []
for f in liste:
l.extend(glob.glob(f))
# l = [f.encode(FILE_ENCODING) for f in l]
return l
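# e.g. getListNomGlob(path, [u'*.txt', u'*.doc']) chdir()s into path and
# returns the matching names relative to path, not absolute paths.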
##########################################################################################
def estInclus(dossier1, dossier2):
""" Vérifie si dossier1 est un sous-dossier de dossier2
"""
# print "estInclus : "
# print " ", dossier1
# print " ", dossier2
# print " ", dossier1[:len(dossier2)]
if len(dossier1) < len(dossier2):
return False
return dossier1[:len(dossier2)] == dossier2
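# Note: this is a plain string-prefix test, so u'/a/bc' counts as included
# in u'/a/b' even though it is a sibling directory.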
class FilterNB(wx.Notebook):
def __init__(self, parent, app, exclure_Dir , inclure_Dir,
exclure_Fic, inclure_Fic,
exclure_Typ, inclure_Typ,
filtrerTypes):
wx.Notebook.__init__(self, parent, -1, size=(21,21), style=
wx.BK_DEFAULT
#wx.BK_TOP
#wx.BK_BOTTOM
#wx.BK_LEFT
#wx.BK_RIGHT
# | wx.NB_MULTILINE
)
self.winDossiers = PanelInclureExclure(self, app, "D", inclure_Dir, exclure_Dir)
self.AddPage(self.winDossiers, "Dossiers")
# self.exclure_D = winDossiers.exclure
# self.inclure_D = winDossiers.inclure
# Show how to put an image on one of the notebook tabs,
# first make the image list:
# il = wx.ImageList(16, 16)
# idx1 = il.Add(images.Smiles.GetBitmap())
# self.AssignImageList(il)
# now put an image on the first tab we just created:
# self.SetPageImage(0, idx1)
self.winExtensions = PanelInclureExclure(self, app, "F", inclure_Fic, exclure_Fic)
self.AddPage(self.winExtensions, u"Fichiers")
# self.exclure_F = winExtensions.exclure
# self.inclure_F = winExtensions.inclure
self.winTypes = PanelInclureExclureTypes(self, app, inclure_Typ, exclure_Typ, filtrerTypes)
self.AddPage(self.winTypes, u"Types")
# self.exclure_T = winTypes.exclure
# self.inclure_T = winTypes.inclure
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.OnPageChanged)
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGING, self.OnPageChanging)
def OnPageChanged(self, event):
# old = event.GetOldSelection()
# new = event.GetSelection()
# sel = self.GetSelection()
# self.log.write('OnPageChanged, old:%d, new:%d, sel:%d\n' % (old, new, sel))
event.Skip()
def OnPageChanging(self, event):
# old = event.GetOldSelection()
# new = event.GetSelection()
# sel = self.GetSelection()
# self.log.write('OnPageChanging, old:%d, new:%d, sel:%d\n' % (old, new, sel))
event.Skip()
class pyXorgFrame(wx.Frame):
def __init__(self, nomFichier = None):
wx.Frame.__init__(self, None, -1, "pyXorga" + version.__version__, size = (400,600))
p = wx.Panel(self, -1, style = wx.TAB_TRAVERSAL
| wx.CLIP_CHILDREN
| wx.FULL_REPAINT_ON_RESIZE
)
self.SetIcon(wx.Icon(os.path.join(PATH, r"pyXorga_icone.ico"), wx.BITMAP_TYPE_ICO))
self.exclure_Dir = EXCLURE_DIR
self.inclure_Dir = INCLURE_DIR
self.exclure_Fic = EXCLURE_FIC
self.inclure_Fic = INCLURE_FIC
self.exclure_Typ = EXCLURE_TYP
self.inclure_Typ = INCLURE_TYP
self.filtrerTypes = FILTRER_TYP
self.ouvrirCFG()
#
# Variables
#
self.nomFichier = FICHIER
self.dossier = DOSSIER
self.dossierSortie = u""
self.titreCarte = u""
if nomFichier != None:
            # A folder was passed as an argument
self.dossier = nomFichier
self.nomFichier = os.path.join(self.dossier, os.path.split(self.dossier)[1] + toFileEncoding(".xmind"))
if len(os.path.split(self.dossier)[1]) > 0:
self.dossierSortie = os.path.split(self.dossier)[0]
self.titreCarte = os.path.splitext(os.path.split(self.dossier)[1])[0]
self.ajouterCarteMentale = True
self.ouvrirTypes()
#
        # Folder to process
#
box = wx.StaticBox(p, -1, u"Dossier à traiter")
bsizerd = wx.StaticBoxSizer(box, wx.VERTICAL)
c = URLSelectorCombo(p, self, self.dossier, "D")
self.selecteur_D = c
bsizerd.Add(c, 0, wx.ALL|wx.EXPAND, 5)
#
        # Outputs
#
box = wx.StaticBox(p, -1, u"Structure de sortie")
bsizerxs = wx.StaticBoxSizer(box, wx.VERTICAL)
st = wx.StaticText(p, -1, u"Nom de la racine")
ct = self.ctrlTitre = wx.TextCtrl(p, -1, self.titreCarte)
self.Bind(wx.EVT_TEXT, self.EvtText, ct)
#
        # Output XMind file
#
box = wx.StaticBox(p, -1, u"Carte mentale XMind du Dossier à traiter")
bsizerx = wx.StaticBoxSizer(box, wx.HORIZONTAL)
c = URLSelectorCombo(p, self, self.nomFichier, "F")
bsizerx.Add(c, 1, wx.ALL|wx.EXPAND, 5)
self.selecteur_F = c
b = self.boutonGenererXMind = wx.Button(p, -1, u"Générer\nla carte", (20, 80))
self.Bind(wx.EVT_BUTTON, self.OnClick, b)
self.boutonGenererXMind.Enable(self.nomFichier != u"")
b.SetToolTipString(u"Générer une carte mentale XMind de la structure")
bsizerx.Add(b, 0, wx.ALL|wx.EXPAND, 5)
b = self.boutonOuvrirXMind = wx.BitmapButton(p, -1, Images.LogoXMind.GetBitmap())
self.boutonOuvrirXMind.Enable(os.path.exists(self.nomFichier))
self.Bind(wx.EVT_BUTTON, self.OnClick, b)
b.SetToolTipString(u"Ouvrir la carte mentale générée (XMind nécessaire)")
bsizerx.Add(b, 0, wx.ALL|wx.EXPAND, 5)
#
        # Output folder
#
box = wx.StaticBox(p, -1, u"Copie filtrée du Dossier à traiter")
bsizers = wx.StaticBoxSizer(box, wx.HORIZONTAL)
vs = wx.BoxSizer(wx.VERTICAL)
t = wx.StaticText(p, -1, u"Emplacement du dossier :")
vs.Add(t, 0, wx.ALL|wx.EXPAND, 2)
c = URLSelectorCombo(p, self, self.dossierSortie, "D")
self.selecteur_DS = c
vs.Add(c, 1, wx.ALL|wx.EXPAND, 2)
cb = wx.CheckBox(p, -1, u"Ajouter une Carte mentale")
cb.SetValue(self.ajouterCarteMentale)
self.Bind(wx.EVT_CHECKBOX, self.EvtCheckBox, cb)
cb.SetToolTipString(u"Générer une carte mentale à la racine du dossier")
vs.Add(cb, 1, wx.ALL|wx.EXPAND, 2)
bsizers.Add(vs, 1, wx.ALL|wx.EXPAND, 5)
b = self.boutonGenererClone = wx.Button(p, -1, u"Générer\ndossier", (20, 80))
self.Bind(wx.EVT_BUTTON, self.OnClick, b)
self.boutonGenererClone.Enable(self.testerValiditeClone())
b.SetToolTipString(u"Générer une arborescence de fichiers de la structure")
bsizers.Add(b, 0, wx.ALL|wx.EXPAND, 5)
b = self.boutonOuvrirDossier = wx.BitmapButton(p, -1, wx.ArtProvider_GetBitmap(wx.ART_FOLDER, wx.ART_OTHER, (42, 42)))
self.boutonOuvrirDossier.Enable(os.path.exists(self.dossierSortie))
self.Bind(wx.EVT_BUTTON, self.OnClick, b)
b.SetToolTipString(u"Ouvrir le dossier généré")
bsizers.Add(b, 0, wx.ALL|wx.EXPAND, 5)
#
        # Filters
#
box = wx.StaticBox(p, -1, u"Filtres")
bsizerf = wx.StaticBoxSizer(box, wx.VERTICAL)
self.FilterNB = FilterNB(p, self, self.exclure_Dir , self.inclure_Dir,
self.exclure_Fic, self.inclure_Fic,
self.exclure_Typ, self.inclure_Typ,
self.filtrerTypes)
bsizerf.Add(self.FilterNB, 1, wx.ALL|wx.EXPAND, 5)
#
        # Layout
#
bsizerxs.Add(st, 0, wx.ALL|wx.EXPAND, 5)
bsizerxs.Add(self.ctrlTitre, 0, wx.ALL|wx.EXPAND, 5)
bsizerxs.Add(bsizerx, 0, wx.ALL|wx.EXPAND, 5)
bsizerxs.Add(bsizers, 0, wx.ALL|wx.EXPAND, 5)
bsizerxs.Add(bsizerf, 1, wx.ALL|wx.EXPAND, 5)
gbs = self.gbs = wx.GridBagSizer(5, 5)
gbs.Add( bsizerd, (0,0), (1,1), wx.ALIGN_CENTER | wx.ALL | wx.EXPAND)
gbs.Add( bsizerxs, (1,0), (1,1), wx.ALIGN_CENTER | wx.ALL | wx.EXPAND)
# gbs.Add( bsizers, (2,0), (1,1), wx.ALIGN_CENTER | wx.ALL | wx.EXPAND)
# gbs.Add( bsizerf, (3,0), (1,1), wx.ALIGN_CENTER | wx.ALL | wx.EXPAND)
gbs.AddGrowableRow(1)
gbs.AddGrowableCol(0)
box = wx.BoxSizer()
box.Add(gbs, 1, wx.ALL|wx.EXPAND, 5)
p.SetSizerAndFit(box)
self.SetMinSize((400, 600))
self.SetClientSize(p.GetBestSize())
        # Intercept the close request
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.selecteur_D.SetPath(self.dossier)
self.selecteur_F.SetPath(self.nomFichier)
self.selecteur_DS.SetPath(self.dossierSortie)
##########################################################################################
def EvtText(self, event):
self.titreCarte = event.GetString()
self.OnPathModified()
##########################################################################################
def EvtCheckBox(self, event):
self.ajouterCarteMentale = event.IsChecked()
##########################################################################################
def testerDossierExistant(self):
print "testerDossierExistant"
os.chdir(self.dossierSortie)
d = os.path.join(self.dossierSortie, self.titreCarte)
while os.path.exists(d) and len(os.listdir(d)) > 0:
os.chdir(d)
dlg = wx.MessageDialog(self, u"Le dossier suivant existe déja, et n'est pas vide !\n\n\n" \
u"%s\n\n"\
u"Voulez-vous effacer son contenu ?\n" %os.path.join(self.dossierSortie, self.titreCarte),
u'Dossier existant et non vide',
wx.ICON_INFORMATION | wx.YES_NO | wx.CANCEL
)
retCode = dlg.ShowModal()
dlg.Destroy()
if retCode == wx.ID_YES:
os.chdir(d)
for f in os.listdir(d):
if os.path.isdir(f):
shutil.rmtree(f, ignore_errors = False, onerror=onerror)
else:
os.remove(f)
os.chdir(d)
elif retCode == wx.ID_NO:
return False
else:
return False
return True
##########################################################################################
def OnClick(self, event):
if event.GetEventObject() == self.boutonOuvrirXMind:
try:
os.startfile(self.nomFichier)
# subprocess.Popen(["xmind", self.nomFichier])
except:
messageErreur(None, u"Ouverture impossible",
u"Impossible d'accéder au fichier\n\n%s\n" %toSystemEncoding(self.nomFichier))
#####################################################################################################################
elif event.GetEventObject() == self.boutonOuvrirDossier:
try:
os.startfile(os.path.join(self.dossierSortie, self.titreCarte))
# subprocess.Popen(["xmind", self.nomFichier])
except:
messageErreur(None, u"Ouverture impossible",
u"Impossible d'accéder au dossier\n\n%s\n" %toSystemEncoding(self.dossierSortie))
#####################################################################################################################
elif event.GetEventObject() == self.boutonGenererXMind:
if os.path.splitext(self.nomFichier)[1].lower() != ".xmind":
self.nomFichier = os.path.splitext(self.nomFichier)[0] + ".xmind"
if os.path.exists(self.nomFichier):
dlg = wx.MessageDialog(self, u"La carte mentale %s existe déja !\n\n" \
u"Voulez-vous l'écraser ?\n" %self.nomFichier,
u'Carte existante',
wx.ICON_INFORMATION | wx.YES_NO | wx.CANCEL
)
retCode = dlg.ShowModal()
dlg.Destroy()
if retCode == wx.ID_YES:
os.remove(self.nomFichier)
elif retCode == wx.ID_NO:
return
else:
return
#
# try:
# self.dossier = unicode(self.dossier, DEFAUT_ENCODING)
# except:
# pass
th = ThreadDossier(self, mode = 0)
self.dlg = ProgressFrame(None, -1, u"Génération de la carte", th)
self.dlg.Show()
th.SetDlg(self.dlg)
th.start()
#####################################################################################################################
elif event.GetEventObject() == self.boutonGenererClone:
# try:
# self.dossierSortie = unicode(self.dossierSortie, DEFAUT_ENCODING)
# except:
# pass
#
# try:
# self.dossier = unicode(self.dossier, DEFAUT_ENCODING)
# except:
# pass
if self.testerDossierExistant():
os.chdir(self.dossierSortie)
if not os.path.exists(self.titreCarte):
os.mkdir(self.titreCarte)
os.chdir(self.titreCarte)
th = ThreadDossier(self, mode = 1)
self.dlg = ProgressFrame(None, -1, u"Génération du dossier clone", th)
self.dlg.Show()
th.SetDlg(self.dlg)
th.start()
else:
dlg = wx.MessageDialog(self, u"Le dossier cible n'existe pas !\n\n" + self.dossierSortie,
u"Dossier inexistant",
wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
##########################################################################################
def getListeIE(self, IE, typ):
return self.FilterNB.getListeIE(IE, typ)
#############################################################################
def OnPathModified(self, selecteur = None, lien = None):
if hasattr(self, "selecteur_F") and selecteur == self.selecteur_F:
self.nomFichier = lien
elif hasattr(self, "selecteur_D") and selecteur == self.selecteur_D:
self.dossier = lien
self.ctrlTitre.SetValue(os.path.basename(self.dossier))
elif hasattr(self, "selecteur_DS") and selecteur == self.selecteur_DS:
self.dossierSortie = lien
#
        # State of the buttons and selectors
#
self.boutonOuvrirXMind.Enable(os.path.exists(self.nomFichier))
self.boutonGenererXMind.Enable(self.nomFichier != u"")
Ok = self.testerValiditeClone()
self.boutonGenererClone.Enable(Ok)
self.Refresh()
#############################################################################
def testerValiditeClone(self):
""" Vérifie que le dossier de clonage est valide
"""
if samefile(os.path.join(self.dossierSortie, self.titreCarte), self.dossier):
self.marquerConflit(True)
return False
else:
self.marquerConflit(False)
if not estInclus(self.dossierSortie, self.dossier):
if self.dossierSortie == u"":
self.selecteur_DS.marquerErreur(u"Nom de dossier non valide")
return False
else:
self.selecteur_DS.marquerErreur()
else:
self.selecteur_DS.marquerErreur(u"Le dossier de destination ne DOIT PAS être un sous dossier du dossier source !")
return False
return True
#############################################################################
def marquerConflit(self, conflit):
if conflit:
txt = u"Le nom du dossier copié est identique au dossier source !\n" \
u"Modifier l'emplacement du dossier ou le nom de la racine"
self.selecteur_DS.marquerErreur(txt)
self.ctrlTitre.SetBackgroundColour(("pink"))
self.ctrlTitre.SetToolTipString(txt)
else:
self.selecteur_DS.marquerErreur()
self.ctrlTitre.SetBackgroundColour(("white"))
self.ctrlTitre.SetToolTipString(u"")
#############################################################################
def MiseAJourFiltres(self, inc, exc = None, typ = "D"):
if typ == "D":
self.exclure_Dir = exc
self.inclure_Dir = inc
elif typ == "F":
self.exclure_Fic = exc
self.inclure_Fic = inc
elif typ == "T":
self.exclure_Typ = exc
self.inclure_Typ = inc
elif typ == None:
self.filtrerTypes = inc
#############################################################################
def OnClose(self, evt):
self.enregistrerCFG()
evt.Skip()
sys.exit()
#############################################################################
def ouvrirCFG(self):
if not os.path.isfile(FICHIER_CFG_USER):
return
config = ConfigParser.ConfigParser()
config.readfp(codecs.open(FICHIER_CFG_USER, "r", "utf8"))
try:
self.dossier = config.get(SECTION_FICH, "Dossier", u"")
except:
pass
try:
self.dossierSortie = config.get(SECTION_FICH, "DossierSortie", u"")
except:
pass
try:
self.nomFichier = config.get(SECTION_FICH, "Fichier", u"")
except:
pass
try:
self.titreCarte = config.get(SECTION_FICH, "Titre", u"")
except:
pass
try:
self.exclure_Dir = config.get(SECTION_FILTRE, "Exclure_Dir").split("\t")
except:
pass
try:
self.inclure_Dir = config.get(SECTION_FILTRE, "Inclure_Dir").split("\t")
except:
pass
try:
self.exclure_Fic = config.get(SECTION_FILTRE, "Exclure_Fic").split("\t")
except:
pass
try:
self.inclure_Fic = config.get(SECTION_FILTRE, "Inclure_Fic").split("\t")
except:
pass
try:
self.exclure_Typ = config.get(SECTION_FILTRE, "Exclure_Typ").split("\t")
except:
pass
try:
self.inclure_Typ = config.get(SECTION_FILTRE, "Inclure_Typ").split("\t")
except:
pass
try:
self.filtrerTypes = config.getboolean(SECTION_FILTRE, "Filtrer_Typ")
except:
pass
#############################################################################
def ouvrirTypes(self):
global SEPARATEUR, TYPES
if not os.path.isfile(FICHIER_TYPES):
if os.path.isfile(FICHIER_TYPES_ORG):
import shutil
shutil.copy(FICHIER_TYPES_ORG, FICHIER_TYPES)
else:
print "Fichier original", FICHIER_TYPES_ORG, "non trouvé"
TYPES = {}
SEPARATEUR = "_"
return
if not os.path.isfile(FICHIER_TYPES):
print "Fichier", FICHIER_TYPES, "non trouvé"
TYPES = {}
SEPARATEUR = "_"
return
config = ConfigParser.ConfigParser()
config.readfp(codecs.open(FICHIER_TYPES, "r", SYSTEM_ENCODING))
# config.read(FICHIER_TYPES)
SEPARATEUR = config.get("Format", "Separateur", u"")[1:-1]
TYPES = {}
i = 1
continuer = True
while continuer:
try :
t = config.get("Types", "T"+str(i))
p, n, f = t.split("#")
TYPES[p] = [n, f]
i += 1
except:
continuer = False
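        # Expected layout of the types file (sketch reconstructed from the
        # parsing above; the entry values are illustrative, not taken from a
        # real file): the separator is stored quoted, and each Tn entry is
        # 'prefix#name#marker':
        #
        #   [Format]
        #   Separateur = "_"
        #
        #   [Types]
        #   T1 = C#Cours#marker.png
        #   T2 = TP#Travaux pratiques#marker2.png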
#############################################################################
def enregistrerCFG(self):
config = ConfigParser.ConfigParser()
config.add_section(SECTION_FICH)
config.set(SECTION_FICH, "Dossier", self.dossier)
config.set(SECTION_FICH, "Fichier", self.nomFichier)
config.set(SECTION_FICH, "DossierSortie", self.dossierSortie)
config.set(SECTION_FICH, "Titre", self.titreCarte)
config.add_section(SECTION_FILTRE)
config.set(SECTION_FILTRE, "Exclure_Dir", "\t".join(self.exclure_Dir))
config.set(SECTION_FILTRE, "Inclure_Dir", "\t".join(self.inclure_Dir))
config.set(SECTION_FILTRE, "Exclure_Fic", "\t".join(self.exclure_Fic))
config.set(SECTION_FILTRE, "Inclure_Fic", "\t".join(self.inclure_Fic))
config.set(SECTION_FILTRE, "Exclure_Typ", "\t".join(self.exclure_Typ))
config.set(SECTION_FILTRE, "Inclure_Typ", "\t".join(self.inclure_Typ))
config.set(SECTION_FILTRE, "Filtrer_Typ", self.filtrerTypes)
config.write(open(FICHIER_CFG_USER,'w'))
class URLSelectorCombo(wx.Panel):
def __init__(self, parent, app, lien = "", typ = "D", ext = ""):
wx.Panel.__init__(self, parent, -1)
self.app = app
self.SetMaxSize((-1,22))
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.texte = wx.TextCtrl(self, -1, lien, size = (-1, 16))
if typ == "D":
            bt1 = wx.BitmapButton(self, 100, wx.ArtProvider_GetBitmap(wx.ART_FOLDER, wx.ART_OTHER, (16, 16)))
bt1.SetToolTipString(u"Sélectionner un dossier")
self.Bind(wx.EVT_BUTTON, self.OnClick, bt1)
self.Bind(wx.EVT_TEXT, self.EvtText, self.texte)
sizer.Add(bt1)
elif typ == "F":
            bt2 = wx.BitmapButton(self, 101, wx.ArtProvider_GetBitmap(wx.ART_NORMAL_FILE, wx.ART_OTHER, (16, 16)))
bt2.SetToolTipString(u"Sélectionner un fichier")
self.Bind(wx.EVT_BUTTON, self.OnClick, bt2)
self.Bind(wx.EVT_TEXT, self.EvtText, self.texte)
sizer.Add(bt2)
self.ext = u"Xmind (.xmind)|*.xmind|" \
u"Tous les fichiers|*.*'"
self.typ = typ
sizer.Add(self.texte,1,flag = wx.EXPAND)
self.SetSizerAndFit(sizer)
        # Keep the current path in self.lien: GetPath() and OnClick() read it,
        # so it must be initialised even before the first SetPath() call.
        self.lien = lien
    ##########################################################################################
    def OnClick(self, event):
if event.GetId() == 100:
dlg = wx.DirDialog(self, u"Sélectionner un dossier",
style=wx.DD_DEFAULT_STYLE,
defaultPath = self.lien
#| wx.DD_DIR_MUST_EXIST
#| wx.DD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
# print " ", dlg.GetPath()
self.SetPath(dlg.GetPath())
dlg.Destroy()
else:
dlg = wx.FileDialog(self, u"Sélectionner un fichier",
wildcard = self.ext,
# defaultPath = globdef.DOSSIER_EXEMPLES,
style = wx.DD_DEFAULT_STYLE
#| wx.DD_DIR_MUST_EXIST
#| wx.DD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
self.SetPath(dlg.GetPath())
dlg.Destroy()
self.SetFocus()
##########################################################################################
def EvtText(self, event):
self.SetPath(event.GetString())
##########################################################################################
def GetPath(self):
return self.lien
##########################################################################################
def SetPath(self, lien):
""" lien doit être de type 'String' encodé en SYSTEM_ENCODING
"""
# print "SetPath",self.typ, lien
if self.typ == "D":
if os.path.exists(lien) and os.path.isdir(lien):
                self.texte.ChangeValue(lien) # stored in DEFAUT_ENCODING
self.lien = lien
self.marquerErreur()
else:
self.marquerErreur(lien + u" n'est pas un dossier valide !")
self.lien = u""
else:
self.texte.ChangeValue(lien)
self.lien = lien
self.app.OnPathModified(self, self.lien)
self.Refresh()
##########################################################################################
def marquerErreur(self, message = None):
if message != None:
self.texte.SetBackgroundColour(("pink"))
self.texte.SetToolTipString(message)
else:
self.texte.SetBackgroundColour(("white"))
self.texte.SetToolTipString(u"")
##########################################################################################
def SetToolTipString(self, s):
self.texte.SetToolTipString(s)
import wx.lib.scrolledpanel as scrolled
class PanelInclureExclureTypes(scrolled.ScrolledPanel):
def __init__(self, parent, app, inclure = [], exclure = [],
filtrerTypes = False):
scrolled.ScrolledPanel.__init__(self, parent, -1)
self.SetupScrolling()
self.app = app
self.cbI = {}
self.cbE = {}
sizer = wx.GridBagSizer(5,2)
cbF = wx.CheckBox(self, -1, u"Activer le filtrage par type")
cbF.SetToolTipString(u"Permet de filter les fichiers selon des critères de type.\n" \
u"Si ce filtre est activé, seuls les fichiers \"typés\" seront sélectionnés.\n" \
u" => fichiers de la forme : 'préfixe' + 'séparateur' + 'suffixe'")
self.cbF = cbF
self.cbF.SetValue(filtrerTypes)
self.Bind(wx.EVT_CHECKBOX, self.EvtCheckBoxF, self.cbF)
sizer.Add(self.cbF, (0, 0), (1,4), flag = wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, border = 5)
sizer.Add(wx.StaticLine(self, -1), (1, 0), (1,4), flag = wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, border = 0)
t = wx.StaticText(self, -1, u"Inclure")
t.SetFont(FONT_IE)
sizer.Add(t, (3, 0), flag = wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.LEFT|wx.RIGHT, border = 5)
t = wx.StaticText(self, -1, u"Exclure")
t.SetFont(FONT_IE)
sizer.Add(t, (3, 1), flag = wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.LEFT|wx.RIGHT, border = 5)
t = wx.StaticText(self, -1, u"Préfixe")
t.SetFont(FONT_IE)
sizer.Add(t, (3, 2), flag = wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.LEFT|wx.RIGHT, border = 5)
t = wx.StaticText(self, -1, u"Type de document")
t.SetFont(FONT_IE)
sizer.Add(t, (3, 3), flag = wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.LEFT|wx.RIGHT, border = 5)
tte = u"Selectionner tous les types de document"
self.cbIt = wx.CheckBox(self, -1, u"")
self.cbIt.SetToolTipString(tte)
self.cbIt.SetValue(False)
self.Bind(wx.EVT_CHECKBOX, self.EvtCheckBox, self.cbIt)
self.cbEt = wx.CheckBox(self, -1, u"")
self.cbEt.SetToolTipString(tte)
self.cbEt.SetValue(False)
self.Bind(wx.EVT_CHECKBOX, self.EvtCheckBox, self.cbEt)
sizer.Add(self.cbIt, (2, 0), flag = wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.LEFT|wx.RIGHT|wx.TOP, border = 5)
sizer.Add(self.cbEt, (2, 1), flag = wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.LEFT|wx.RIGHT|wx.TOP, border = 5)
t = wx.StaticText(self, -1, u"Tous")
t.SetToolTipString(tte)
t.SetFont(FONT_ITALIC)
sizer.Add(t, (2, 2), (1, 2), flag = wx.EXPAND|wx.LEFT|wx.TOP, border = 5)
i = 4
for p, nm in TYPES.items():
self.cbI[p] = wx.CheckBox(self, -1, u"")
self.cbI[p].SetValue(p in inclure)
self.Bind(wx.EVT_CHECKBOX, self.EvtCheckBox, self.cbI[p])
self.cbE[p] = wx.CheckBox(self, -1, u"")
self.cbE[p].SetValue(p in exclure)
self.Bind(wx.EVT_CHECKBOX, self.EvtCheckBox, self.cbE[p])
sizer.Add(self.cbI[p], (i, 0), flag = wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.LEFT|wx.RIGHT, border = 5)
sizer.Add(self.cbE[p], (i, 1), flag = wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.LEFT|wx.RIGHT, border = 5)
sizer.Add(wx.StaticText(self, -1, p), (i, 2), flag = wx.EXPAND|wx.LEFT, border = 5)
sizer.Add(wx.StaticText(self, -1, nm[0]), (i, 3), flag = wx.EXPAND|wx.LEFT, border = 5)
i += 1
self.EvtCheckBoxF()
sizer.AddGrowableCol(3)
self.SetSizer(sizer)
##########################################################################################
def EvtCheckBox(self, event):
if event.GetEventObject() == self.cbIt:
for k in self.cbI.values():
k.SetValue(event.IsChecked())
elif event.GetEventObject() == self.cbEt:
for k in self.cbE.values():
k.SetValue(event.IsChecked())
inclure = [k for k in self.cbI.keys() if self.cbI[k].IsChecked()]
exclure = [k for k in self.cbE.keys() if self.cbE[k].IsChecked()]
self.cbIt.SetValue(len(inclure) == len(self.cbI))
self.cbEt.SetValue(len(exclure) == len(self.cbE))
self.app.MiseAJourFiltres(inclure, exclure, typ = "T")
##########################################################################################
def EvtCheckBoxF(self, event = None):
filtrerTypes = self.cbF.GetValue()
for k in self.cbI.values() + self.cbE.values():
k.Enable(filtrerTypes)
self.cbIt.Enable(filtrerTypes)
self.cbEt.Enable(filtrerTypes)
for c in self.GetChildren():
if isinstance(c, wx.StaticText):
c.Enable(filtrerTypes)
self.app.MiseAJourFiltres(filtrerTypes, typ = None)
class PanelInclureExclure(wx.Panel):
def __init__(self, parent, app, typ = "D", inclure = [], exclure = []):
wx.Panel.__init__(self, parent, -1)
self.inclure = inclure
self.exclure = exclure
self.typ = typ
self.app = app
ti = wx.StaticText(self, -1, u"Inclure")
ti.SetFont(FONT_IE)
te = wx.StaticText(self, -1, u"Exclure")
te.SetFont(FONT_IE)
if typ == "D": n = u"dossiers"
elif typ == "F": n = u"fichiers"
si = self.si = wx.TextCtrl(self, -1, u"\n".join(inclure), style=wx.TE_MULTILINE)
self.Bind(wx.EVT_TEXT, self.EvtText, si)
t_i = u"Spécifier les %s à inclure (les seuls qui figureront dans la structure)\n" \
u"exemples :\n" \
u"\t* \ttous les %s\n" \
u"\tC* \tseulement ceux qui commencent par un \"C\"" %(n, n)
if typ == "F":
t_i += u"\n\t*.pdf \tseulement les PDF\n"
si.SetToolTipString(t_i)
se = self.se = wx.TextCtrl(self, -1, u"\n".join(exclure), style=wx.TE_MULTILINE)
self.Bind(wx.EVT_TEXT, self.EvtText, se)
t_e = u"Spécifier les %s à exclure (ceux qui ne figureront pas dans la structure)\n" \
u"exemples :\n" \
u"\t* \ttous les %s\n" \
u"\tC* \tseulement ceux qui commencent par un \"C\"" %(n ,n)
if typ == "F":
t_e += u"\n\t*.pdf \tseulement les PDF\n"
se.SetToolTipString(t_e)
gbs = wx.GridBagSizer()
gbs.Add(ti, (0,0), flag = wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT, border = 4)
gbs.Add(te, (0,1), flag = wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT, border = 4)
gbs.Add(si, (1,0), flag = wx.EXPAND|wx.BOTTOM|wx.LEFT|wx.RIGHT, border = 4)
gbs.Add(se, (1,1), flag = wx.EXPAND|wx.BOTTOM|wx.LEFT|wx.RIGHT, border = 4)
gbs.AddGrowableRow(1)
self.SetSizer(gbs)
##########################################################################################
def EvtText(self, event):
s = event.GetString()
if event.GetEventObject() == self.si:
self.inclure = s.split("\n")
elif event.GetEventObject() == self.se:
self.exclure = s.split("\n")
self.app.MiseAJourFiltres(self.inclure, self.exclure, typ = self.typ)
import threading
import wx.lib.newevent
(UpdateProgressEvent, EVT_UPDATE_PROGRESS) = wx.lib.newevent.NewEvent()
class ThreadDossier(threading.Thread):
def __init__(self, frm, mode):
threading.Thread.__init__(self)
self.frm = frm
self.mode = mode
        self._stopevent = threading.Event()
##########################################################################################
def SetDlg(self, dlg):
self.dlg = dlg
##########################################################################################
def creerCarte(self, nomFichier, titreCarte, dossier):
# Version sdk
if os.path.splitext(nomFichier)[1] != ".xmind":
nomFichier = os.path.splitext(nomFichier)[0] + toSystemEncoding(".xmind")
xm = xmind.load(nomFichier)
first_sheet = xm.getPrimarySheet() # get the first sheet
first_sheet.setTitle(titreCarte) # set its title
root_topic = first_sheet.getRootTopic() # get the root topic of this sheet
root_topic.setTitle(titreCarte) # set its title
root_topic.setAttribute("structure-class", "org.xmind.ui.logic.right")
# Version mekk
# xmind = XMindDocument.create(titreCarte, titreCarte)
# first_sheet = xmind.get_first_sheet()
# root_topic = first_sheet.get_root_topic()
self.genererCarte(dossier, root_topic)
# mekk
# xmind.save(nomFichier)
# sdk
xmind.save(xm, nomFichier) # and we save
##########################################################################################
def fileOk(self, path, file):
""" Vérifie si le couple (path, file) passe le filtre
path et file doivent être encodés en FILE_ENCODING
"""
inclureF = getListNomGlob(path, self.frm.inclure_Fic)
exclureF = getListNomGlob(path, self.frm.exclure_Fic)
fil = toSystemEncoding(file)
typ = GetType(fil)
return not fil in exclureF and fil in inclureF \
and (not self.frm.filtrerTypes \
or (not typ in self.frm.exclure_Typ and (typ in self.frm.inclure_Typ or len(self.frm.inclure_Typ) == 0)))
##########################################################################################
def dirOk(self, path, file):
""" Vérifie si le couple (path, file) passe le filtre
path et file doivent être encodés en FILE_ENCODING
"""
inclureD = getListNomGlob(path, self.frm.inclure_Dir)
exclureD = getListNomGlob(path, self.frm.exclure_Dir)
fil = toSystemEncoding(file)
return not fil in exclureD and fil in inclureD
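        # Illustrative example (assuming getListNomGlob returns the names in
        # `path` that match the given glob patterns): with inclure_Dir == [u'*']
        # and exclure_Dir == [u'.git'], every sub-folder except '.git' passes
        # this filter. The pattern values are hypothetical.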
##########################################################################################
def genererCarte(self, path, topic):
if self._stopevent.isSet():
return
vide = True
if not os.path.exists(path) or len(path) > 255:
return
dirs = os.listdir(path)
for file in dirs:
path_file = os.path.join(path, file)
info_file = toSystemEncoding(path_file[len(self.frm.dossier):])
if os.path.isdir(path_file):
evt = UpdateProgressEvent(augmenter = 0, info = u"Dossier en cours de traitement :\n\n" + info_file,
message = None,
modeStop = False)
wx.PostEvent(self.dlg, evt)
if self.dirOk(path, file):
if len(os.listdir(path))>0:
# mekk
# t = topic.add_subtopic(file)
# sdk
t = TopicElement()
t.setTitle(toSystemEncoding(file))
#t.addMarker("Folder.png")
t.setFileHyperlink("file://" + toSystemEncoding(utils.get_abs_path(os.path.join(path, file))))
dv = self.genererCarte(path_file, t)
if EXCLURE_DOSSIERS_VIDE and not dv:
topic.addSubTopic(t)
vide = False
evt = UpdateProgressEvent(augmenter = fcount(path_file), info = None, message = None, modeStop = False)
wx.PostEvent(self.dlg, evt)
else:
evt = UpdateProgressEvent(augmenter = fcount(path_file), info = None, message = None, modeStop = False)
wx.PostEvent(self.dlg, evt)
else:
if self.fileOk(path, file):
ext = os.path.splitext(file)[1]
typ, nom = GetTypeNom(toSystemEncoding(file))
# mekk
# t = topic.add_subtopic(GetNomSimple(file, typ))
# t.set_link(os.path.join(path, file))
# t.add_marker(TYPES[typ][1])
# sdk
t = TopicElement()
if ext != "":
tx = nom.split(ext)[0]
else:
tx = nom
t.setTitle(tx)
t.setFileHyperlink("file://" + toSystemEncoding(utils.get_abs_path(os.path.join(path, file))))
if typ != None:
t.addMarker(TYPES[typ][1])
topic.addSubTopic(t)
vide = False
return vide
##########################################################################################
def genererDossier(self, path, sortie):
if self._stopevent.isSet():
return
vide = True
if not os.path.exists(path) or len(path) > 255:
return
dirs = os.listdir(path)
for file in dirs:
path_file = os.path.join(path, file)
info_file = ".." + toSystemEncoding(path_file[len(self.frm.dossier):])
if os.path.isdir(path_file):
evt = UpdateProgressEvent(augmenter = 0, info = u"Dossier en cours de traitement :\n\n" + info_file,
message = None, modeStop = False)
wx.PostEvent(self.dlg, evt)
if self.dirOk(path, file):
if len(os.listdir(path))>0:
os.chdir(sortie)
try:
os.mkdir(file)
except WindowsError as e:
print "WindowsError({0}): {1}".format(e.errno, e.strerror), e.filename
except:
print "Unexpected error:", sys.exc_info()[0]
raise
dv = self.genererDossier(path_file, os.path.join(sortie, file))
if EXCLURE_DOSSIERS_VIDE and not dv:
vide = False
else:
os.chdir(sortie)
try:
os.rmdir(file)
except:
pass
evt = UpdateProgressEvent(augmenter = 1, info = None, message = None, modeStop = False)
wx.PostEvent(self.dlg, evt)
else:
evt = UpdateProgressEvent(augmenter = fcount(path_file), info = None, message = None, modeStop = False)
wx.PostEvent(self.dlg, evt)
else:
if self.fileOk(path, file):
shutil.copy2(os.path.join(path, file), sortie)
vide = False
return vide
def run(self):
#
# Carte mentale
#
if self.mode == 0:
wx.BeginBusyCursor(wx.HOURGLASS_CURSOR)
nDossiers = fcount(self.frm.dossier)
self.dlg.SetMaxi(nDossiers)
evt = UpdateProgressEvent(augmenter = 0, info = None,
message = u"Génération de la carte mentale ...\n\n",
modeStop = False)
wx.PostEvent(self.dlg, evt)
self.creerCarte(toFileEncoding(self.frm.nomFichier),
self.frm.titreCarte,
self.frm.dossier)
wx.EndBusyCursor()
evt = UpdateProgressEvent(augmenter = 0, info = u"Fichier :\n" + toSystemEncoding(self.frm.nomFichier),
message = u"La carte mentale à été correctement générée\n\n",
modeStop = True)
wx.PostEvent(self.dlg, evt)
self.frm.boutonOuvrirXMind.Enable(True)
#
# Dossier clone
#
else:
wx.BeginBusyCursor(wx.HOURGLASS_CURSOR)
nDossiers = fcount(self.frm.dossier)
self.dlg.SetMaxi(nDossiers)
evt = UpdateProgressEvent(augmenter = 0, info = None,
message = u"Génération du dossier clone ...\n\n",
modeStop = False)
wx.PostEvent(self.dlg, evt)
self.genererDossier(toFileEncoding(self.frm.dossier), os.getcwd())
if not self._stopevent.isSet():
if self.frm.ajouterCarteMentale:
evt = UpdateProgressEvent(augmenter = 0, info = u"",
message = u"Génération de la carte mentale ...\n\n",
modeStop = False)
wx.PostEvent(self.dlg, evt)
f = toFileEncoding(self.frm.titreCarte)
fichierXmind = os.path.join(toFileEncoding(self.frm.dossierSortie),
f, f)
self.creerCarte(fichierXmind, self.frm.titreCarte,
os.path.join(toFileEncoding(self.frm.dossierSortie), f))
wx.EndBusyCursor()
evt = UpdateProgressEvent(augmenter = 0, info = u"Fichier :\n" + toSystemEncoding(self.frm.dossier),
message = u"Le dossier clone à été correctement générée\n\n",
modeStop = True)
wx.PostEvent(self.dlg, evt)
self.frm.boutonOuvrirDossier.Enable(os.path.exists(self.frm.dossierSortie))
def stop(self):
self._stopevent.set()
class ProgressFrame(wx.Frame):
def __init__(
self, parent, ID, title, thread, size=wx.DefaultSize, pos=wx.DefaultPosition,
style=wx.DEFAULT_DIALOG_STYLE,
):
wx.Frame.__init__(self, None, ID, title, pos, size)
panel = wx.Panel(self, -1)
self.thread = thread
self.Bind(EVT_UPDATE_PROGRESS, self.OnUpdate)
sizer = wx.BoxSizer(wx.VERTICAL)
self.label = wx.StaticText(panel, -1, u"\n")
self.label.SetFont(FONT_ACTION)
sizer.Add(self.label, 0, wx.ALIGN_LEFT|wx.ALL|wx.EXPAND, 5)
self.info = wx.StaticText(panel, -1, u"\n\n", style = wx.ST_ELLIPSIZE_START)
sizer.Add(self.info, 0, wx.ALIGN_LEFT|wx.ALL|wx.EXPAND, 5)
self.gauge = wx.Gauge(panel, -1, 1)
sizer.Add(self.gauge, 0, wx.ALIGN_CENTRE|wx.ALL|wx.EXPAND, 5)
self.count = 0
line = wx.StaticLine(panel, -1, size=(-1,-1), style=wx.LI_HORIZONTAL)
sizer.Add(line, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.TOP, 5)
self.btn = wx.Button(panel, -1, u"Annuler")
self.btn.SetHelpText(u"Annuler le traitement")
self.Bind(wx.EVT_BUTTON, self.OnClick, self.btn)
self.btn.SetDefault()
# btn.SetSize(btn.GetBestSize())
sizer.Add(self.btn, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
self.SetMinSize((250,120))
self.Layout()
panel.SetSizerAndFit(sizer)
# sizer.Fit(self)
##########################################################################################
def SetMaxi(self, maxi):
self.gauge.SetRange(maxi)
##########################################################################################
def OnUpdate(self, evt):
self.Augmenter(evt.augmenter)
if evt.info != None:
self.SetInfo(evt.info)
if evt.message != None:
self.SetMessage(evt.message)
if evt.modeStop:
self.btn.SetLabel(u"Terminé")
##########################################################################################
def OnClick(self, event):
        if self.btn.GetLabel()[0] == "T":   # the label is u"Terminé" once the task has finished
self.Destroy()
else:
self.thread.stop()
##########################################################################################
def Augmenter(self, n = 1):
self.count += n
# print self.count
self.gauge.SetValue(self.count)
self.Update()
self.Refresh()
##########################################################################################
def SetMessage(self, t):
# print t
self.label.SetLabelText(t)
self.Update()
self.Layout()
self.Refresh()
##########################################################################################
def SetInfo(self, t):
# print t
if t != u"":
self.info.SetLabel(t)
else:
self.info.SetLabel(u"")
# self.Fit()
# self.Update()
# self.Layout()
# self.Refresh()
def onerror(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=onerror)``
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
#############################################################################################################
def messageErreur(parent, titre, message):
dlg = wx.MessageDialog(parent, message, titre,
wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
if __name__ == '__main__':
app = wx.App()
FONT_IE = wx.Font(10, wx.SWISS, wx.NORMAL, wx.FONTWEIGHT_BOLD)
FONT_ACTION = wx.Font(11, wx.SWISS, wx.NORMAL, wx.FONTWEIGHT_BOLD)
FONT_ITALIC = wx.Font(10, wx.SWISS, wx.FONTSTYLE_ITALIC, wx.FONTWEIGHT_NORMAL)
NomFichier = None
    if len(sys.argv) > 1: # a parameter was passed on the command line
        parametre = sys.argv[1]
        # check that the path passed as a parameter is an existing directory
        if os.path.isdir(parametre):
            NomFichier = parametre
app.frame = pyXorgFrame(NomFichier)
app.frame.Show()
app.MainLoop()
| gpl-3.0 | -3,015,395,779,734,700,000 | 36.335425 | 131 | 0.481107 | false |
linyaoli/acm | tree/hard/implement_trie.prefix_tree.py | 2 | 1693 |
class TrieNode:
# Initialize your data structure here.
def __init__(self):
self.val = ""
self.count = 0
self.children = {}
class Trie:
def __init__(self):
self.root = TrieNode()
# @param {string} word
# @return {void}
# Inserts a word into the trie.
def insert(self, word):
node = self.root
for letter in word:
if letter not in node.children:
tmp = TrieNode()
node.children[letter] = tmp
node = tmp
else:
node = node.children[letter]
node.count += 1
# @param {string} word
# @return {boolean}
# Returns if the word is in the trie.
def search(self, word):
node = self.root
for letter in word:
if letter in node.children:
node = node.children[letter]
else:
return False
        # a full word (not just a prefix) ends here only if insert() bumped count
        return node.count > 0
# @param {string} prefix
# @return {boolean}
# Returns if there is any word in the trie
# that starts with the given prefix.
def startsWith(self, prefix):
node = self.root
for letter in prefix:
if letter in node.children:
node = node.children[letter]
else:
return False
return True
# Your Trie object will be instantiated and called as such:
trie = Trie()
trie.insert("somestring")
trie.insert("ab")
print trie.search("key")
print trie.search("a")
print trie.startsWith("some")
print trie.startsWith("a")
| gpl-2.0 | 102,015,981,198,967,100 | 24.873016 | 59 | 0.520969 | false |
nicolasmcnair/magpy | old/1.0/misc.py | 1 | 10162 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 07 14:08:10 2016
Miscellaneous magpy functions
@author: Nicolas McNair
"""
import multiprocessing
import serial
from sys import platform
#switch timer based on platform
if platform == 'win32':
# On Windows, use time.clock
from time import clock
default_timer = clock
else:
# On other platforms use time.time
from time import time
default_timer = time
class serialPortController(multiprocessing.Process):
"""
The class creates a Python process which has direct control of the serial port. Commands for relaying via the serial port are received from separate Python processes via Queues.
N.B. To start the process you must call start() from the parent Python process.
Args:
serialWriteQueue (multiprocessing.Queue): a Queue for receiving commands to be written to the Magstim unit via the serial port
serialReadQueue (multiprocessing.Queue): a Queue for returning automated replies from the Magstim unit when requested
"""
def __init__(self,address,serialWriteQueue,serialReadQueue):
multiprocessing.Process.__init__(self)
self.serialWriteQueue = serialWriteQueue
self.serialReadQueue = serialReadQueue
self.address = address
def run(self):
"""
Continuously monitor the serialWriteQueue for commands from other Python processes to be sent to the Magstim.
When requested, will return the automated reply from the Magstim unit to the calling process via the serialReadQueue.
N.B. This should be called via start() from the parent Python process.
"""
#N.B. most of these settings are actually the default in PySerial, but just being careful.
self.port = serial.Serial(port=self.address,
baudrate=9600,
bytesize=serial.EIGHTBITS,
stopbits=serial.STOPBITS_ONE,
parity=serial.PARITY_NONE,
xonxoff=False)
#Make sure the RTS pin is set to off
self.port.setRTS(False)
#Set up version compatibility
if int(serial.VERSION.split('.')[0]) >= 3:
self.port.write_timeout = 0.3
self.port.portFlush = self.port.reset_input_buffer
self.port.anyWaiting = lambda:self.port.in_waiting
else:
self.port.writeTimeout=0.3
self.port.portFlush = self.port.flushInput
self.port.anyWaiting = self.port.inWaiting
#This continually monitors the serialWriteQueue for write requests
while True:
            message, reply, readBytes = self.serialWriteQueue.get()
#If the first part of the message is None this signals the process to close the port and stop
if message is None:
break
#If the first part of the message is a 1 this signals the process to trigger a quick fire using the RTS pin
elif message == 1:
self.port.setRTS(True)
#If the first part of the message is a -1 this signals the process to reset the RTS pin
elif message == -1:
self.port.setRTS(False)
#Otherwise, the message is a command string
else:
#If there's any rubbish in the input buffer clear it out
if self.port.anyWaiting():
self.port.portFlush()
#Try writing to the port
try:
self.port.write(message)
except serial.SerialTimeoutException:
                    readBytes = 0
                    reply = False
self.serialReadQueue.put([1,'Timed out while sending command.'])
#If we want a reply, read the response from the Magstim and place it in the serialReadQueue
if reply:
try:
self.serialReadQueue.put([0,self.port.read(readBytes)])
except serial.SerialTimeoutException:
self.serialReadQueue.put([2,'Timed out while waiting for response.'])
#Otherwise just get rid of the reply from the input buffer
else:
self.port.read(readBytes)
#If we get here, it's time to shutdown the serial port controller
self.port.close()
return
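# Minimal wiring sketch for serialPortController (illustrative only: the port
# name is an assumption, and real use goes through the higher-level Magstim
# classes elsewhere in magpy; 'Q@n' is the same poke command used by
# connectionRobot below):
#
#   writeQueue, readQueue = multiprocessing.Queue(), multiprocessing.Queue()
#   controller = serialPortController('COM1', writeQueue, readQueue)
#   controller.start()
#   writeQueue.put(('Q@n', True, 3))    # send a command, expect a 3-byte reply
#   errorCode, reply = readQueue.get()
#   writeQueue.put((None, None, None))  # close the port and stop the process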
class connectionRobot(multiprocessing.Process):
"""
The class creates a Python process which sends an 'enable remote control' command to the Magstim via the serialPortController process every 500ms.
N.B. To start the process you must call start() from the parent Python process.
Args:
serialWriteQueue (multiprocessing.Queue): a Queue for sending commands to be written to the Magstim unit via the serialPortController process
updateTimeQueue (multiprocessing.Queue): a Queue for receiving requests from the parent Python process to delay sending its next command
"""
def __init__(self,serialWriteQueue,updateTimeQueue):
multiprocessing.Process.__init__(self)
self.serialWriteQueue = serialWriteQueue
self.updateTimeQueue = updateTimeQueue
self._stopped = False
self.nextPokeTime = None
def run(self):
"""
Continuously send commands to the serialPortController process every 500ms, while also monitoring the updateTimeQueue for commands from the parent Python process if this should be delayed.
N.B. This should be called via start() from the parent Python process.
"""
#This sends an "enable remote control" command to the serial port controller every 500ms
while not self._stopped:
self.serialWriteQueue.put(('Q@n',None,3))
self.nextPokeTime = default_timer() + 0.5
while default_timer() < self.nextPokeTime:
#Checks to see if there has been an update send from the parent magstim
if not self.updateTimeQueue.empty():
#If the message is None this signals the process to stop
if self.updateTimeQueue.get() is None:
self._stopped = True
break
#Any other message is signals a command has been sent to the serial port controller, so bump the next poke time by 500ms
else:
self.nextPokeTime = default_timer() + 0.5
#If we get here, it's time to shutdown the robot
return
#Calculate checksum for command
def calcCRC(command):
"""Return the CRC checksum for the command string."""
#Convert command string to sum of ASCII values
commandSum = sum(bytearray(command))
#Convert command sum to binary, then invert and return 8-bit character value
return chr(~commandSum & 0xff)
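# Worked example (the command comes from connectionRobot below, which pokes
# the unit with 'Q@n'): sum(bytearray('Q@')) == 145 and chr(~145 & 0xff) == 'n',
# so calcCRC('Q@') returns 'n' -- i.e. 'Q@' + calcCRC('Q@') == 'Q@n'.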
def parseMagstimResponse(responseString,responseType):
"""Interprets responses sent from the Magstim unit."""
#Get ASCII code of first data character
temp = ord(responseString.pop(0))
#Interpret bits
magstimResponse = {'instr':{'standby': temp & 1,
'armed': (temp >> 1) & 1,
'ready': (temp >> 2) & 1,
'coil_present': (temp >> 3) & 1,
'replace_coil': (temp >> 4) & 1,
'error_present': (temp >> 5) & 1,
'error_type': (temp >> 6) & 1,
'remote_status': (temp >> 7) & 1}}
#If a Rapid system and response includes rTMS status
if responseType in {'instr_rapid','rapid_param'}:
#Get ASCII code of second data character
temp = ord(responseString.pop(0))
#Interpret bits; Note: seventh bit is not used
magstimResponse['rapid'] = {'enhanced_power_mode': temp & 1,
'train': (temp >> 1) & 1,
'wait': (temp >> 2) & 1,
'single_pulse_mode': (temp >> 3) & 1,
'hvpsu_connected': (temp >> 4) & 1,
'coil_ready': (temp >> 5) & 1,
'modified_coil_algorithm': (temp >> 7) & 1}
#If requesting parameter settings or coil temperature
if responseType == 'bistim_param':
magstimResponse['bistim_param'] = {'power_a': int(''.join(responseString[0:3])),
'power_b': int(''.join(responseString[3:6])),
'pp_offset': int(''.join(responseString[6:9]))}
elif responseType == 'magstim_param':
magstimResponse['magstim_param'] = {'power': int(''.join(responseString[0:3]))}
elif responseType == 'rapid_param':
magstimResponse['rapid_param'] = {'power': int(''.join(responseString[0:3])),
'frequency': int(''.join(responseString[3:7])) / 10.0,
'n_pulses': int(''.join(responseString[7:11])),
'duration': int(''.join(responseString[11:14])) / 10.0,
'wait': int(''.join(responseString[14:17]))}
elif responseType == 'magstim_temp':
magstimResponse['magstim_temp'] = {'coil1_temp': int(''.join(responseString[0:3])) / 10.0,
'coil2_temp': int(''.join(responseString[3:6])) / 10.0}
return magstimResponse
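# Usage sketch (the response byte 0x05 is illustrative, not a recorded
# Magstim reply): for an 'instr' response the first byte is unpacked bitwise,
# so parseMagstimResponse(list('\x05'), 'instr')['instr'] yields
# {'standby': 1, 'armed': 0, 'ready': 1, ...}.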
| gpl-3.0 | -6,650,430,580,335,798,000 | 49.065327 | 196 | 0.551269 | false |
trac-ja/trac-ja | trac/admin/tests/console.py | 3 | 54118 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Tim Moloney <[email protected]>
import difflib
import os
import re
import sys
import unittest
from StringIO import StringIO
# IAdminCommandProvider implementations
import trac.admin.api
import trac.attachment
import trac.perm
import trac.ticket.admin
import trac.versioncontrol.admin
import trac.versioncontrol.api
import trac.versioncontrol.web_ui
import trac.wiki.admin
# IPermissionRequestor implementations (for 'permission' related tests)
import trac.about
import trac.admin.web_ui
import trac.config
import trac.ticket.api
import trac.ticket.report
import trac.ticket.roadmap
import trac.ticket.web_ui
import trac.search.web_ui
import trac.timeline.web_ui
import trac.wiki.web_ui
from trac.admin import console, console_date_format
from trac.test import EnvironmentStub
from trac.util.datefmt import format_date, get_date_format_hint
from trac.web.tests.session import _prep_session_table
STRIP_TRAILING_SPACE = re.compile(r'( +)$', re.MULTILINE)
def load_expected_results(file, pattern):
"""Reads the file, named file, which contains test results separated by the
regular expression pattern.
The test results are returned as a dictionary.
"""
expected = {}
compiled_pattern = re.compile(pattern)
f = open(file, 'r')
for line in f:
line = line.rstrip().decode('utf-8')
match = compiled_pattern.search(line)
if match:
test = match.groups()[0]
expected[test] = ''
else:
expected[test] += line + '\n'
f.close()
return expected
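# The expected-results file consumed above is a sequence of blocks, each
# introduced by a separator line matched by `pattern` (sketch, following the
# '===== (test_...) =====' pattern used by TracadminTestCase below):
#
#   ===== test_help_ok =====
#   <expected trac-admin output>
#   ===== test_attachment_list_empty =====
#   <expected trac-admin output>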
class TracadminTestCase(unittest.TestCase):
"""
Tests the output of trac-admin and is meant to be used with
.../trac/tests.py.
"""
expected_results = load_expected_results(
os.path.join(os.path.split(__file__)[0], 'console-tests.txt'),
'===== (test_[^ ]+) =====')
def setUp(self):
self.env = EnvironmentStub(default_data=True, enable=('trac.*',),
disable=('trac.tests.*',))
self._admin = console.TracAdmin()
self._admin.env_set('', self.env)
# Set test date to 11th Jan 2004
self._test_date = '2004-01-11'
def tearDown(self):
self.env = None
def _execute(self, cmd, strip_trailing_space=True, input=None):
_in = sys.stdin
_err = sys.stderr
_out = sys.stdout
try:
if input:
sys.stdin = StringIO(input.encode('utf-8'))
sys.stdin.encoding = 'utf-8' # fake input encoding
sys.stderr = sys.stdout = out = StringIO()
out.encoding = 'utf-8' # fake output encoding
retval = None
try:
retval = self._admin.onecmd(cmd)
except SystemExit:
pass
value = out.getvalue()
if isinstance(value, str): # reverse what print_listing did
value = value.decode('utf-8')
# DEBUG: uncomment in case of `AssertionError: 0 != 2` in tests
#if retval != 0:
# print>>_err, value
if strip_trailing_space:
return retval, STRIP_TRAILING_SPACE.sub('', value)
else:
return retval, value
finally:
sys.stdin = _in
sys.stderr = _err
sys.stdout = _out
def assertEqual(self, expected_results, output):
if not (isinstance(expected_results, basestring) and \
isinstance(output, basestring)):
return unittest.TestCase.assertEqual(self, expected_results, output)
def diff():
# Create a useful delta between the output and the expected output
output_lines = ['%s\n' % x for x in output.split('\n')]
expected_lines = ['%s\n' % x for x in expected_results.split('\n')]
return ''.join(difflib.unified_diff(expected_lines, output_lines,
'expected', 'actual'))
if '[...]' in expected_results:
m = re.match(expected_results.replace('[...]', '.*'), output,
re.MULTILINE)
unittest.TestCase.assertTrue(self, m,
"%r != %r\n%s" % (expected_results,
output, diff()))
else:
unittest.TestCase.assertEqual(self, expected_results, output,
"%r != %r\n%s" % (expected_results,
output, diff()))
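    # Note: a '[...]' placeholder in expected_results is rewritten to the
    # regexp '.*' by assertEqual() above, so it matches any run of characters
    # (not crossing a newline) at that position.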
# Help test
def test_help_ok(self):
"""
Tests the 'help' command in trac-admin. Since the 'help' command
has no command arguments, it is hard to call it incorrectly. As
a result, there is only this one test.
"""
from trac import __version__
test_name = sys._getframe().f_code.co_name
d = {'version': __version__,
'date_format_hint': get_date_format_hint()}
expected_results = self.expected_results[test_name] % d
rv, output = self._execute('help')
self.assertEqual(0, rv)
self.assertEqual(expected_results, output)
# Attachment tests
def test_attachment_list_empty(self):
"""
Tests the 'attachment list' command in trac-admin, on a wiki page that
doesn't have any attachments.
"""
# FIXME: Additional tests should be written for the other 'attachment'
# commands. This requires being able to control the current
# time, which in turn would require centralizing the time
# provider, for example in the environment object.
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('attachment list wiki:WikiStart')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
# Config tests
def test_config_get(self):
"""
Tests the 'config get' command in trac-admin. This particular
test gets the project name from the config.
"""
test_name = sys._getframe().f_code.co_name
self.env.config.set('project', 'name', 'Test project')
rv, output = self._execute('config get project name')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_config_set(self):
"""
Tests the 'config set' command in trac-admin. This particular
test sets the project name using an option value containing a space.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('config set project name "Test project"')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
self.assertEqual('Test project',
self.env.config.get('project', 'name'))
def test_config_remove(self):
"""
Tests the 'config remove' command in trac-admin. This particular
test removes the project name from the config, therefore reverting
the option to the default value.
"""
test_name = sys._getframe().f_code.co_name
self.env.config.set('project', 'name', 'Test project')
rv, output = self._execute('config remove project name')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
self.assertEqual('My Project', self.env.config.get('project', 'name'))
# Permission tests
def test_permission_list_ok(self):
"""
Tests the 'permission list' command in trac-admin. Since this command
has no command arguments, it is hard to call it incorrectly. As
a result, there is only this one test.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('permission list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_permission_add_one_action_ok(self):
"""
Tests the 'permission add' command in trac-admin. This particular
test passes valid arguments to add one permission and checks for
success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('permission add test_user WIKI_VIEW')
rv, output = self._execute('permission list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_permission_add_multiple_actions_ok(self):
"""
Tests the 'permission add' command in trac-admin. This particular
test passes valid arguments to add multiple permissions and checks for
success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('permission add test_user LOG_VIEW FILE_VIEW')
rv, output = self._execute('permission list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_permission_add_already_exists(self):
"""
Tests the 'permission add' command in trac-admin. This particular
test passes a permission that already exists and checks for the
message. Other permissions passed are added.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('permission add anonymous WIKI_CREATE '
'WIKI_VIEW WIKI_MODIFY')
self.assertEqual(0, rv)
rv, output2 = self._execute('permission list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output + output2)
def test_permission_remove_one_action_ok(self):
"""
Tests the 'permission remove' command in trac-admin. This particular
test passes valid arguments to remove one permission and checks for
success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('permission remove anonymous TICKET_MODIFY')
rv, output = self._execute('permission list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_permission_remove_multiple_actions_ok(self):
"""
Tests the 'permission remove' command in trac-admin. This particular
test passes valid arguments to remove multiple permission and checks
for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('permission remove anonymous WIKI_CREATE WIKI_MODIFY')
rv, output = self._execute('permission list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_permission_remove_all_actions_for_user(self):
"""
Tests the 'permission remove' command in trac-admin. This particular
test removes all permissions for anonymous.
"""
test_name = sys._getframe().f_code.co_name
self._execute('permission remove anonymous *')
rv, output = self._execute('permission list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_permission_remove_action_for_all_users(self):
"""
Tests the 'permission remove' command in trac-admin. This particular
test removes the TICKET_CREATE permission from all users.
"""
test_name = sys._getframe().f_code.co_name
self._execute('permission add anonymous TICKET_CREATE')
self._execute('permission remove * TICKET_CREATE')
rv, output = self._execute('permission list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_permission_remove_unknown_user(self):
"""
Tests the 'permission remove' command in trac-admin. This particular
test tries removing a permission from an unknown user.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('permission remove joe TICKET_VIEW')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_permission_remove_action_not_granted(self):
"""
Tests the 'permission remove' command in trac-admin. This particular
test tries removing TICKET_CREATE from user anonymous, who doesn't
have that permission.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('permission remove anonymous TICKET_CREATE')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_permission_export_ok(self):
"""
Tests the 'permission export' command in trac-admin. This particular
test exports the default permissions to stdout.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('permission export')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_permission_import_ok(self):
"""
Tests the 'permission import' command in trac-admin. This particular
test exports additional permissions, removes them and imports them back.
"""
test_name = sys._getframe().f_code.co_name
user = u'test_user\u0250'
self._execute('permission add ' + user + ' WIKI_VIEW')
self._execute('permission add ' + user + ' TICKET_VIEW')
rv, output = self._execute('permission export')
self._execute('permission remove ' + user + ' *')
rv, output = self._execute('permission import', input=output)
self.assertEqual(0, rv)
self.assertEqual('', output)
rv, output = self._execute('permission list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
# Component tests
def test_component_list_ok(self):
"""
Tests the 'component list' command in trac-admin. Since this command
has no command arguments, it is hard to call it incorrectly. As
a result, there is only this one test.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('component list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_component_add_ok(self):
"""
Tests the 'component add' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('component add new_component new_user')
rv, output = self._execute('component list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_component_add_error_already_exists(self):
"""
Tests the 'component add' command in trac-admin. This particular
test passes a component name that already exists and checks for an
error message.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('component add component1 new_user')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_component_rename_ok(self):
"""
Tests the 'component rename' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('component rename component1 changed_name')
rv, output = self._execute('component list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_component_rename_error_bad_component(self):
"""
Tests the 'component rename' command in trac-admin. This particular
test tries to rename a component that does not exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('component rename bad_component changed_name')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_component_rename_error_bad_new_name(self):
"""
Tests the 'component rename' command in trac-admin. This particular
test tries to rename a component to a name that already exists.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('component rename component1 component2')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_component_chown_ok(self):
"""
Tests the 'component chown' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('component chown component2 changed_owner')
rv, output = self._execute('component list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_component_chown_error_bad_component(self):
"""
Tests the 'component chown' command in trac-admin. This particular
test tries to change the owner of a component that does not
exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('component chown bad_component changed_owner')
self.assertEqual(2, rv)
        # We currently trigger a deprecation warning with py26 so we
        # can currently only verify that the end of the output string is
        # correct
self.assertEqual(output.endswith(self.expected_results[test_name]), True)
def test_component_remove_ok(self):
"""
Tests the 'component remove' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('component remove component1')
rv, output = self._execute('component list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_component_remove_error_bad_component(self):
"""
Tests the 'component remove' command in trac-admin. This particular
test tries to remove a component that does not exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('component remove bad_component')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
# Ticket-type tests
def test_ticket_type_list_ok(self):
"""
Tests the 'ticket_type list' command in trac-admin. Since this command
has no command arguments, it is hard to call it incorrectly. As
a result, there is only this one test.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('ticket_type list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_ticket_type_add_ok(self):
"""
Tests the 'ticket_type add' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('ticket_type add new_type')
rv, output = self._execute('ticket_type list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_ticket_type_add_error_already_exists(self):
"""
Tests the 'ticket_type add' command in trac-admin. This particular
test passes a ticket type that already exists and checks for an error
message.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('ticket_type add defect')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_ticket_type_change_ok(self):
"""
Tests the 'ticket_type change' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('ticket_type change defect bug')
rv, output = self._execute('ticket_type list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_ticket_type_change_error_bad_type(self):
"""
Tests the 'ticket_type change' command in trac-admin. This particular
        test tries to change a ticket type that does not exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('ticket_type change bad_type changed_type')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_ticket_type_change_error_bad_new_name(self):
"""
Tests the 'ticket_type change' command in trac-admin. This particular
test tries to change a ticket type to another type that already exists.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('ticket_type change defect task')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_ticket_type_remove_ok(self):
"""
Tests the 'ticket_type remove' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('ticket_type remove task')
rv, output = self._execute('ticket_type list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_ticket_type_remove_error_bad_type(self):
"""
Tests the 'ticket_type remove' command in trac-admin. This particular
test tries to remove a ticket type that does not exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('ticket_type remove bad_type')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_ticket_type_order_down_ok(self):
"""
Tests the 'ticket_type order' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('ticket_type order defect down')
rv, output = self._execute('ticket_type list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_ticket_type_order_up_ok(self):
"""
Tests the 'ticket_type order' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('ticket_type order enhancement up')
rv, output = self._execute('ticket_type list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_ticket_type_order_error_bad_type(self):
"""
        Tests the 'ticket_type order' command in trac-admin. This particular
        test tries to reorder a ticket type that does not exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('ticket_type order bad_type up')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
# Priority tests
def test_priority_list_ok(self):
"""
Tests the 'priority list' command in trac-admin. Since this command
has no command arguments, it is hard to call it incorrectly. As
a result, there is only this one test.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('priority list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_priority_add_ok(self):
"""
Tests the 'priority add' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('priority add new_priority')
rv, output = self._execute('priority list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_priority_add_many_ok(self):
"""
Tests adding more than 10 priority values. This makes sure that
ordering is preserved when adding more than 10 values.
"""
test_name = sys._getframe().f_code.co_name
for i in xrange(11):
self._execute('priority add p%s' % i)
rv, output = self._execute('priority list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_priority_add_error_already_exists(self):
"""
Tests the 'priority add' command in trac-admin. This particular
test passes a priority name that already exists and checks for an
error message.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('priority add blocker')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_priority_change_ok(self):
"""
Tests the 'priority change' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('priority change major normal')
rv, output = self._execute('priority list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_priority_change_error_bad_priority(self):
"""
Tests the 'priority change' command in trac-admin. This particular
test tries to change a priority that does not exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('priority change bad_priority changed_name')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_priority_change_error_bad_new_name(self):
"""
Tests the 'priority change' command in trac-admin. This particular
test tries to change a priority to a name that already exists.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('priority change major minor')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_priority_remove_ok(self):
"""
Tests the 'priority remove' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('priority remove major')
rv, output = self._execute('priority list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_priority_remove_error_bad_priority(self):
"""
Tests the 'priority remove' command in trac-admin. This particular
test tries to remove a priority that does not exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('priority remove bad_priority')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_priority_order_down_ok(self):
"""
Tests the 'priority order' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('priority order blocker down')
rv, output = self._execute('priority list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_priority_order_up_ok(self):
"""
Tests the 'priority order' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('priority order critical up')
rv, output = self._execute('priority list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_priority_order_error_bad_priority(self):
"""
Tests the 'priority order' command in trac-admin. This particular
test tries to reorder a priority that does not exist.
"""
test_name = sys._getframe().f_code.co_name
        rv, output = self._execute('priority order bad_priority up')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
# Severity tests
def test_severity_list_ok(self):
"""
Tests the 'severity list' command in trac-admin. Since this command
has no command arguments, it is hard to call it incorrectly. As
a result, there is only this one test.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('severity list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_severity_add_ok(self):
"""
Tests the 'severity add' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('severity add new_severity')
rv, output = self._execute('severity list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_severity_add_error_already_exists(self):
"""
Tests the 'severity add' command in trac-admin. This particular
test passes a severity name that already exists and checks for an
error message.
"""
test_name = sys._getframe().f_code.co_name
self._execute('severity add blocker')
rv, output = self._execute('severity add blocker')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_severity_change_ok(self):
"""
        Tests the 'severity change' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('severity add critical')
self._execute('severity change critical "end-of-the-world"')
rv, output = self._execute('severity list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_severity_change_error_bad_severity(self):
"""
Tests the 'severity change' command in trac-admin. This particular
test tries to change a severity that does not exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('severity change bad_severity changed_name')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_severity_change_error_bad_new_name(self):
"""
Tests the 'severity change' command in trac-admin. This particular
test tries to change a severity to a name that already exists.
"""
test_name = sys._getframe().f_code.co_name
self._execute('severity add major')
self._execute('severity add critical')
rv, output = self._execute('severity change critical major')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_severity_remove_ok(self):
"""
        Tests the 'severity remove' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('severity remove trivial')
rv, output = self._execute('severity list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_severity_remove_error_bad_severity(self):
"""
Tests the 'severity remove' command in trac-admin. This particular
test tries to remove a severity that does not exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('severity remove bad_severity')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_severity_order_down_ok(self):
"""
Tests the 'severity order' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('severity add foo')
self._execute('severity add bar')
self._execute('severity order foo down')
rv, output = self._execute('severity list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_severity_order_up_ok(self):
"""
Tests the 'severity order' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('severity add foo')
self._execute('severity add bar')
self._execute('severity order bar up')
rv, output = self._execute('severity list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_severity_order_error_bad_severity(self):
"""
Tests the 'severity order' command in trac-admin. This particular
        test tries to reorder a severity that does not exist.
"""
test_name = sys._getframe().f_code.co_name
        rv, output = self._execute('severity order bad_severity up')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
# Version tests
def test_version_list_ok(self):
"""
Tests the 'version list' command in trac-admin. Since this command
has no command arguments, it is hard to call it incorrectly. As
a result, there is only this one test.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('version list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_version_add_ok(self):
"""
Tests the 'version add' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('version add 9.9 "%s"' % self._test_date)
rv, output = self._execute('version list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_version_add_error_already_exists(self):
"""
Tests the 'version add' command in trac-admin. This particular
test passes a version name that already exists and checks for an
error message.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('version add 1.0 "%s"' % self._test_date)
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_version_rename_ok(self):
"""
Tests the 'version rename' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('version rename 1.0 9.9')
rv, output = self._execute('version list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_version_rename_error_bad_version(self):
"""
Tests the 'version rename' command in trac-admin. This particular
test tries to rename a version that does not exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('version rename bad_version changed_name')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_version_time_ok(self):
"""
Tests the 'version time' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('version time 2.0 "%s"' % self._test_date)
rv, output = self._execute('version list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_version_time_unset_ok(self):
"""
Tests the 'version time' command in trac-admin. This particular
test passes valid arguments for unsetting the date.
"""
test_name = sys._getframe().f_code.co_name
self._execute('version time 2.0 "%s"' % self._test_date)
self._execute('version time 2.0 ""')
rv, output = self._execute('version list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_version_time_error_bad_version(self):
"""
Tests the 'version time' command in trac-admin. This particular
test tries to change the time on a version that does not exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('version time bad_version "%s"'
% self._test_date)
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_version_remove_ok(self):
"""
Tests the 'version remove' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('version remove 1.0')
rv, output = self._execute('version list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_version_remove_error_bad_version(self):
"""
Tests the 'version remove' command in trac-admin. This particular
test tries to remove a version that does not exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('version remove bad_version')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
# Milestone tests
def test_milestone_list_ok(self):
"""
Tests the 'milestone list' command in trac-admin. Since this command
has no command arguments, it is hard to call it incorrectly. As
a result, there is only this one test.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('milestone list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_milestone_add_ok(self):
"""
Tests the 'milestone add' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('milestone add new_milestone "%s"' % self._test_date)
rv, output = self._execute('milestone list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_milestone_add_utf8_ok(self):
"""
Tests the 'milestone add' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute(u'milestone add \xa9tat_final "%s"' #\xc2\xa9
% self._test_date)
rv, output = self._execute('milestone list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_milestone_add_error_already_exists(self):
"""
Tests the 'milestone add' command in trac-admin. This particular
test passes a milestone name that already exists and checks for an
error message.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('milestone add milestone1 "%s"'
% self._test_date)
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_milestone_rename_ok(self):
"""
Tests the 'milestone rename' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('milestone rename milestone1 changed_milestone')
rv, output = self._execute('milestone list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_milestone_rename_error_bad_milestone(self):
"""
Tests the 'milestone rename' command in trac-admin. This particular
test tries to rename a milestone that does not exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('milestone rename bad_milestone changed_name')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_milestone_due_ok(self):
"""
Tests the 'milestone due' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('milestone due milestone2 "%s"' % self._test_date)
rv, output = self._execute('milestone list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_milestone_due_unset_ok(self):
"""
Tests the 'milestone due' command in trac-admin. This particular
test passes valid arguments for unsetting the due date.
"""
test_name = sys._getframe().f_code.co_name
self._execute('milestone due milestone2 "%s"' % self._test_date)
self._execute('milestone due milestone2 ""')
rv, output = self._execute('milestone list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_milestone_due_error_bad_milestone(self):
"""
Tests the 'milestone due' command in trac-admin. This particular
test tries to change the due date on a milestone that does not exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('milestone due bad_milestone "%s"'
% self._test_date)
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_milestone_completed_ok(self):
"""
Tests the 'milestone completed' command in trac-admin. This particular
test passes valid arguments and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('milestone completed milestone2 "%s"' % self._test_date)
rv, output = self._execute('milestone list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_milestone_completed_error_bad_milestone(self):
"""
Tests the 'milestone completed' command in trac-admin. This particular
test tries to change the completed date on a milestone that does not
exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('milestone completed bad_milestone "%s"'
% self._test_date)
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_milestone_remove_ok(self):
"""
Tests the 'milestone remove' command in trac-admin. This particular
test passes a valid argument and checks for success.
"""
test_name = sys._getframe().f_code.co_name
self._execute('milestone remove milestone3')
rv, output = self._execute('milestone list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_milestone_remove_error_bad_milestone(self):
"""
Tests the 'milestone remove' command in trac-admin. This particular
test tries to remove a milestone that does not exist.
"""
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('milestone remove bad_milestone')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_backslash_use_ok(self):
test_name = sys._getframe().f_code.co_name
if self._admin.interactive:
self._execute('version add \\')
else:
self._execute(r"version add '\'")
rv, output = self._execute('version list')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_session_list_no_sessions(self):
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('session list authenticated')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_session_list_authenticated(self):
test_name = sys._getframe().f_code.co_name
_prep_session_table(self.env)
rv, output = self._execute('session list authenticated')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_session_list_anonymous(self):
test_name = sys._getframe().f_code.co_name
_prep_session_table(self.env)
rv, output = self._execute('session list anonymous')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_session_list_all(self):
test_name = sys._getframe().f_code.co_name
_prep_session_table(self.env)
if self._admin.interactive:
rv, output = self._execute("session list *")
else:
rv, output = self._execute("session list '*'")
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_session_list_authenticated_sid(self):
test_name = sys._getframe().f_code.co_name
_prep_session_table(self.env)
rv, output = self._execute('session list name00')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_session_list_anonymous_sid(self):
test_name = sys._getframe().f_code.co_name
_prep_session_table(self.env)
rv, output = self._execute('session list name10:0')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_session_list_missing_sid(self):
test_name = sys._getframe().f_code.co_name
_prep_session_table(self.env)
rv, output = self._execute('session list thisdoesntexist')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_session_add_missing_sid(self):
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('session add')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_session_add_duplicate_sid(self):
test_name = sys._getframe().f_code.co_name
_prep_session_table(self.env)
rv, output = self._execute('session add name00')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_session_add_sid_all(self):
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('session add john John [email protected]')
self.assertEqual(0, rv)
rv, output = self._execute('session list john')
self.assertEqual(self.expected_results[test_name]
% {'today': format_date(None, console_date_format)},
output)
def test_session_add_sid(self):
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('session add john')
self.assertEqual(0, rv)
rv, output = self._execute('session list john')
self.assertEqual(self.expected_results[test_name]
% {'today': format_date(None, console_date_format)},
output)
def test_session_add_sid_name(self):
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('session add john John')
self.assertEqual(0, rv)
rv, output = self._execute('session list john')
self.assertEqual(self.expected_results[test_name]
% {'today': format_date(None, console_date_format)},
output)
def test_session_set_attr_name(self):
test_name = sys._getframe().f_code.co_name
_prep_session_table(self.env)
rv, output = self._execute('session set name name00 JOHN')
self.assertEqual(0, rv)
rv, output = self._execute('session list name00')
self.assertEqual(self.expected_results[test_name], output)
def test_session_set_attr_email(self):
test_name = sys._getframe().f_code.co_name
_prep_session_table(self.env)
rv, output = self._execute('session set email name00 [email protected]')
self.assertEqual(0, rv)
rv, output = self._execute('session list name00')
self.assertEqual(self.expected_results[test_name], output)
def test_session_set_attr_missing_attr(self):
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('session set')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_session_set_attr_missing_value(self):
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('session set name john')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_session_set_attr_missing_sid(self):
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('session set name')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_session_set_attr_nonexistent_sid(self):
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('session set name john foo')
self.assertEqual(2, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_session_delete_sid(self):
test_name = sys._getframe().f_code.co_name
_prep_session_table(self.env)
rv, output = self._execute('session delete name00')
self.assertEqual(0, rv)
        rv, output = self._execute('session list name00')
self.assertEqual(self.expected_results[test_name], output)
def test_session_delete_missing_params(self):
test_name = sys._getframe().f_code.co_name
rv, output = self._execute('session delete')
self.assertEqual(0, rv)
self.assertEqual(self.expected_results[test_name], output)
def test_session_delete_anonymous(self):
test_name = sys._getframe().f_code.co_name
_prep_session_table(self.env)
rv, output = self._execute('session delete anonymous')
self.assertEqual(0, rv)
rv, output = self._execute('session list *')
self.assertEqual(self.expected_results[test_name], output)
def test_session_delete_multiple_sids(self):
test_name = sys._getframe().f_code.co_name
_prep_session_table(self.env)
rv, output = self._execute('session delete name00 name01 name02 '
'name03')
self.assertEqual(0, rv)
rv, output = self._execute('session list *')
self.assertEqual(self.expected_results[test_name], output)
def test_session_purge_age(self):
test_name = sys._getframe().f_code.co_name
_prep_session_table(self.env, spread_visits=True)
rv, output = self._execute('session purge 20100112')
self.assertEqual(0, rv)
rv, output = self._execute('session list *')
self.assertEqual(self.expected_results[test_name], output)
def suite():
return unittest.makeSuite(TracadminTestCase, 'test')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 6,559,324,094,399,952,000 | 40.693374 | 81 | 0.622307 | false |
tempbottle/ironpython3 | Tests/compat/sbs_exceptions/shared.py | 3 | 14427 | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from common import runtests, printwith
import sys
import nt
class CodeHolder(object):
def __init__(self):
self.text = ''
self.depth = 0
def Add(self, text):
self.text += text
class try_finally_generator(object):
def __init__(self, codeHolder, tryBody, finallyBody):
self.code = codeHolder
self.tryBody = tryBody
self.finallyBody = finallyBody
def generate(self, indent=1):
self.code.Add(' '*indent); self.code.Add('try:\n')
self.code.Add(' '*(indent+1));self.code.Add('log+="try"\n')
self.tryBody.generate(indent+1)
self.code.Add(' '*indent); self.code.Add('finally:\n')
self.code.Add(' '*(indent+1));self.code.Add('log+="finally"\n')
self.code.Add(' '*(indent+1));self.code.Add('log+=dump_exc_info()\n')
self.finallyBody.generate(indent+1)
class try_except_generator(object):
def __init__(self, codeHolder, tryBody, exceptBody):
self.code = codeHolder
self.tryBody = tryBody
self.exceptBody = exceptBody
def generate(self, indent=1):
self.code.Add(' '*indent); self.code.Add('try:\n')
self.code.Add(' '*(indent+1));self.code.Add('log+="try"\n')
self.tryBody.generate(indent+1)
self.code.Add(' '*indent); self.code.Add('except:\n')
self.code.Add(' '*(indent+1));self.code.Add('log+="except"\n')
self.code.Add(' '*(indent+1));self.code.Add('log+=dump_exc_info()\n')
self.exceptBody.generate(indent+1)
class try_except_else_generator(object):
def __init__(self, codeHolder, tryBody, exceptBody, elseBody):
self.code = codeHolder
self.tryBody = tryBody
self.exceptBody = exceptBody
self.elseBody = elseBody
def generate(self, indent=1):
self.code.Add(' '*indent); self.code.Add('try:\n')
self.code.Add(' '*(indent+1));self.code.Add('log+="try"\n')
self.tryBody.generate(indent+1)
self.code.Add(' '*indent); self.code.Add('except:\n')
self.code.Add(' '*(indent+1));self.code.Add('log+="except"\n')
self.code.Add(' '*(indent+1));self.code.Add('log+=dump_exc_info()\n')
self.exceptBody.generate(indent+1)
self.code.Add(' '*indent); self.code.Add('else:\n')
self.code.Add(' '*(indent+1));self.code.Add('log+="else"\n')
self.code.Add(' '*(indent+1));self.code.Add('log+=dump_exc_info()\n')
self.elseBody.generate(indent+1)
class for_loop_generator(object):
def __init__(self, codeHolder, var, items, body):
self.code = codeHolder
self.var = var
self.items = items
self.body = body
def generate(self, indent=1):
self.code.Add(' '*indent);self.code.Add('log+="preloop"\n')
self.code.Add(' '*indent);self.code.Add('for %s in %s:\n' % (self.var+str(indent), self.items))
self.code.Add(' '*(indent+1));self.code.Add('log+="inloop"\n')
self.body.generate(indent+1)
class while_loop_generator(object):
def __init__(self, codeHolder, body):
self.code = codeHolder
self.body = body
def generate(self, indent=1):
global uniqueCount
self.code.Add(' '*indent);self.code.Add('log+="preloop"\n')
self.code.Add(' '*indent);self.code.Add('whilevar%d_%d = 0\n' % (indent, uniqueCount))
self.code.Add(' '*indent);self.code.Add('while whilevar%d_%d < 3:\n' % (indent, uniqueCount))
self.code.Add(' '*(indent+1));self.code.Add('whilevar%d_%d += 1\n' % (indent, uniqueCount))
self.code.Add(' '*(indent+1));self.code.Add('log+="inloop"\n')
uniqueCount += 1
self.body.generate(indent+1)
class pass_generator(object):
def __init__(self, codeHolder):
self.code = codeHolder
def generate(self, indent=1):
self.code.Add(' '*indent);self.code.Add('log+="pass"\n')
self.code.Add(' '*indent);self.code.Add('pass\n')
class break_generator(object):
def __init__(self, codeHolder):
self.code = codeHolder
def generate(self, indent=1):
self.code.Add(' '*indent);self.code.Add('log+="break"\n')
self.code.Add(' '*indent);self.code.Add('break\n')
class continue_generator(object):
def __init__(self, codeHolder):
self.code = codeHolder
def generate(self, indent=1):
self.code.Add(' '*indent);self.code.Add('log+="continue"\n')
self.code.Add(' '*indent);self.code.Add('continue\n')
class return_generator(object):
def __init__(self, codeHolder, state):
self.code = codeHolder
self.state = state
def generate(self, indent=1):
self.code.Add(' '*indent);self.code.Add('log+="return"\n')
self.code.Add(' '*indent);self.code.Add('return %s\n' % self.state)
class if_false_generator(object):
def __init__(self, codeHolder, body):
self.code = codeHolder
self.body = body
def generate(self, indent=1):
self.code.Add(' '*indent);self.code.Add('log+="preif"\n')
self.code.Add(' '*indent);self.code.Add('if False:\n')
self.code.Add(' '*(indent+1));self.code.Add('log+="huh?"\n')
self.body.generate(indent+1)
class if_true_generator(object):
def __init__(self, codeHolder, body):
self.code = codeHolder
self.body = body
def generate(self, indent=1):
self.code.Add(' '*indent);self.code.Add('log+="preif"\n')
self.code.Add(' '*indent);self.code.Add('if True:\n')
self.code.Add(' '*(indent+1));self.code.Add('log+="true!"\n')
self.body.generate(indent+1)
class yield_generator(object):
def __init__(self, codeHolder, state):
self.code = codeHolder
self.state = state
def generate(self, indent=1):
self.code.Add(' '*indent);self.code.Add('log+="yield"\n')
self.code.Add(' '*indent);self.code.Add('yield %s\n' % self.state)
class raise_generator(object):
def __init__(self, codeHolder, state):
self.code = codeHolder
self.state = state
def generate(self, indent=1):
self.code.Add(' '*indent);self.code.Add('raise Exception("%s")\n' % self.state)
class define_generator(object):
def __init__(self, codeHolder, body):
self.code = codeHolder
self.body = body
def generate(self, indent=1):
global uniqueCount
saved = uniqueCount
uniqueCount += 1
self.code.Add(' '*indent);self.code.Add('log+="predefine"\n')
self.code.Add(' '*indent);self.code.Add('def func%d_%d():\n' % (indent, saved))
self.code.Add(' '*(indent+1));self.code.Add('global log\n')
self.body.generate(indent+1)
self.code.Add(' '*indent);self.code.Add('func%d_%d()\n' % (indent, saved))
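# For orientation (illustrative): the makers below compose these generators
# into indented source text, one space per nesting level; a try/finally
# wrapped around pass bodies, for example, comes out roughly as:
#  try:
#   log+="try"
#   log+="pass"
#   pass
#  finally:
#   log+="finally"
#   log+=dump_exc_info()
#   log+="pass"
#   pass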
ch = CodeHolder()
def for_loop_maker(ch, body):
return for_loop_generator(ch, 'x', 'range(3)', body)
def while_loop_maker(ch, body):
return while_loop_generator(ch, body)
def try_except_maker1(ch, body):
return try_except_generator(ch, pass_generator(ch), body)
def try_except_maker2(ch, body):
return try_except_generator(ch, body, pass_generator(ch))
def try_except_maker3(ch, body):
return try_except_generator(ch, body, body)
def try_finally_maker1(ch, body):
return try_finally_generator(ch, pass_generator(ch), body)
def try_finally_maker2(ch, body):
return try_finally_generator(ch, body, pass_generator(ch))
def try_finally_maker3(ch, body):
return try_finally_generator(ch, body, body)
def try_else_maker1(ch, body):
return try_except_else_generator(ch, pass_generator(ch), body, body)
def pass_maker(ch, body):
return pass_generator(ch)
def break_maker(ch, body):
return break_generator(ch)
def continue_maker(ch, body):
return continue_generator(ch)
def define_maker(ch, body):
return define_generator(ch, body)
def generator(): yield 2
generator_type = type(generator())
loopCnt = 0
generatorDepth = 0
yieldState = 0
finallyCnt = 0
tryOrCatchCount = 0
uniqueCount = 0
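# Module-level state shared by the code generators above and the test driver
# below: loopCnt, finallyCnt and tryOrCatchCount track how deeply the
# generated source is nested inside loops / finally / except blocks (break,
# continue and yield are only emitted where legal), yieldState supplies a
# unique value to each raise/return/yield, and uniqueCount keeps generated
# variable and function names distinct.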
def dump_exc_info():
tb = sys.exc_info()[2]
tb_list = []
while tb is not None :
f = tb.tb_frame
co = f.f_code
filename = co.co_filename
        # Shrink the filename a bit
if filename.count("\\"):
filename = filename.rsplit("\\", 1)[1]
name = co.co_name
tb_list.append((tb.tb_lineno, filename, name))
tb = tb.tb_next
return str(tb_list)
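# dump_exc_info() returns a stringified list of (lineno, filename, funcname)
# tuples for the active traceback, e.g. (illustrative) "[(42, 'shared.py',
# 'test1')]", or "[]" when no exception is currently being handled.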
#------------------------------------------------------------------------------
allGenerators = []
def setGenerator(generator):
global allGenerators
allGenerators = [ generator ]
knownFailures = []
def setKnownFailures(failures):
global knownFailures
knownFailures = failures
class test_exceptions(object):
def test_exceptions(self):
if len(allGenerators)==0:
raise Exception("Need at least one generator from test_exceptions")
stateful = [raise_generator, return_generator]
ch = CodeHolder()
        curTest = 0 # a counter so that tracebacks have unique method names and we check leftover info properly
for depth in xrange(3):
def do_generate(test):
global loopCnt, yieldState, finallyCnt, tryOrCatchCount, uniqueCount
yieldState += 1
if test in (for_loop_maker, while_loop_maker):
loopCnt += 1
if test in (try_finally_maker1, try_finally_maker2, try_finally_maker3):
finallyCnt += 1
if test in (try_except_maker1, try_except_maker2, try_except_maker3, try_else_maker1):
tryOrCatchCount += 1
genSet = [ for_loop_maker, while_loop_maker,
try_except_maker1, try_except_maker2, try_except_maker3,
try_finally_maker1, try_finally_maker2, try_finally_maker3,
try_else_maker1,
if_false_generator, if_true_generator,
define_maker,
]
if loopCnt > 0:
if finallyCnt > 0: genSet = genSet + [break_maker]
else: genSet = genSet + [break_maker, continue_maker]
if ch.depth > depth:
yield test(ch, pass_generator(ch))
else:
for testCase in genSet:
ch.depth += 1
x = do_generate(testCase)
for body in x:
yield test(ch, body)
ch.depth -= 1
for statefulGuy in stateful:
yield test(ch, statefulGuy(ch, yieldState))
if finallyCnt == 0:
yield test(ch, yield_generator(ch, yieldState))
if test in (for_loop_maker, while_loop_maker):
loopCnt -= 1
if test in (try_finally_maker1, try_finally_maker2, try_finally_maker3):
finallyCnt -= 1
if test in (try_except_maker1, try_except_maker2, try_except_maker3, try_else_maker1):
tryOrCatchCount -= 1
for testCase in allGenerators:
x = do_generate(testCase)
for y in x:
curTest += 1
if curTest in knownFailures:
continue
if 'IRONPYTHON_RUNSLOWTESTS' in nt.environ:
uniqueCount = 0
# run without a function
y.code.text = ''
y.generate(0)
y.code.text += 'print log'
d = {'log': '', 'dump_exc_info': dump_exc_info}
try:
#printwith(y.code.text)
exec y.code.text in d, d
except Exception, e:
printwith('same', sys.exc_type)
uniqueCount = 0
# run within a function
y.code.text = 'def test' + str(curTest) + '():\n'
y.code.text += ' global log\n'
y.generate()
d = {'log' : '', 'dump_exc_info': dump_exc_info}
try:
printwith(y.code.text)
exec y.code.text in d, d
except SyntaxError:
printwith("same", sys.exc_type)
continue
retval = None
try:
retval = d['test' + str(curTest)]()
if isinstance(retval, generator_type):
for it in retval: printwith('same', it)
else:
printwith('same', retval)
except:
printwith("same", sys.exc_type)
if isinstance(retval, generator_type):
retval.close()
                printwith('same', d['log'])
| apache-2.0 | 685,047,043,244,113,400 | 37.784946 | 113 | 0.528246 | false |
mpetyx/django-rdfGraph | graphBackend/graphBackend/api/rdfResource.py | 1 | 3651 | __author__ = 'mpetyx'
from tastypie.resources import Resource
from tastypie.authorization import Authorization
from tastypie.bundle import Bundle
from tastypie import fields
from rdfConnector import rdfClient
from RdfObject import RdfObject
"""
or this is also working
https://gist.github.com/mhluongo/5789513
taken from http://stackoverflow.com/questions/16920180/create-rest-api-with-neo4j-and-django
"""
class PersonResource(Resource):
# Just like a Django ``Form`` or ``Model``, we're defining all the
# fields we're going to handle with the API here.
uuid = fields.CharField(attribute='uuid')
user_uuid = fields.CharField(attribute='user_uuid')
age = fields.CharField(attribute='age')
created = fields.IntegerField(attribute='created')
class Meta:
resource_name = 'person'
object_class = RdfObject
authorization = Authorization()
# Specific to this resource, just to get the needed Riak bits.
def _client(self):
return rdfClient()
def _bucket(self):
client = self._client()
        # Every bucket could infer its name from the resource and back the
        # resource with its own graph: e.g. 'person' could use a
        # person_bucket_graph that inherits the resource schema and is
        # connected to it when new records are added.
return client.bucket('rdfbucket')
# The following methods will need overriding regardless of your
# data source.
def detail_uri_kwargs(self, bundle_or_obj):
kwargs = {}
if isinstance(bundle_or_obj, Bundle):
kwargs['pk'] = bundle_or_obj.obj.uuid
else:
kwargs['pk'] = bundle_or_obj.uuid
return kwargs
def get_object_list(self, request):
# query = self._client().add('rdfbucket')
# query.map("function(v) { var data = JSON.parse(v.values[0].data); return [[v.key, data]]; }")
# results = []
#
# for result in query.run():
# new_obj = RdfObject(initial=result[1])
# new_obj.uuid = result[0]
# results.append(new_obj)
results = []
example = {}
example['age'] = 2
example['user_uuid'] = 3
example['uuid'] = 56
example['created'] = 45
example = RdfObject(initial=example)
results.append(example)
return results
def obj_get_list(self, request=None, **kwargs):
# Filtering disabled for brevity...
return self.get_object_list(request)
def obj_get(self, request=None, **kwargs):
bucket = self._bucket()
message = bucket.get(kwargs['pk'])
return RdfObject(initial=message.get_data())
def obj_create(self, bundle, request=None, **kwargs):
bundle.obj = RdfObject(initial=kwargs)
bundle = self.full_hydrate(bundle)
bucket = self._bucket()
# new_message = bucket.new(bundle.obj.uuid, data=bundle.obj.to_dict())
# new_message.store()
import pprint
temp = bundle.obj.to_dict()
pprint.pprint(temp)
print temp['age']
return bundle
def obj_update(self, bundle, request=None, **kwargs):
return self.obj_create(bundle, request, **kwargs)
def obj_delete_list(self, request=None, **kwargs):
bucket = self._bucket()
for key in bucket.get_keys():
obj = bucket.get(key)
obj.delete()
def obj_delete(self, request=None, **kwargs):
bucket = self._bucket()
obj = bucket.get(kwargs['pk'])
obj.delete()
def rollback(self, bundles):
        pass
| mit | 3,618,065,893,409,287,000 | 30.482759 | 122 | 0.620926 | false |
larsks/cloud-init | cloudinit/config/cc_ubuntu_drivers.py | 2 | 5800 | # This file is part of cloud-init. See LICENSE file for license information.
"""Ubuntu Drivers: Interact with third party drivers in Ubuntu."""
import os
from textwrap import dedent
from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit import temp_utils
from cloudinit import type_utils
from cloudinit import util
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
distros = ['ubuntu']
schema = {
'id': 'cc_ubuntu_drivers',
'name': 'Ubuntu Drivers',
'title': 'Interact with third party drivers in Ubuntu.',
'description': dedent("""\
This module interacts with the 'ubuntu-drivers' command to install
third party driver packages."""),
'distros': distros,
'examples': [dedent("""\
drivers:
nvidia:
license-accepted: true
""")],
'frequency': frequency,
'type': 'object',
'properties': {
'drivers': {
'type': 'object',
'additionalProperties': False,
'properties': {
'nvidia': {
'type': 'object',
'additionalProperties': False,
'required': ['license-accepted'],
'properties': {
'license-accepted': {
'type': 'boolean',
'description': ("Do you accept the NVIDIA driver"
" license?"),
},
'version': {
'type': 'string',
'description': (
'The version of the driver to install (e.g.'
' "390", "410"). Defaults to the latest'
' version.'),
},
},
},
},
},
},
}
OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = (
"ubuntu-drivers: error: argument <command>: invalid choice: 'install'")
__doc__ = get_schema_doc(schema) # Supplement python help()
# Use a debconf template to configure a global debconf variable
# (linux/nvidia/latelink) setting this to "true" allows the
# 'linux-restricted-modules' deb to accept the NVIDIA EULA and the package
# will automatically link the drivers to the running kernel.
# EOL_XENIAL: can then drop this script and use python3-debconf which is only
# available in Bionic and later. Can't use python3-debconf currently as it
# isn't in Xenial and doesn't yet support X_LOADTEMPLATEFILE debconf command.
NVIDIA_DEBCONF_CONTENT = """\
Template: linux/nvidia/latelink
Type: boolean
Default: true
Description: Late-link NVIDIA kernel modules?
Enable this to link the NVIDIA kernel modules in cloud-init and
make them available for use.
"""
NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT = """\
#!/bin/sh
# Allow cloud-init to trigger EULA acceptance via registering a debconf
# template to set linux/nvidia/latelink true
. /usr/share/debconf/confmodule
db_x_loadtemplatefile "$1" cloud-init
"""
def install_drivers(cfg, pkg_install_func):
if not isinstance(cfg, dict):
raise TypeError(
"'drivers' config expected dict, found '%s': %s" %
(type_utils.obj_name(cfg), cfg))
cfgpath = 'nvidia/license-accepted'
# Call translate_bool to ensure that we treat string values like "yes" as
# acceptance and _don't_ treat string values like "nah" as acceptance
# because they're True-ish
nv_acc = util.translate_bool(util.get_cfg_by_path(cfg, cfgpath))
if not nv_acc:
LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc)
return
if not util.which('ubuntu-drivers'):
LOG.debug("'ubuntu-drivers' command not available. "
"Installing ubuntu-drivers-common")
pkg_install_func(['ubuntu-drivers-common'])
driver_arg = 'nvidia'
version_cfg = util.get_cfg_by_path(cfg, 'nvidia/version')
if version_cfg:
driver_arg += ':{}'.format(version_cfg)
LOG.debug("Installing and activating NVIDIA drivers (%s=%s, version=%s)",
cfgpath, nv_acc, version_cfg if version_cfg else 'latest')
# Register and set debconf selection linux/nvidia/latelink = true
tdir = temp_utils.mkdtemp(needs_exe=True)
debconf_file = os.path.join(tdir, 'nvidia.template')
debconf_script = os.path.join(tdir, 'nvidia-debconf.sh')
try:
util.write_file(debconf_file, NVIDIA_DEBCONF_CONTENT)
util.write_file(
debconf_script,
util.encode_text(NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT),
mode=0o755)
util.subp([debconf_script, debconf_file])
except Exception as e:
util.logexc(
LOG, "Failed to register NVIDIA debconf template: %s", str(e))
raise
finally:
if os.path.isdir(tdir):
util.del_dir(tdir)
try:
util.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg])
except util.ProcessExecutionError as exc:
if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr:
LOG.warning('the available version of ubuntu-drivers is'
' too old to perform requested driver installation')
elif 'No drivers found for installation.' in exc.stdout:
LOG.warning('ubuntu-drivers found no drivers for installation')
raise
def handle(name, cfg, cloud, log, _args):
if "drivers" not in cfg:
log.debug("Skipping module named %s, no 'drivers' key in config", name)
return
validate_cloudconfig_schema(cfg, schema)
install_drivers(cfg['drivers'], cloud.distro.install_packages)
| gpl-3.0 | -5,457,773,030,827,138,000 | 35.25 | 79 | 0.605517 | false |
codekaki/odoo | addons/warning/warning.py | 13 | 14284 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields,osv
from openerp.tools.translate import _
WARNING_MESSAGE = [
('no-message','No Message'),
('warning','Warning'),
('block','Blocking Message')
]
WARNING_HELP = _('Selecting the "Warning" option will notify the user with the message, selecting "Blocking Message" will throw an exception with the message and block the flow. The message has to be written in the next field.')
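# Each onchange_* override below returns the standard client warning payload
# when the partner or product carries a non-'no-message' setting, roughly
# (illustrative): {'value': {...}, 'warning': {'title': 'Warning for <name>',
# 'message': <configured text>}}; the 'block' level additionally clears the
# triggering field so the flow cannot proceed.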
class res_partner(osv.osv):
_inherit = 'res.partner'
_columns = {
'sale_warn' : fields.selection(WARNING_MESSAGE, 'Sales Order', help=WARNING_HELP, required=True),
'sale_warn_msg' : fields.text('Message for Sales Order'),
'purchase_warn' : fields.selection(WARNING_MESSAGE, 'Purchase Order', help=WARNING_HELP, required=True),
'purchase_warn_msg' : fields.text('Message for Purchase Order'),
'picking_warn' : fields.selection(WARNING_MESSAGE, 'Stock Picking', help=WARNING_HELP, required=True),
'picking_warn_msg' : fields.text('Message for Stock Picking'),
'invoice_warn' : fields.selection(WARNING_MESSAGE, 'Invoice', help=WARNING_HELP, required=True),
'invoice_warn_msg' : fields.text('Message for Invoice'),
}
_defaults = {
'sale_warn' : 'no-message',
'purchase_warn' : 'no-message',
'picking_warn' : 'no-message',
'invoice_warn' : 'no-message',
}
res_partner()
class sale_order(osv.osv):
_inherit = 'sale.order'
def onchange_partner_id(self, cr, uid, ids, part, context=None):
if not part:
return {'value':{'partner_invoice_id': False, 'partner_shipping_id':False, 'payment_term' : False}}
warning = {}
title = False
message = False
partner = self.pool.get('res.partner').browse(cr, uid, part, context=context)
if partner.sale_warn != 'no-message':
title = _("Warning for %s") % partner.name
message = partner.sale_warn_msg
warning = {
'title': title,
'message': message,
}
if partner.sale_warn == 'block':
return {'value': {'partner_id': False}, 'warning': warning}
result = super(sale_order, self).onchange_partner_id(cr, uid, ids, part, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
sale_order()
class purchase_order(osv.osv):
_inherit = 'purchase.order'
def onchange_partner_id(self, cr, uid, ids, part):
if not part:
return {'value':{'partner_address_id': False}}
warning = {}
title = False
message = False
partner = self.pool.get('res.partner').browse(cr, uid, part)
if partner.purchase_warn != 'no-message':
title = _("Warning for %s") % partner.name
message = partner.purchase_warn_msg
warning = {
'title': title,
'message': message
}
if partner.purchase_warn == 'block':
return {'value': {'partner_id': False}, 'warning': warning}
result = super(purchase_order, self).onchange_partner_id(cr, uid, ids, part)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
purchase_order()
class account_invoice(osv.osv):
_inherit = 'account.invoice'
def onchange_partner_id(self, cr, uid, ids, type, partner_id,
date_invoice=False, payment_term=False, partner_bank_id=False, company_id=False):
if not partner_id:
return {'value': {
'account_id': False,
'payment_term': False,
}
}
warning = {}
title = False
message = False
partner = self.pool.get('res.partner').browse(cr, uid, partner_id)
if partner.invoice_warn != 'no-message':
title = _("Warning for %s") % partner.name
message = partner.invoice_warn_msg
warning = {
'title': title,
'message': message
}
if partner.invoice_warn == 'block':
return {'value': {'partner_id': False}, 'warning': warning}
result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id,
date_invoice=date_invoice, payment_term=payment_term,
partner_bank_id=partner_bank_id, company_id=company_id)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
account_invoice()
class stock_picking(osv.osv):
_inherit = 'stock.picking'
def onchange_partner_in(self, cr, uid, ids, partner_id=None, context=None):
if not partner_id:
return {}
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
warning = {}
title = False
message = False
if partner.picking_warn != 'no-message':
title = _("Warning for %s") % partner.name
message = partner.picking_warn_msg
warning = {
'title': title,
'message': message
}
if partner.picking_warn == 'block':
return {'value': {'partner_id': False}, 'warning': warning}
result = super(stock_picking, self).onchange_partner_in(cr, uid, ids, partner_id, context)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
stock_picking()
# FIXME:(class stock_picking_in and stock_picking_out) this is a temporary workaround because of a framework bug (ref: lp:996816).
# It should be removed as soon as the bug is fixed
class stock_picking_in(osv.osv):
_inherit = 'stock.picking.in'
def onchange_partner_in(self, cr, uid, ids, partner_id=None, context=None):
if not partner_id:
return {}
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
warning = {}
title = False
message = False
if partner.picking_warn != 'no-message':
title = _("Warning for %s") % partner.name
message = partner.picking_warn_msg
warning = {
'title': title,
'message': message
}
if partner.picking_warn == 'block':
return {'value': {'partner_id': False}, 'warning': warning}
result = super(stock_picking_in, self).onchange_partner_in(cr, uid, ids, partner_id, context)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
class stock_picking_out(osv.osv):
_inherit = 'stock.picking.out'
def onchange_partner_in(self, cr, uid, ids, partner_id=None, context=None):
if not partner_id:
return {}
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
warning = {}
title = False
message = False
if partner.picking_warn != 'no-message':
title = _("Warning for %s") % partner.name
message = partner.picking_warn_msg
warning = {
'title': title,
'message': message
}
if partner.picking_warn == 'block':
return {'value': {'partner_id': False}, 'warning': warning}
result = super(stock_picking_out, self).onchange_partner_in(cr, uid, ids, partner_id, context)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
class product_product(osv.osv):
_inherit = 'product.product'
_columns = {
'sale_line_warn' : fields.selection(WARNING_MESSAGE,'Sales Order Line', help=WARNING_HELP, required=True),
'sale_line_warn_msg' : fields.text('Message for Sales Order Line'),
'purchase_line_warn' : fields.selection(WARNING_MESSAGE,'Purchase Order Line', help=WARNING_HELP, required=True),
'purchase_line_warn_msg' : fields.text('Message for Purchase Order Line'),
}
_defaults = {
'sale_line_warn' : 'no-message',
'purchase_line_warn' : 'no-message',
}
product_product()
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False,
fiscal_position=False, flag=False, context=None):
warning = {}
if not product:
return {'value': {'th_weight' : 0, 'product_packaging': False,
'product_uos_qty': qty}, 'domain': {'product_uom': [],
'product_uos': []}}
product_obj = self.pool.get('product.product')
product_info = product_obj.browse(cr, uid, product)
title = False
message = False
if product_info.sale_line_warn != 'no-message':
title = _("Warning for %s") % product_info.name
message = product_info.sale_line_warn_msg
warning['title'] = title
warning['message'] = message
if product_info.sale_line_warn == 'block':
return {'value': {'product_id': False}, 'warning': warning}
result = super(sale_order_line, self).product_id_change( cr, uid, ids, pricelist, product, qty,
uom, qty_uos, uos, name, partner_id,
lang, update_tax, date_order, packaging, fiscal_position, flag, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+result['warning']['title'] or result['warning']['title']
warning['message'] = message and message +'\n\n'+result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
sale_order_line()
class purchase_order_line(osv.osv):
_inherit = 'purchase.order.line'
def onchange_product_id(self,cr, uid, ids, pricelist, product, qty, uom,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, context=None):
warning = {}
if not product:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom or False}, 'domain':{'product_uom':[]}}
product_obj = self.pool.get('product.product')
product_info = product_obj.browse(cr, uid, product)
title = False
message = False
if product_info.purchase_line_warn != 'no-message':
title = _("Warning for %s") % product_info.name
message = product_info.purchase_line_warn_msg
warning['title'] = title
warning['message'] = message
if product_info.purchase_line_warn == 'block':
return {'value': {'product_id': False}, 'warning': warning}
result = super(purchase_order_line, self).onchange_product_id(cr, uid, ids, pricelist, product, qty, uom,
partner_id, date_order, fiscal_position_id, date_planned, name, price_unit, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+result['warning']['title'] or result['warning']['title']
warning['message'] = message and message +'\n\n'+result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
purchase_order_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,694,981,993,264,431,000 | 41.260355 | 224 | 0.578339 | false |
nabsboss/CouchPotatoServer | couchpotato/core/plugins/score/main.py | 5 | 1798 | from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.plugins.score.scores import nameScore, nameRatioScore, \
sizeScore, providerScore, duplicateScore, partialIgnoredScore, namePositionScore, \
halfMultipartScore
log = CPLog(__name__)
class Score(Plugin):
def __init__(self):
addEvent('score.calculate', self.calculate)
def calculate(self, nzb, movie):
''' Calculate the score of a NZB, used for sorting later '''
score = nameScore(toUnicode(nzb['name']), movie['library']['year'])
for movie_title in movie['library']['titles']:
score += nameRatioScore(toUnicode(nzb['name']), toUnicode(movie_title['title']))
score += namePositionScore(toUnicode(nzb['name']), toUnicode(movie_title['title']))
score += sizeScore(nzb['size'])
# Torrents only
if nzb.get('seeders'):
try:
score += nzb.get('seeders') / 5
score += nzb.get('leechers') / 10
except:
pass
# Provider score
score += providerScore(nzb['provider'])
# Duplicates in name
score += duplicateScore(nzb['name'], getTitle(movie['library']))
# Partial ignored words
score += partialIgnoredScore(nzb['name'], getTitle(movie['library']))
# Ignore single downloads from multipart
score += halfMultipartScore(nzb['name'])
# Extra provider specific check
extra_score = nzb.get('extra_score')
if extra_score:
score += extra_score(nzb)
return score
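    # Illustrative call (data hypothetical): the plugin is wired to the
    # 'score.calculate' event, and calculate() expects dicts shaped roughly
    # like:
    #   Score().calculate(
    #       {'name': 'Movie.2010.1080p', 'size': 4500, 'provider': 'x',
    #        'seeders': 50, 'leechers': 10},
    #       {'library': {'year': 2010, 'titles': [{'title': 'Movie'}]}})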
| gpl-3.0 | 8,710,509,199,096,340,000 | 32.296296 | 95 | 0.634594 | false |
zenoss/pywbem | tests/unittest/pywbem/test_cim_xml.py | 1 | 55852 | """
Unit tests for pywbem cim_xml.py module.
"""
from __future__ import absolute_import
try:
from collections.abc import Iterable
except ImportError: # py2
from collections import Iterable
import unittest
import six
import pytest
from pywbem import cim_xml
from ..utils.validate import validate_cim_xml, CIMXMLValidationError
from ..utils.pytest_extensions import simplified_test_function
def iter_flattened(lst):
"""
Flatten the arbitrarily nested input list of lists, and yield each
resulting list item.
"""
for item in lst:
if isinstance(item, Iterable) and \
not isinstance(item, six.string_types):
for sub_item in iter_flattened(item):
yield sub_item
else:
yield item
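# Illustrative behavior (strings are treated as atoms, not iterables):
#   list(iter_flattened(['a', ['b', ['c']], 'd'])) == ['a', 'b', 'c', 'd']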
def sample_LOCALNAMESPACEPATH_node():
"""
Return a sample LOCALNAMESPACEPATH as a cim_xml node.
Must match the result of the corresponding ..._str() function.
"""
return cim_xml.LOCALNAMESPACEPATH(
[cim_xml.NAMESPACE('root'),
cim_xml.NAMESPACE('cimv2')])
def sample_LOCALNAMESPACEPATH_str():
"""
Return a sample LOCALNAMESPACEPATH as a list of XML strings.
Must match the result of the corresponding ..._node() function.
"""
return [
'<LOCALNAMESPACEPATH>',
'<NAMESPACE NAME="root"/>',
'<NAMESPACE NAME="cimv2"/>',
'</LOCALNAMESPACEPATH>',
]
def sample_NAMESPACEPATH_node():
"""
Return a sample NAMESPACEPATH as a cim_xml node.
Must match the result of the corresponding ..._str() function.
"""
return cim_xml.NAMESPACEPATH(
cim_xml.HOST('leonardo'),
sample_LOCALNAMESPACEPATH_node())
def sample_NAMESPACEPATH_str():
"""
Return a sample NAMESPACEPATH as a (nested) list of XML strings.
Must match the result of the corresponding ..._node() function.
"""
return [
'<NAMESPACEPATH>',
'<HOST>leonardo</HOST>',
sample_LOCALNAMESPACEPATH_str(),
'</NAMESPACEPATH>',
]
def sample_CLASSNAME_node():
"""
Return a sample CLASSNAME as a cim_xml node.
Must match the result of the corresponding ..._str() function.
"""
return cim_xml.CLASSNAME('CIM_Foo')
def sample_CLASSNAME_str():
"""
Return a sample CLASSNAME as a list of XML strings.
Must match the result of the corresponding ..._node() function.
"""
return [
'<CLASSNAME NAME="CIM_Foo"/>',
]
def sample_LOCALCLASSPATH_node():
"""
Return a sample LOCALCLASSPATH as a cim_xml node.
Must match the result of the corresponding ..._str() function.
"""
return cim_xml.LOCALCLASSPATH(
sample_LOCALNAMESPACEPATH_node(),
sample_CLASSNAME_node())
def sample_LOCALCLASSPATH_str():
"""
Return a sample LOCALCLASSPATH as a (nested) list of XML strings.
Must match the result of the corresponding ..._node() function.
"""
return [
'<LOCALCLASSPATH>',
sample_LOCALNAMESPACEPATH_str(),
sample_CLASSNAME_str(),
'</LOCALCLASSPATH>',
]
def sample_CLASSPATH_node():
"""
Return a sample CLASSPATH as a cim_xml node.
Must match the result of the corresponding ..._str() function.
"""
return cim_xml.CLASSPATH(
sample_NAMESPACEPATH_node(),
sample_CLASSNAME_node())
def sample_CLASSPATH_str():
"""
Return a sample CLASSPATH as a (nested) list of XML strings.
Must match the result of the corresponding ..._node() function.
"""
return [
'<CLASSPATH>',
sample_NAMESPACEPATH_str(),
sample_CLASSNAME_str(),
'</CLASSPATH>',
]
def sample_INSTANCENAME_node():
"""
Return a sample INSTANCENAME as a cim_xml node.
Must match the result of the corresponding ..._str() function.
"""
return cim_xml.INSTANCENAME(
'CIM_Pet',
[cim_xml.KEYBINDING(
'type',
cim_xml.KEYVALUE('dog', 'string')),
cim_xml.KEYBINDING(
'age',
cim_xml.KEYVALUE('2', 'numeric'))])
def sample_INSTANCENAME_str():
"""
Return a sample INSTANCENAME as a (nested) list of XML strings.
Must match the result of the corresponding ..._node() function.
"""
return [
'<INSTANCENAME CLASSNAME="CIM_Pet">',
'<KEYBINDING NAME="type">',
'<KEYVALUE TYPE="string">dog</KEYVALUE>',
'</KEYBINDING>',
'<KEYBINDING NAME="age">',
'<KEYVALUE TYPE="numeric">2</KEYVALUE>',
'</KEYBINDING>',
'</INSTANCENAME>',
]
def sample_LOCALINSTANCEPATH_node():
"""
Return a sample LOCALINSTANCEPATH as a cim_xml node.
Must match the result of the corresponding ..._str() function.
"""
return cim_xml.LOCALINSTANCEPATH(
sample_LOCALNAMESPACEPATH_node(),
sample_INSTANCENAME_node())
def sample_LOCALINSTANCEPATH_str():
"""
Return a sample LOCALINSTANCEPATH as a (nested) list of XML strings.
Must match the result of the corresponding ..._node() function.
"""
return [
'<LOCALINSTANCEPATH>',
sample_LOCALNAMESPACEPATH_str(),
sample_INSTANCENAME_str(),
'</LOCALINSTANCEPATH>',
]
TESTCASES_CIM_XML_NODE = [
# Testcases for cim_xml nodes.
# Each list item is a testcase tuple with these items:
# * desc: Short testcase description.
# * kwargs: Keyword arguments for the test function:
# * xml_node: cim_xml node to be tested (cim_xml.CIMElement subclass).
# * exp_xml_str_list: Expected XML string of the cim_xml node, as a
# (possibly nested) list of strings, or None.
# * exp_exc_types: Expected exception type(s), or None.
# * exp_warn_types: Expected warning type(s), or None.
# * condition: Boolean condition for testcase to run, or 'pdb' for debugger
# CIM top level element
(
"CIM: Simple request for EnumerateInstances",
dict(
xml_node=cim_xml.CIM(
cim_xml.MESSAGE(
cim_xml.SIMPLEREQ(
cim_xml.IMETHODCALL(
'EnumerateInstances',
sample_LOCALNAMESPACEPATH_node(),
[
cim_xml.IPARAMVALUE(
'ClassName',
sample_CLASSNAME_node(),
),
],
),
),
'1001', '1.0'),
'2.0', '2.0'),
exp_xml_str_list=None,
),
None, None, True
),
(
"CIM: Simple request for extrinsic method call (no parms)",
dict(
xml_node=cim_xml.CIM(
cim_xml.MESSAGE(
cim_xml.SIMPLEREQ(
cim_xml.METHODCALL(
'MyMethod',
sample_LOCALINSTANCEPATH_node(),
),
),
'1001', '1.0'),
'2.0', '2.0'),
exp_xml_str_list=None,
),
None, None, True
),
(
"VALUE.REFERENCE: Using sample CLASSPATH",
dict(
xml_node=cim_xml.VALUE_REFERENCE(
sample_CLASSPATH_node(),
),
exp_xml_str_list=[
'<VALUE.REFERENCE>',
sample_CLASSPATH_str(),
'</VALUE.REFERENCE>',
],
),
None, None, True
),
]
@pytest.mark.parametrize(
"desc, kwargs, exp_exc_types, exp_warn_types, condition",
TESTCASES_CIM_XML_NODE)
@simplified_test_function
def test_cim_xml_node(testcase, xml_node, exp_xml_str_list):
"""
Test function for a cim_xml node.
A cim_xml node is an object of a subclass of cim_xml.CIMElement.
The helper functions defined in class CIMElement are used only by the
__init__() methods of its subclasses, so they are not tested separately.
The __init__() method of the cim_xml node has already been executed during
creation of the testcase list for this test function, so this test
function checks the resulting cim_xml node object, as follows:
* Create the XML string for the cim_xml node, using `toxml()`. That method
is inherited from some base class of CIMElement and is not subject of
the test.
* Validate this XML string against the CIM-XML DTD.
* Compare this XML string against the expected CIM-XML string, if one is
defined.
"""
act_xml_str = xml_node.toxml()
try:
validate_cim_xml(act_xml_str)
except CIMXMLValidationError as exc:
raise AssertionError(
"DTD validation of CIM-XML failed:\n"
"{0}\n"
"CIM-XML string:\n"
"{1}".
format(exc, act_xml_str))
if exp_xml_str_list is not None:
# It is a (possibly nested) list of strings
exp_xml_str = ''.join(iter_flattened(exp_xml_str_list))
assert act_xml_str == exp_xml_str, \
"Unexpected CIM-XML string:\n" \
" expected: {0!r}\n" \
" actual: {1!r}". \
format(exp_xml_str, act_xml_str)
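# Illustrative example of the flattening used above: a nested expectation such
# as ['<A>', ['<B/>', '</A>']] joins to the single string '<A><B/></A>', which
# is why the sample_*_str() helpers can nest lists the same way the cim_xml
# nodes nest.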
###############################################################################
#
# TODO: Convert the following test classes to items in TESTCASES_CIM_XML_NODE
#
class CIMXMLTest(unittest.TestCase):
"""Run validate.py script against an xml document fragment."""
def setUp(self):
# List of test cases, each list item being an xml.dom.minidom node
# representing some element from the CIM-XML payload.
self.xml = []
# List of expected XML strings resulting from each test case.
self.xml_str = []
@staticmethod
def validate(xml, expectedResult=0):
# pylint: disable=unused-argument
"""xml is a string with the CIM-XML."""
try:
validate_cim_xml(xml)
except CIMXMLValidationError as exc:
raise AssertionError(
"DTD validation of CIM-XML failed:\n"
"{0}\n"
"CIM-XML string:\n"
"{1}".
format(exc, xml))
def test_all(self):
"""Loop over xml to execute tests"""
for i in range(0, len(self.xml)):
xml_node = self.xml[i]
act_xml_str = xml_node.toxml()
# Test that the XML fragments pass validation against CIM-XML DTD
self.validate(act_xml_str)
if i < len(self.xml_str):
# Test XML fragments for expected string representation
exp_xml_str = self.xml_str[i]
if exp_xml_str is not None:
self.assertEqual(act_xml_str, exp_xml_str,
"CIM-XML fragment to be tested: %r" %
act_xml_str)
# pylint: disable=too-few-public-methods
class UnimplementedTest(object):
"""Test unimplemented. Raise AssertionError"""
@staticmethod
def test_all():
"""raise Assertion Error"""
raise AssertionError('Unimplemented test')
#################################################################
# 3.2.1. Top Level Elements
#################################################################
# 3.2.1.1. CIM
class CIM(CIMXMLTest):
"""CIM Top level element as class"""
def setUp(self):
"""setUp for CIM class"""
super(CIM, self).setUp()
self.xml.append(cim_xml.CIM(
cim_xml.MESSAGE(
cim_xml.SIMPLEREQ(
cim_xml.IMETHODCALL(
'IntrinsicMethod',
sample_LOCALNAMESPACEPATH_node())),
'1001', '1.0'),
'2.0', '2.0'))
#################################################################
# 3.2.2. Declaration Elements
#################################################################
# 3.2.2.1. DECLARATION
# 3.2.2.2. DECLGROUP
# 3.2.2.3. DECLGROUP.WITHNAME
# 3.2.2.4. DECLGROUP.WITHPATH
# 3.2.2.5. QUALIFIER.DECLARATION
# 3.2.2.6. SCOPE
# pylint: disable=too-few-public-methods
class Declaration(UnimplementedTest):
"""
<!ELEMENT DECLARATION (DECLGROUP|DECLGROUP.WITHNAME|DECLGROUP.WITHPATH)+>
"""
# pylint: disable=too-few-public-methods
class DeclGroup(UnimplementedTest):
"""
<!ELEMENT DECLGROUP ((LOCALNAMESPACEPATH|NAMESPACEPATH)?,
QUALIFIER.DECLARATION*,VALUE.OBJECT*)>
"""
pass
# pylint: disable=too-few-public-methods
class DeclGroupWithName(UnimplementedTest):
"""
<!ELEMENT DECLGROUP.WITHNAME ((LOCALNAMESPACEPATH|NAMESPACEPATH)?,
QUALIFIER.DECLARATION*,VALUE.NAMEDOBJECT*)>
"""
# pylint: disable=too-few-public-methods
class DeclGroupWithPath(UnimplementedTest):
"""
<!ELEMENT DECLGROUP.WITHPATH (VALUE.OBJECTWITHPATH|
VALUE.OBJECTWITHLOCALPATH)*>
"""
# pylint: disable=too-few-public-methods
class QualifierDeclaration(UnimplementedTest):
"""
<!ELEMENT QUALIFIER.DECLARATION (SCOPE?, (VALUE | VALUE.ARRAY)?)>
<!ATTLIST QUALIFIER.DECLARATION
%CIMName;
%CIMType; #REQUIRED
ISARRAY (true|false) #IMPLIED
%ArraySize;
%QualifierFlavor;>
"""
class Scope(CIMXMLTest):
"""
<!ELEMENT SCOPE EMPTY>
<!ATTLIST SCOPE
CLASS (true|false) 'false'
ASSOCIATION (true|false) 'false'
REFERENCE (true|false) 'false'
PROPERTY (true|false) 'false'
METHOD (true|false) 'false'
PARAMETER (true|false) 'false'
INDICATION (true|false) 'false'>
"""
def setUp(self):
super(Scope, self).setUp()
self.xml.append(cim_xml.SCOPE())
#################################################################
# 3.2.3. Value Elements
#################################################################
# 3.2.3.1. VALUE
# 3.2.3.2. VALUE.ARRAY
# 3.2.3.3. VALUE.REFERENCE
# 3.2.3.4. VALUE.REFARRAY
# 3.2.3.5. VALUE.OBJECT
# 3.2.3.6. VALUE.NAMEDINSTANCE
# 3.2.3.7. VALUE.NAMEDOBJECT
# 3.2.3.8. VALUE.OBJECTWITHPATH
# 3.2.3.9. VALUE.OBJECTWITHLOCALPATH
# 3.2.3.10. VALUE.NULL
class Value(CIMXMLTest):
"""
<!ELEMENT VALUE (#PCDATA)>
"""
def setUp(self):
super(Value, self).setUp()
# The VALUE element depends on whether XML-based or CDATA-based
# escaping is used. Therefore, we modify the module-level switch that
# controls that and run each test twice (wth different expected XML
# strings).
cim_xml._CDATA_ESCAPING = True # pylint: disable=protected-access
self.xml.append(cim_xml.VALUE('dog'))
self.xml_str.append('<VALUE>dog</VALUE>')
# self.xml.append(cim_xml.VALUE(None))
# Note: This is illegal, Value.Null should be used instead.
self.xml.append(cim_xml.VALUE(''))
self.xml_str.append('<VALUE></VALUE>') # Assum. not folded to <VALUE/>
# Some control characters
self.xml.append(cim_xml.VALUE('a\nb\rc\td'))
self.xml_str.append('<VALUE>a\nb\rc\td</VALUE>') # Assuming XML 1.1
# Some XML special characters
self.xml.append(cim_xml.VALUE('a&b<c>d'))
self.xml_str.append('<VALUE><![CDATA[a&b<c>d]]></VALUE>')
# Some XML special characters, already XML-escaped
        self.xml.append(cim_xml.VALUE('a&amp;b&lt;c&gt;d'))
        self.xml_str.append('<VALUE><![CDATA[a&amp;b&lt;c&gt;d]]></VALUE>')
# Some XML special characters, already CDATA-escaped
self.xml.append(cim_xml.VALUE('<![CDATA[a&b<c>d]]>'))
self.xml_str.append(
'<VALUE><![CDATA[<![CDATA[a&b<c>d]]]><![CDATA[]>]]></VALUE>')
# set back to its default
cim_xml._CDATA_ESCAPING = False # pylint: disable=protected-access
self.xml.append(cim_xml.VALUE('dog'))
self.xml_str.append('<VALUE>dog</VALUE>')
# self.xml.append(cim_xml.VALUE(None))
# Note: This is illegal, Value.Null is used instead.
self.xml.append(cim_xml.VALUE(''))
self.xml_str.append('<VALUE></VALUE>') # Assum. not folded to <VALUE/>
# Some control characters
self.xml.append(cim_xml.VALUE('a\nb\rc\td'))
self.xml_str.append('<VALUE>a\nb\rc\td</VALUE>') # Assuming XML 1.1
# Some XML special characters
self.xml.append(cim_xml.VALUE('a&b<c>d'))
self.xml_str.append('<VALUE>a&b<c>d</VALUE>')
# Some XML special characters, already XML-escaped
        self.xml.append(cim_xml.VALUE('a&amp;b&lt;c&gt;d'))
self.xml_str.append('<VALUE>a&amp;b&lt;c&gt;d</VALUE>')
# Some XML special characters, already CDATA-escaped
self.xml.append(cim_xml.VALUE('<![CDATA[a&b<c>d]]>'))
self.xml_str.append(
            '<VALUE>&lt;![CDATA[a&amp;b&lt;c&gt;d]]&gt;</VALUE>')
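        # Note (illustrative): a CDATA section cannot contain its own
        # terminator "]]>", so CDATA-based escaping must split it over two
        # sections; e.g. the text "a]]>b" is rendered as
        # "<![CDATA[a]]]><![CDATA[]>b]]>", which is the split exercised by
        # the nested-CDATA expectations above.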
class ValueArray(CIMXMLTest):
"""
<!ELEMENT VALUE.ARRAY (VALUE*)>
"""
def setUp(self):
super(ValueArray, self).setUp()
self.xml.append(cim_xml.VALUE_ARRAY([]))
self.xml.append(cim_xml.VALUE_ARRAY([cim_xml.VALUE('cat'),
cim_xml.VALUE('dog')]))
class ValueReference(CIMXMLTest):
"""
<!ELEMENT VALUE.REFERENCE (CLASSPATH|LOCALCLASSPATH|CLASSNAME|
INSTANCEPATH|LOCALINSTANCEPATH|INSTANCENAME)>
"""
def setUp(self):
super(ValueReference, self).setUp()
# CLASSPATH
self.xml.append(cim_xml.VALUE_REFERENCE(sample_CLASSPATH_node()))
# LOCALCLASSPATH
self.xml.append(cim_xml.VALUE_REFERENCE(
cim_xml.LOCALCLASSPATH(
sample_LOCALNAMESPACEPATH_node(),
sample_CLASSNAME_node())))
# CLASSNAME
self.xml.append(cim_xml.VALUE_REFERENCE(sample_CLASSNAME_node()))
# INSTANCEPATH
self.xml.append(cim_xml.VALUE_REFERENCE(
cim_xml.INSTANCEPATH(
sample_NAMESPACEPATH_node(), sample_INSTANCENAME_node())))
# LOCALINSTANCEPATH
self.xml.append(cim_xml.VALUE_REFERENCE(
cim_xml.LOCALINSTANCEPATH(
sample_LOCALNAMESPACEPATH_node(), sample_INSTANCENAME_node())))
# INSTANCENAME
self.xml.append(cim_xml.VALUE_REFERENCE(sample_INSTANCENAME_node()))
class ValueRefArray(CIMXMLTest):
"""
<!ELEMENT VALUE.REFARRAY (VALUE.REFERENCE*)>
"""
def setUp(self):
super(ValueRefArray, self).setUp()
# Empty
self.xml.append(cim_xml.VALUE_REFARRAY([]))
# VALUE.REFARRAY
self.xml.append(cim_xml.VALUE_REFARRAY(
[cim_xml.VALUE_REFERENCE(cim_xml.CLASSNAME('CIM_Foo')),
cim_xml.VALUE_REFERENCE(cim_xml.LOCALCLASSPATH(
sample_LOCALNAMESPACEPATH_node(), sample_CLASSNAME_node()))]))
class ValueObject(CIMXMLTest):
"""
<!ELEMENT VALUE.OBJECT (CLASS|INSTANCE)>
"""
def setUp(self):
super(ValueObject, self).setUp()
# CLASS
self.xml.append(cim_xml.VALUE_OBJECT(cim_xml.CLASS('CIM_Foo')))
# INSTANCE
self.xml.append(cim_xml.VALUE_OBJECT(cim_xml.INSTANCE('CIM_Pet', [])))
class ValueNamedInstance(CIMXMLTest):
"""
<!ELEMENT VALUE.NAMEDINSTANCE (INSTANCENAME,INSTANCE)>
"""
def setUp(self):
super(ValueNamedInstance, self).setUp()
self.xml.append(cim_xml.VALUE_NAMEDINSTANCE(
sample_INSTANCENAME_node(),
cim_xml.INSTANCE('CIM_Pet', [])))
class ValueNamedObject(CIMXMLTest):
"""
<!ELEMENT VALUE.NAMEDOBJECT (CLASS|(INSTANCENAME,INSTANCE))>
"""
def setUp(self):
super(ValueNamedObject, self).setUp()
# CLASS
self.xml.append(cim_xml.VALUE_NAMEDOBJECT(
cim_xml.CLASS('CIM_Foo')))
# INSTANCENAME, INSTANCE
self.xml.append(cim_xml.VALUE_NAMEDOBJECT(
(sample_INSTANCENAME_node(),
cim_xml.INSTANCE('CIM_Pet', []))))
class ValueObjectWithPath(CIMXMLTest):
"""
<!ELEMENT VALUE.OBJECTWITHPATH ((CLASSPATH,CLASS)|
(INSTANCEPATH,INSTANCE))>
"""
def setUp(self):
super(ValueObjectWithPath, self).setUp()
# (CLASSPATH, CLASS)
self.xml.append(cim_xml.VALUE_OBJECTWITHPATH(
sample_CLASSPATH_node(),
cim_xml.CLASS('CIM_Foo')))
# (INSTANCEPATH, INSTANCE)
self.xml.append(cim_xml.VALUE_OBJECTWITHPATH(
cim_xml.INSTANCEPATH(
sample_NAMESPACEPATH_node(), sample_INSTANCENAME_node()),
cim_xml.INSTANCE('CIM_Pet', [])))
class ValueObjectWithLocalPath(CIMXMLTest):
"""
<!ELEMENT VALUE.OBJECTWITHLOCALPATH ((LOCALCLASSPATH,CLASS)|
(LOCALINSTANCEPATH,INSTANCE))>
"""
def setUp(self):
super(ValueObjectWithLocalPath, self).setUp()
# (LOCALCLASSPATH, CLASS)
self.xml.append(cim_xml.VALUE_OBJECTWITHLOCALPATH(
cim_xml.LOCALCLASSPATH(
sample_LOCALNAMESPACEPATH_node(),
sample_CLASSNAME_node()),
cim_xml.CLASS('CIM_Foo')))
# (LOCALINSTANCEPATH, INSTANCE)
self.xml.append(cim_xml.VALUE_OBJECTWITHLOCALPATH(
cim_xml.LOCALINSTANCEPATH(
sample_LOCALNAMESPACEPATH_node(),
sample_INSTANCENAME_node()),
cim_xml.INSTANCE('CIM_Pet', [])))
# pylint: disable=too-few-public-methods
class ValueNull(UnimplementedTest):
"""
<!ELEMENT VALUE.NULL EMPTY>
"""
# TODO: Implement ValueNull test
class ValueInstanceWithPath(CIMXMLTest):
"""
<!ELEMENT VALUE.INSTANCEWITHPATH (INSTANCEPATH,INSTANCE)>
"""
def setUp(self):
super(ValueInstanceWithPath, self).setUp()
self.xml.append(cim_xml.VALUE_INSTANCEWITHPATH(
cim_xml.INSTANCEPATH(
sample_NAMESPACEPATH_node(), sample_INSTANCENAME_node()),
cim_xml.INSTANCE('CIM_Pet', [])))
#################################################################
# 3.2.4. Naming and Location Elements
#################################################################
# 3.2.4.1. NAMESPACEPATH
# 3.2.4.2. LOCALNAMESPACEPATH
# 3.2.4.3. HOST
# 3.2.4.4. NAMESPACE
# 3.2.4.5. CLASSPATH
# 3.2.4.6. LOCALCLASSPATH
# 3.2.4.7. CLASSNAME
# 3.2.4.8. INSTANCEPATH
# 3.2.4.9. LOCALINSTANCEPATH
# 3.2.4.10. INSTANCENAME
# 3.2.4.11. OBJECTPATH
# 3.2.4.12. KEYBINDING
# 3.2.4.13. KEYVALUE
class NamespacePath(CIMXMLTest):
"""
<!ELEMENT NAMESPACEPATH (HOST,LOCALNAMESPACEPATH)>
"""
def setUp(self):
super(NamespacePath, self).setUp()
self.xml.append(sample_NAMESPACEPATH_node())
class LocalNamespacePath(CIMXMLTest):
"""
<!ELEMENT LOCALNAMESPACEPATH (NAMESPACE+)>
"""
def setUp(self):
super(LocalNamespacePath, self).setUp()
self.xml.append(sample_LOCALNAMESPACEPATH_node())
class Host(CIMXMLTest):
"""
<!ELEMENT HOST (#PCDATA)>
"""
def setUp(self):
super(Host, self).setUp()
self.xml.append(cim_xml.HOST('leonardo'))
class Namespace(CIMXMLTest):
"""
<!ELEMENT NAMESPACE EMPTY>
<!ATTLIST NAMESPACE
%CIMName;>
"""
def setUp(self):
super(Namespace, self).setUp()
self.xml.append(cim_xml.NAMESPACE('root'))
class ClassPath(CIMXMLTest):
"""
<!ELEMENT CLASSPATH (NAMESPACEPATH,CLASSNAME)>
"""
def setUp(self):
super(ClassPath, self).setUp()
self.xml.append(sample_CLASSPATH_node())
class LocalClassPath(CIMXMLTest):
"""
<!ELEMENT LOCALCLASSPATH (LOCALNAMESPACEPATH, CLASSNAME)>
"""
def setUp(self):
super(LocalClassPath, self).setUp()
self.xml.append(cim_xml.LOCALCLASSPATH(
sample_LOCALNAMESPACEPATH_node(), sample_CLASSNAME_node()))
class ClassName(CIMXMLTest):
"""
<!ELEMENT CLASSNAME EMPTY>
<!ATTLIST CLASSNAME
%CIMName;>
"""
def setUp(self):
super(ClassName, self).setUp()
self.xml.append(sample_CLASSNAME_node())
class InstancePath(CIMXMLTest):
"""
<!ELEMENT INSTANCEPATH (NAMESPACEPATH,INSTANCENAME)>
"""
def setUp(self):
super(InstancePath, self).setUp()
self.xml.append(cim_xml.INSTANCEPATH(
sample_NAMESPACEPATH_node(), sample_INSTANCENAME_node()))
class LocalInstancePath(CIMXMLTest):
"""
<!ELEMENT LOCALINSTANCEPATH (LOCALNAMESPACEPATH,INSTANCENAME)>
"""
def setUp(self):
super(LocalInstancePath, self).setUp()
self.xml.append(cim_xml.LOCALINSTANCEPATH(
sample_LOCALNAMESPACEPATH_node(), sample_INSTANCENAME_node()))
class InstanceName(CIMXMLTest):
"""
<!ELEMENT INSTANCENAME (KEYBINDING*|KEYVALUE?|VALUE.REFERENCE?)>
<!ATTLIST INSTANCENAME
%ClassName;>
"""
def setUp(self):
super(InstanceName, self).setUp()
# Empty
self.xml.append(cim_xml.INSTANCENAME('CIM_Pet', None))
# KEYBINDING
self.xml.append(sample_INSTANCENAME_node())
# KEYVALUE
self.xml.append(cim_xml.INSTANCENAME(
'CIM_Pet', cim_xml.KEYVALUE('FALSE', 'boolean')))
# VALUE.REFERENCE
self.xml.append(cim_xml.INSTANCENAME(
'CIM_Pet',
cim_xml.VALUE_REFERENCE(sample_INSTANCENAME_node())))
class ObjectPath(CIMXMLTest):
"""
<!ELEMENT OBJECTPATH (INSTANCEPATH|CLASSPATH)>
"""
def setUp(self):
super(ObjectPath, self).setUp()
self.xml.append(cim_xml.OBJECTPATH(
cim_xml.INSTANCEPATH(
sample_NAMESPACEPATH_node(),
sample_INSTANCENAME_node())))
self.xml.append(cim_xml.OBJECTPATH(
sample_CLASSPATH_node()))
class KeyBinding(CIMXMLTest):
"""
<!ELEMENT KEYBINDING (KEYVALUE|VALUE.REFERENCE)>
<!ATTLIST KEYBINDING
%CIMName;>
"""
def setUp(self):
super(KeyBinding, self).setUp()
self.xml.append(cim_xml.KEYBINDING(
'pet', cim_xml.KEYVALUE('dog', 'string')))
self.xml.append(cim_xml.KEYBINDING(
'CIM_Foo',
cim_xml.VALUE_REFERENCE(sample_CLASSPATH_node())))
class KeyValue(CIMXMLTest):
"""
<!ELEMENT KEYVALUE (#PCDATA)>
<!ATTLIST KEYVALUE
VALUETYPE (string|boolean|numeric) 'string'
%CIMType; #IMPLIED>
"""
def setUp(self):
super(KeyValue, self).setUp()
self.xml.append(cim_xml.KEYVALUE('dog', 'string'))
self.xml.append(cim_xml.KEYVALUE('2', 'numeric'))
self.xml.append(cim_xml.KEYVALUE('FALSE', 'boolean'))
self.xml.append(cim_xml.KEYVALUE('2', 'numeric', 'uint16'))
self.xml.append(cim_xml.KEYVALUE(None))
#################################################################
# 3.2.5. Object Definition Elements
#################################################################
# 3.2.5.1. CLASS
# 3.2.5.2. INSTANCE
# 3.2.5.3. QUALIFIER
# 3.2.5.4. PROPERTY
# 3.2.5.5. PROPERTY.ARRAY
# 3.2.5.6. PROPERTY.REFERENCE
# 3.2.5.7. METHOD
# 3.2.5.8. PARAMETER
# 3.2.5.9. PARAMETER.REFERENCE
# 3.2.5.10. PARAMETER.ARRAY
# 3.2.5.11. PARAMETER.REFARRAY
class Class(CIMXMLTest):
"""
<!ELEMENT CLASS (QUALIFIER*,(PROPERTY|PROPERTY.ARRAY|PROPERTY.REFERENCE)*,
METHOD*)>
<!ATTLIST CLASS
%CIMName;
%SuperClass;>
"""
def setUp(self):
super(Class, self).setUp()
# Empty
self.xml.append(cim_xml.CLASS('CIM_Foo'))
# PROPERTY
self.xml.append(cim_xml.CLASS(
'CIM_Foo',
properties=[cim_xml.PROPERTY('Dog', 'string',
cim_xml.VALUE('Spotty'))]))
# QUALIFIER + PROPERTY
self.xml.append(cim_xml.CLASS(
'CIM_Foo',
properties=[cim_xml.PROPERTY('Dog', 'string',
cim_xml.VALUE('Spotty'))],
qualifiers=[cim_xml.QUALIFIER('IMPISH', 'string',
cim_xml.VALUE('true'))]))
# PROPERTY.ARRAY
self.xml.append(cim_xml.CLASS(
'CIM_Foo',
properties=[cim_xml.PROPERTY_ARRAY('Dogs', 'string', None)]))
# PROPERTY.REFERENCE
self.xml.append(cim_xml.CLASS(
'CIM_Foo',
properties=[cim_xml.PROPERTY_REFERENCE('Dogs', None)]))
# METHOD
self.xml.append(cim_xml.CLASS(
'CIM_Foo',
methods=[cim_xml.METHOD('FooMethod')]))
class Instance(CIMXMLTest):
"""
<!ELEMENT INSTANCE (QUALIFIER*,(PROPERTY|PROPERTY.ARRAY|
PROPERTY.REFERENCE)*)>
<!ATTLIST INSTANCE
%ClassName;
xml:lang NMTOKEN #IMPLIED>
"""
def setUp(self):
super(Instance, self).setUp()
# Empty
self.xml.append(cim_xml.INSTANCE('CIM_Foo', []))
# PROPERTY
self.xml.append(cim_xml.INSTANCE(
'CIM_Foo',
[cim_xml.PROPERTY('Dog', 'string', cim_xml.VALUE('Spotty')),
cim_xml.PROPERTY('Cat', 'string', cim_xml.VALUE('Bella'))]))
# PROPERTY + QUALIFIER
self.xml.append(cim_xml.INSTANCE(
'CIM_Foo',
properties=[cim_xml.PROPERTY('Dog', 'string',
cim_xml.VALUE('Spotty')),
cim_xml.PROPERTY('Cat', 'string',
cim_xml.VALUE('Bella'))],
qualifiers=[cim_xml.QUALIFIER('IMPISH', 'string',
cim_xml.VALUE('true'))]))
# PROPERTY.ARRAY
self.xml.append(cim_xml.INSTANCE(
'CIM_Pets',
[cim_xml.PROPERTY_ARRAY(
'Dogs',
'string',
cim_xml.VALUE_ARRAY([cim_xml.VALUE('Spotty'),
cim_xml.VALUE('Bronte')])),
cim_xml.PROPERTY_ARRAY(
'Cats',
'string',
cim_xml.VALUE_ARRAY([cim_xml.VALUE('Bella'),
cim_xml.VALUE('Faux Lily')]))]))
# PROPERTY.REFERENCE
self.xml.append(cim_xml.INSTANCE(
'CIM_Pets',
[cim_xml.PROPERTY_REFERENCE(
'Dog',
cim_xml.VALUE_REFERENCE(cim_xml.CLASSNAME('CIM_Dog'))),
cim_xml.PROPERTY_REFERENCE(
'Cat',
cim_xml.VALUE_REFERENCE(cim_xml.CLASSNAME('CIM_Cat')))]))
class Qualifier(CIMXMLTest):
"""
<!ELEMENT QUALIFIER (VALUE | VALUE.ARRAY)>
<!ATTLIST QUALIFIER
%CIMName;
%CIMType; #REQUIRED
%Propagated;
%QualifierFlavor;
xml:lang NMTOKEN #IMPLIED>
"""
def setUp(self):
super(Qualifier, self).setUp()
# Note: DTD 2.2 allows qualifier to be empty
# VALUE
self.xml.append(cim_xml.QUALIFIER(
'IMPISH', 'string', cim_xml.VALUE('true')))
# VALUE + attributes
self.xml.append(cim_xml.QUALIFIER(
'Key', 'string', cim_xml.VALUE('true'),
overridable='true'))
self.xml.append(cim_xml.QUALIFIER(
'Description', 'string', cim_xml.VALUE('blahblah'),
translatable='true'))
self.xml.append(cim_xml.QUALIFIER(
'Version', 'string', cim_xml.VALUE('foorble'),
tosubclass='false', translatable='true'))
# VALUE.ARRAY
self.xml.append(cim_xml.QUALIFIER(
'LUCKYNUMBERS', 'uint32',
cim_xml.VALUE_ARRAY([cim_xml.VALUE('1'), cim_xml.VALUE('2')])))
class Property(CIMXMLTest):
"""
<!ELEMENT PROPERTY (QUALIFIER*,VALUE?)>
<!ATTLIST PROPERTY
%CIMName;
%CIMType; #REQUIRED
%ClassOrigin;
%Propagated;
xml:lang NMTOKEN #IMPLIED>
"""
def setUp(self):
super(Property, self).setUp()
# Empty
self.xml.append(cim_xml.PROPERTY('PropertyName', 'string', None))
# PROPERTY
self.xml.append(cim_xml.PROPERTY(
'PropertyName',
'string',
cim_xml.VALUE('dog')))
# PROPERTY + attributes
self.xml.append(cim_xml.PROPERTY(
'PropertyName',
'string',
cim_xml.VALUE('dog'),
propagated='true', class_origin='CIM_Pets'))
# PROPERTY + QUALIFIER
self.xml.append(cim_xml.PROPERTY(
'PropertyName',
'string',
cim_xml.VALUE('dog'),
qualifiers=[cim_xml.QUALIFIER('IMPISH', 'string',
cim_xml.VALUE('true'))]))
class PropertyArray(CIMXMLTest):
"""
<!ELEMENT PROPERTY.ARRAY (QUALIFIER*,VALUE.ARRAY?)>
<!ATTLIST PROPERTY.ARRAY
%CIMName;
%CIMType; #REQUIRED
%ArraySize;
%ClassOrigin;
%Propagated;
xml:lang NMTOKEN #IMPLIED>
"""
def setUp(self):
super(PropertyArray, self).setUp()
# Empty
self.xml.append(cim_xml.PROPERTY_ARRAY('Dogs', 'string'))
# VALUE.ARRAY
self.xml.append(cim_xml.PROPERTY_ARRAY(
'Dogs',
'string',
cim_xml.VALUE_ARRAY([cim_xml.VALUE('Spotty'),
cim_xml.VALUE('Bronte')])))
# VALUE.ARRAY + attributes
self.xml.append(cim_xml.PROPERTY_ARRAY(
'Dogs',
'string',
cim_xml.VALUE_ARRAY([cim_xml.VALUE('Spotty'),
cim_xml.VALUE('Bronte')]),
array_size='2', class_origin='CIM_Dog'))
self.xml.append(cim_xml.PROPERTY_ARRAY('Dogs', 'string', None))
# QUALIFIER + VALUE.ARRAY
self.xml.append(cim_xml.PROPERTY_ARRAY(
'Dogs',
'string',
cim_xml.VALUE_ARRAY([cim_xml.VALUE('Spotty'),
cim_xml.VALUE('Bronte')]),
qualifiers=[cim_xml.QUALIFIER('IMPISH', 'string',
cim_xml.VALUE('true'))]))
class PropertyReference(CIMXMLTest):
"""
<!ELEMENT PROPERTY.REFERENCE (QUALIFIER*,VALUE.REFERENCE?)>
<!ATTLIST PROPERTY.REFERENCE
%CIMName;
%ReferenceClass;
%ClassOrigin;
%Propagated;>
"""
def setUp(self):
super(PropertyReference, self).setUp()
# Empty
self.xml.append(cim_xml.PROPERTY_REFERENCE('Dogs', None))
# VALUE.REFERENCE
self.xml.append(cim_xml.PROPERTY_REFERENCE(
'Dogs',
cim_xml.VALUE_REFERENCE(cim_xml.CLASSNAME('CIM_Dog'))))
# VALUE.REFERENCE + attributes
self.xml.append(cim_xml.PROPERTY_REFERENCE(
'Dogs',
cim_xml.VALUE_REFERENCE(cim_xml.CLASSNAME('CIM_Dog')),
reference_class='CIM_Dog', class_origin='CIM_Dog',
propagated='true'))
# QUALIFIER + VALUE.REFERENCE
self.xml.append(cim_xml.PROPERTY_REFERENCE(
'Dogs',
cim_xml.VALUE_REFERENCE(cim_xml.CLASSNAME('CIM_Dog')),
qualifiers=[cim_xml.QUALIFIER('IMPISH', 'string',
cim_xml.VALUE('true'))]))
class Method(CIMXMLTest):
"""
<!ELEMENT METHOD (QUALIFIER*,(PARAMETER|PARAMETER.REFERENCE|
PARAMETER.ARRAY|PARAMETER.REFARRAY)*)>
<!ATTLIST METHOD
%CIMName;
%CIMType; #IMPLIED
%ClassOrigin;
%Propagated;>
"""
def setUp(self):
super(Method, self).setUp()
# Empty
self.xml.append(cim_xml.METHOD('FooMethod'))
# PARAMETER
self.xml.append(cim_xml.METHOD(
'FooMethod',
[cim_xml.PARAMETER('arg', 'string')]))
# PARAMETER.REFERENCE
self.xml.append(cim_xml.METHOD(
'FooMethod',
[cim_xml.PARAMETER_REFERENCE('arg', 'CIM_Foo')]))
# PARAMETER.ARRAY
self.xml.append(cim_xml.METHOD(
'FooMethod',
[cim_xml.PARAMETER_ARRAY('arg', 'string')]))
# PARAMETER.REFARRAY
self.xml.append(cim_xml.METHOD(
'FooMethod',
[cim_xml.PARAMETER_REFARRAY('arg', 'CIM_Foo')]))
# PARAMETER + attributes
self.xml.append(cim_xml.METHOD(
'FooMethod',
[cim_xml.PARAMETER('arg', 'string')],
return_type='uint32',
class_origin='CIM_Foo',
propagated='true'))
# QUALIFIER + PARAMETER
self.xml.append(cim_xml.METHOD(
'FooMethod',
[cim_xml.PARAMETER('arg', 'string')],
qualifiers=[cim_xml.QUALIFIER('IMPISH', 'string',
cim_xml.VALUE('true'))]))
class Parameter(CIMXMLTest):
"""
<!ELEMENT PARAMETER (QUALIFIER*)>
<!ATTLIST PARAMETER
%CIMName;
%CIMType; #REQUIRED>
"""
def setUp(self):
super(Parameter, self).setUp()
# Empty
self.xml.append(cim_xml.PARAMETER('arg', 'string'))
# QUALIFIER
self.xml.append(cim_xml.PARAMETER(
'arg',
'string',
qualifiers=[cim_xml.QUALIFIER('IMPISH', 'string',
cim_xml.VALUE('true'))]))
class ParameterReference(CIMXMLTest):
"""
<!ELEMENT PARAMETER.REFERENCE (QUALIFIER*)>
<!ATTLIST PARAMETER.REFERENCE
%CIMName;
%ReferenceClass;>
"""
def setUp(self):
super(ParameterReference, self).setUp()
# Empty
self.xml.append(cim_xml.PARAMETER_REFERENCE('arg'))
# QUALIFIER + attributes
self.xml.append(cim_xml.PARAMETER_REFERENCE(
'arg',
reference_class='CIM_Foo',
qualifiers=[cim_xml.QUALIFIER('IMPISH', 'string',
cim_xml.VALUE('true'))]))
class ParameterArray(CIMXMLTest):
"""
<!ELEMENT PARAMETER.ARRAY (QUALIFIER*)>
<!ATTLIST PARAMETER.ARRAY
%CIMName;
%CIMType; #REQUIRED
%ArraySize;>
"""
def setUp(self):
super(ParameterArray, self).setUp()
# Empty
self.xml.append(cim_xml.PARAMETER_ARRAY('arg', 'string'))
# QUALIFIERS + attributes
self.xml.append(cim_xml.PARAMETER_ARRAY(
'arg',
'string',
array_size='0',
qualifiers=[cim_xml.QUALIFIER('IMPISH', 'string',
cim_xml.VALUE('true'))]))
class ParameterReferenceArray(CIMXMLTest):
"""
<!ELEMENT PARAMETER.REFARRAY (QUALIFIER*)>
<!ATTLIST PARAMETER.REFARRAY
%CIMName;
%ReferenceClass;
%ArraySize;>
"""
def setUp(self):
super(ParameterReferenceArray, self).setUp()
# Empty
self.xml.append(cim_xml.PARAMETER_REFARRAY('arg'))
# QUALIFIERS + attributes
self.xml.append(cim_xml.PARAMETER_REFARRAY(
'arg',
reference_class='CIM_Foo',
array_size='0',
qualifiers=[cim_xml.QUALIFIER('IMPISH', 'string',
cim_xml.VALUE('true'))]))
#################################################################
# 3.2.6. Message Elements
#################################################################
# 3.2.6.1. MESSAGE
# 3.2.6.2. MULTIREQ
# 3.2.6.3. SIMPLEREQ
# 3.2.6.4. METHODCALL
# 3.2.6.5. PARAMVALUE
# 3.2.6.6. IMETHODCALL
# 3.2.6.7. IPARAMVALUE
# 3.2.6.8. MULTIRSP
# 3.2.6.9. SIMPLERSP
# 3.2.6.10. METHODRESPONSE
# 3.2.6.11. IMETHODRESPONSE
# 3.2.6.12. ERROR
# 3.2.6.13. RETURNVALUE
# 3.2.6.14. IRETURNVALUE
# 3.2.6.15 MULTIEXPREQ
# 3.2.6.16 SIMPLEEXPREQ
# 3.2.6.17 EXPMETHODCALL
# 3.2.6.18 MULTIEXPRSP
# 3.2.6.19 SIMPLEEXPRSP
# 3.2.6.20 EXPMETHODRESPONSE
# 3.2.6.21 EXPPARAMVALUE
class Message(CIMXMLTest):
"""
<!ELEMENT MESSAGE (SIMPLEREQ | MULTIREQ | SIMPLERSP | MULTIRSP |
SIMPLEEXPREQ | MULTIEXPREQ | SIMPLEEXPRSP |
MULTIEXPRSP)>
<!ATTLIST MESSAGE
ID CDATA #REQUIRED
PROTOCOLVERSION CDATA #REQUIRED>
"""
def setUp(self):
super(Message, self).setUp()
# SIMPLEREQ
self.xml.append(cim_xml.MESSAGE(
cim_xml.SIMPLEREQ(
cim_xml.IMETHODCALL(
'FooMethod',
sample_LOCALNAMESPACEPATH_node())),
'1001', '1.0'))
# MULTIREQ
self.xml.append(cim_xml.MESSAGE(
cim_xml.MULTIREQ(
[cim_xml.SIMPLEREQ(
cim_xml.IMETHODCALL(
'FooMethod',
sample_LOCALNAMESPACEPATH_node())),
cim_xml.SIMPLEREQ(
cim_xml.IMETHODCALL(
'FooMethod',
sample_LOCALNAMESPACEPATH_node()))]),
'1001', '1.0'))
# SIMPLERSP
self.xml.append(cim_xml.MESSAGE(
cim_xml.SIMPLERSP(
cim_xml.IMETHODRESPONSE('FooMethod')),
'1001', '1.0'))
# MULTIRSP
self.xml.append(cim_xml.MESSAGE(
cim_xml.MULTIRSP(
[cim_xml.SIMPLERSP(cim_xml.IMETHODRESPONSE('FooMethod')),
cim_xml.SIMPLERSP(cim_xml.IMETHODRESPONSE('FooMethod'))]),
'1001', '1.0'))
# TODO:
# SIMPLEEXPREQ
# MULTIEXPREQ
# SIMPLEEXPRSP
# MULTIEXPRSP
class MultiReq(CIMXMLTest):
"""
<!ELEMENT MULTIREQ (SIMPLEREQ, SIMPLEREQ+)>
"""
def setUp(self):
super(MultiReq, self).setUp()
self.xml.append(cim_xml.MULTIREQ(
[cim_xml.SIMPLEREQ(
cim_xml.IMETHODCALL(
'FooMethod',
sample_LOCALNAMESPACEPATH_node())),
cim_xml.SIMPLEREQ(
cim_xml.IMETHODCALL(
'FooMethod',
sample_LOCALNAMESPACEPATH_node()))]))
class MultiExpReq(CIMXMLTest):
"""
<!ELEMENT MULTIEXPREQ (SIMPLEEXPREQ, SIMPLEEXPREQ+)>
"""
def setUp(self):
super(MultiExpReq, self).setUp()
self.xml.append(cim_xml.MULTIEXPREQ(
[cim_xml.SIMPLEEXPREQ(cim_xml.EXPMETHODCALL('FooMethod')),
cim_xml.SIMPLEEXPREQ(cim_xml.EXPMETHODCALL('FooMethod'))]))
class SimpleReq(CIMXMLTest):
"""
<!ELEMENT SIMPLEREQ (IMETHODCALL | METHODCALL)>
"""
def setUp(self):
super(SimpleReq, self).setUp()
# IMETHODCALL
self.xml.append(cim_xml.SIMPLEREQ(
cim_xml.IMETHODCALL(
'FooIMethod',
sample_LOCALNAMESPACEPATH_node())))
# METHODCALL
self.xml.append(cim_xml.SIMPLEREQ(
cim_xml.METHODCALL(
'FooMethod',
cim_xml.LOCALCLASSPATH(
sample_LOCALNAMESPACEPATH_node(),
sample_CLASSNAME_node()))))
class SimpleExpReq(CIMXMLTest):
"""
<!ELEMENT SIMPLEEXPREQ (EXPMETHODCALL)>
"""
def setUp(self):
super(SimpleExpReq, self).setUp()
self.xml.append(cim_xml.SIMPLEEXPREQ(
cim_xml.EXPMETHODCALL('FooMethod')))
class IMethodCall(CIMXMLTest):
"""
<!ELEMENT IMETHODCALL (LOCALNAMESPACEPATH, IPARAMVALUE*)>
<!ATTLIST IMETHODCALL
%CIMName;>
"""
def setUp(self):
super(IMethodCall, self).setUp()
self.xml.append(
cim_xml.IMETHODCALL('FooMethod', sample_LOCALNAMESPACEPATH_node()))
self.xml.append(cim_xml.IMETHODCALL(
'FooMethod2', sample_LOCALNAMESPACEPATH_node(),
[cim_xml.IPARAMVALUE('Dog', cim_xml.VALUE('Spottyfoot'))]))
class MethodCall(CIMXMLTest):
"""
<!ELEMENT METHODCALL ((LOCALINSTANCEPATH | LOCALCLASSPATH), PARAMVALUE*)>
<!ATTLIST METHODCALL
%CIMName;>
"""
def setUp(self):
super(MethodCall, self).setUp()
# LOCALINSTANCEPATH
self.xml.append(cim_xml.METHODCALL(
'FooMethod',
cim_xml.LOCALINSTANCEPATH(
sample_LOCALNAMESPACEPATH_node(),
sample_INSTANCENAME_node())))
# LOCALCLASSPATH
self.xml.append(cim_xml.METHODCALL(
'FooMethod',
cim_xml.LOCALCLASSPATH(
sample_LOCALNAMESPACEPATH_node(),
sample_CLASSNAME_node())))
# PARAMVALUEs
self.xml.append(cim_xml.METHODCALL(
'FooMethod',
cim_xml.LOCALINSTANCEPATH(
sample_LOCALNAMESPACEPATH_node(),
sample_INSTANCENAME_node()),
[cim_xml.PARAMVALUE('Dog', cim_xml.VALUE('Spottyfoot'))]))
class ExpMethodCall(CIMXMLTest):
"""
<!ELEMENT EXPMETHODCALL (EXPPARAMVALUE*)>
<!ATTLIST EXPMETHODCALL
%CIMName;>
"""
def setUp(self):
super(ExpMethodCall, self).setUp()
self.xml.append(cim_xml.EXPMETHODCALL('FooMethod'))
self.xml.append(cim_xml.EXPMETHODCALL(
'FooMethod',
[cim_xml.EXPPARAMVALUE('Dog')]))
class ParamValue(CIMXMLTest):
"""
<!ELEMENT PARAMVALUE (VALUE | VALUE.REFERENCE | VALUE.ARRAY |
VALUE.REFARRAY)?>
<!ATTLIST PARAMVALUE
%CIMName;
%ParamType; #IMPLIED>
"""
def setUp(self):
super(ParamValue, self).setUp()
# Empty
self.xml.append(cim_xml.PARAMVALUE('Pet'))
# VALUE
self.xml.append(cim_xml.PARAMVALUE(
'Pet',
cim_xml.VALUE('Dog'),
'string'))
# VALUE.REFERENCE
self.xml.append(cim_xml.PARAMVALUE(
'Pet',
cim_xml.VALUE_REFERENCE(sample_CLASSPATH_node())))
# VALUE.ARRAY
self.xml.append(cim_xml.PARAMVALUE(
'Pet',
cim_xml.VALUE_ARRAY([])))
# VALUE.REFARRAY
self.xml.append(cim_xml.PARAMVALUE(
'Pet',
cim_xml.VALUE_REFARRAY([])))
class IParamValue(CIMXMLTest):
"""
<!ELEMENT IPARAMVALUE (VALUE | VALUE.ARRAY | VALUE.REFERENCE |
INSTANCENAME | CLASSNAME | QUALIFIER.DECLARATION |
CLASS | INSTANCE | VALUE.NAMEDINSTANCE)?>
<!ATTLIST IPARAMVALUE
%CIMName;>
"""
def setUp(self):
super(IParamValue, self).setUp()
# Empty
self.xml.append(cim_xml.IPARAMVALUE('Bird'))
# VALUE
self.xml.append(cim_xml.IPARAMVALUE(
'Pet',
cim_xml.VALUE('Dog')))
# VALUE.ARRAY
self.xml.append(cim_xml.IPARAMVALUE(
'Pet',
cim_xml.VALUE_ARRAY([])))
# VALUE.REFERENCE
self.xml.append(cim_xml.IPARAMVALUE(
'Pet',
cim_xml.VALUE_REFERENCE(sample_CLASSPATH_node())))
# INSTANCENAME
self.xml.append(cim_xml.IPARAMVALUE(
'Pet',
sample_INSTANCENAME_node()))
# CLASSNAME
self.xml.append(cim_xml.IPARAMVALUE(
'Pet',
sample_CLASSNAME_node()))
# TODO: QUALIFIER.DECLARATION
# CLASS
self.xml.append(cim_xml.IPARAMVALUE(
'Pet',
cim_xml.CLASS('CIM_Foo')))
# INSTANCE
self.xml.append(cim_xml.IPARAMVALUE(
'Pet',
cim_xml.INSTANCE('CIM_Pet', [])))
# VALUE.NAMEDINSTANCE
self.xml.append(cim_xml.IPARAMVALUE(
'Pet',
cim_xml.VALUE_NAMEDINSTANCE(
sample_INSTANCENAME_node(),
cim_xml.INSTANCE('CIM_Pet', []))))
class ExpParamValue(CIMXMLTest):
"""
<!ELEMENT EXPPARAMVALUE (INSTANCE? | VALUE? | METHODRESPONSE? |
IMETHODRESPONSE?)>
<!ATTLIST EXPPARAMVALUE
%CIMName;
%ParamType; #IMPLIED>
"""
def setUp(self):
super(ExpParamValue, self).setUp()
self.xml.append(cim_xml.EXPPARAMVALUE('FooParam'))
self.xml.append(cim_xml.EXPPARAMVALUE(
'FooParam',
cim_xml.INSTANCE('CIM_Pet', [])))
class MultiRsp(CIMXMLTest):
"""
<!ELEMENT MULTIRSP (SIMPLERSP, SIMPLERSP+)>
"""
def setUp(self):
super(MultiRsp, self).setUp()
self.xml.append(
cim_xml.MULTIRSP(
[cim_xml.SIMPLERSP(cim_xml.IMETHODRESPONSE('FooMethod')),
cim_xml.SIMPLERSP(cim_xml.IMETHODRESPONSE('FooMethod'))]))
class MultiExpRsp(CIMXMLTest):
"""
<!ELEMENT MULTIEXPRSP (SIMPLEEXPRSP, SIMPLEEXPRSP+)>
"""
def setUp(self):
super(MultiExpRsp, self).setUp()
self.xml.append(
cim_xml.MULTIEXPRSP(
[cim_xml.SIMPLEEXPRSP(
cim_xml.EXPMETHODRESPONSE('FooMethod')),
cim_xml.SIMPLEEXPRSP(
cim_xml.EXPMETHODRESPONSE('FooMethod'))]))
class SimpleRsp(CIMXMLTest):
"""
<!ELEMENT SIMPLERSP (METHODRESPONSE | IMETHODRESPONSE)>
"""
def setUp(self):
super(SimpleRsp, self).setUp()
# METHODRESPONSE
self.xml.append(
cim_xml.SIMPLERSP(cim_xml.METHODRESPONSE('FooMethod')))
# IMETHODRESPONSE
self.xml.append(
cim_xml.SIMPLERSP(cim_xml.IMETHODRESPONSE('FooMethod')))
class SimpleExpRsp(CIMXMLTest):
"""
<!ELEMENT SIMPLEEXPRSP (EXPMETHODRESPONSE)>
"""
def setUp(self):
super(SimpleExpRsp, self).setUp()
self.xml.append(
cim_xml.SIMPLEEXPRSP(cim_xml.EXPMETHODRESPONSE('FooMethod')))
class MethodResponse(CIMXMLTest):
"""
<!ELEMENT METHODRESPONSE (ERROR | (RETURNVALUE?, PARAMVALUE*))>
<!ATTLIST METHODRESPONSE
%CIMName;>
"""
def setUp(self):
super(MethodResponse, self).setUp()
# ERROR
self.xml.append(
cim_xml.METHODRESPONSE(
'FooMethod',
cim_xml.ERROR('123')))
# Empty
self.xml.append(cim_xml.METHODRESPONSE('FooMethod'))
        # RETURNVALUE
        self.xml.append(
            cim_xml.METHODRESPONSE(
                'FooMethod',
                cim_xml.RETURNVALUE(cim_xml.VALUE('Dog'))))
# PARAMVALUE
self.xml.append(
cim_xml.METHODRESPONSE(
'FooMethod',
cim_xml.PARAMVALUE('Dog', cim_xml.VALUE('Spottyfoot'))))
# RETURNVALUE + PARAMVALUE
self.xml.append(
cim_xml.METHODRESPONSE(
'FooMethod',
(cim_xml.RETURNVALUE(cim_xml.VALUE('Dog')),
cim_xml.PARAMVALUE('Dog', cim_xml.VALUE('Spottyfoot')))))
class ExpMethodResponse(CIMXMLTest):
"""
<!ELEMENT EXPMETHODRESPONSE (ERROR | IRETURNVALUE?)>
<!ATTLIST EXPMETHODRESPONSE
%CIMName;>
"""
def setUp(self):
super(ExpMethodResponse, self).setUp()
# Empty
self.xml.append(cim_xml.EXPMETHODRESPONSE('FooMethod'))
# ERROR
self.xml.append(cim_xml.EXPMETHODRESPONSE(
'FooMethod',
cim_xml.ERROR('123')))
# IRETURNVALUE
self.xml.append(cim_xml.EXPMETHODRESPONSE(
'FooMethod',
cim_xml.IRETURNVALUE(cim_xml.VALUE('Dog'))))
class IMethodResponse(CIMXMLTest):
"""
<!ELEMENT IMETHODRESPONSE (ERROR | IRETURNVALUE?)>
<!ATTLIST IMETHODRESPONSE
%CIMName;>
"""
def setUp(self):
super(IMethodResponse, self).setUp()
# Empty
self.xml.append(cim_xml.IMETHODRESPONSE('FooMethod'))
# ERROR
self.xml.append(cim_xml.IMETHODRESPONSE(
'FooMethod',
cim_xml.ERROR('123')))
# IRETURNVALUE
self.xml.append(cim_xml.IMETHODRESPONSE(
'FooMethod',
cim_xml.IRETURNVALUE(cim_xml.VALUE('Dog'))))
class Error(CIMXMLTest):
"""
<!ELEMENT ERROR (INSTANCE*)>
<!ATTLIST ERROR
CODE CDATA #REQUIRED
DESCRIPTION CDATA #IMPLIED>
"""
def setUp(self):
super(Error, self).setUp()
self.xml.append(cim_xml.ERROR('1'))
self.xml.append(cim_xml.ERROR('1', 'Foo not found'))
# TODO: INSTANCE*
class ReturnValue(CIMXMLTest):
"""
<!ELEMENT RETURNVALUE (VALUE | VALUE.REFERENCE)>
<!ATTLIST RETURNVALUE
%ParamType; #IMPLIED>
"""
def setUp(self):
super(ReturnValue, self).setUp()
# VALUE
self.xml.append(cim_xml.RETURNVALUE(cim_xml.VALUE('Dog')))
# VALUE.REFERENCE
self.xml.append(cim_xml.RETURNVALUE(
cim_xml.VALUE_REFERENCE(sample_CLASSPATH_node())))
# TODO: PARAMTYPE
class IReturnValue(CIMXMLTest):
"""
<!ELEMENT IRETURNVALUE (CLASSNAME* | INSTANCENAME* | VALUE* |
VALUE.OBJECTWITHPATH* |
VALUE.OBJECTWITHLOCALPATH* | VALUE.OBJECT* |
OBJECTPATH* | QUALIFIER.DECLARATION* |
VALUE.ARRAY? | VALUE.REFERENCE? | CLASS* |
INSTANCE* | VALUE.NAMEDINSTANCE*)>
"""
def setUp(self):
super(IReturnValue, self).setUp()
# Empty
self.xml.append(cim_xml.IRETURNVALUE(None))
# CLASSNAME
self.xml.append(cim_xml.IRETURNVALUE(
sample_CLASSNAME_node()))
# INSTANCENAME
self.xml.append(cim_xml.IRETURNVALUE(
sample_INSTANCENAME_node()))
# VALUE
self.xml.append(cim_xml.IRETURNVALUE(
cim_xml.VALUE('Dog')))
# VALUE.OBJECTWITHPATH
self.xml.append(cim_xml.IRETURNVALUE(
cim_xml.VALUE_OBJECTWITHPATH(
sample_CLASSPATH_node(),
cim_xml.CLASS('CIM_Foo'))))
# VALUE.OBJECTWITHLOCALPATH
self.xml.append(cim_xml.IRETURNVALUE(
cim_xml.VALUE_OBJECTWITHLOCALPATH(
cim_xml.LOCALCLASSPATH(
sample_LOCALNAMESPACEPATH_node(),
sample_CLASSNAME_node()),
cim_xml.CLASS('CIM_Foo'))))
# VALUE.OBJECT
self.xml.append(cim_xml.IRETURNVALUE(
cim_xml.VALUE_OBJECT(cim_xml.INSTANCE('CIM_Pet', []))))
# OBJECTPATH
self.xml.append(cim_xml.IRETURNVALUE(
cim_xml.OBJECTPATH(
cim_xml.INSTANCEPATH(
sample_NAMESPACEPATH_node(),
sample_INSTANCENAME_node()))))
# TODO: QUALIFIER.DECLARATION
# VALUE.ARRAY
self.xml.append(cim_xml.IRETURNVALUE(
cim_xml.VALUE_ARRAY([])))
# VALUE.REFERENCE
self.xml.append(cim_xml.IRETURNVALUE(
cim_xml.VALUE_REFERENCE(sample_CLASSPATH_node())))
# CLASS
self.xml.append(cim_xml.IRETURNVALUE(
cim_xml.CLASS('CIM_Foo')))
# INSTANCE
self.xml.append(cim_xml.IRETURNVALUE(
cim_xml.INSTANCE('CIM_Pet', [])))
# VALUE.NAMEDINSTANCE
self.xml.append(cim_xml.IRETURNVALUE(
cim_xml.VALUE_NAMEDINSTANCE(
sample_INSTANCENAME_node(),
cim_xml.INSTANCE('CIM_Pet', []))))
| lgpl-2.1 | 1,312,035,014,946,303,000 | 25.890708 | 79 | 0.544654 | false |
FuegoFro/mongo-web-shell | mongows/mws/views.py | 7 | 12296 | # Copyright 2013 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timedelta
from functools import update_wrapper
import uuid
from bson import BSON
from bson.json_util import dumps, loads
from flask import Blueprint, current_app, make_response, request
from flask import session
from mongows.mws.MWSServerError import MWSServerError
from pymongo.errors import OperationFailure
from mongows.mws.db import get_db
from mongows.mws.util import (
UseResId,
get_collection_names,
get_internal_coll_name
)
mws = Blueprint('mws', __name__, url_prefix='/mws')
CLIENTS_COLLECTION = 'clients'
@mws.after_request
def no_cache(response):
response.cache_control.no_cache = True
response.headers['Expires'] = 0
return response
# TODO: Look over this method; remove unnecessary bits, check convention, etc.
# via http://flask.pocoo.org/snippets/56/
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if isinstance(headers, list):
headers = ', '.join(x.upper() for x in headers)
if isinstance(origin, list):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
cors_origin = origin or current_app.config.get('CORS_ORIGIN', '')
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = cors_origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
h['Access-Control-Allow-Credentials'] = 'true'
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
else:
reqh = request.headers.get('Access-Control-Request-Headers')
h['Access-Control-Allow-Headers'] = reqh
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
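# Illustrative usage (route name hypothetical): stacked under @mws.route, the
# decorator attaches the CORS headers to both the OPTIONS preflight and the
# actual response, e.g.:
#
#   @mws.route('/ping', methods=['GET', 'OPTIONS'])
#   @crossdomain(origin='*', headers='Content-Type')
#   def ping():
#       return 'pong'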
def check_session_id(f):
def wrapped_function(*args, **kwargs):
session_id = session.get('session_id')
if session_id is None:
raise MWSServerError(401, 'There is no session_id cookie')
if not user_has_access(kwargs['res_id'], session_id):
error = 'Session error. User does not have access to res_id'
raise MWSServerError(403, error)
return f(*args, **kwargs)
return update_wrapper(wrapped_function, f)
def ratelimit(f):
def wrapped_function(*args, **kwargs):
session_id = session.get('session_id')
if session_id is None:
error = 'Cannot rate limit without session_id cookie'
raise MWSServerError(401, error)
config = current_app.config
coll = get_db()[config['RATELIMIT_COLLECTION']]
coll.insert({'session_id': session_id, 'timestamp': datetime.now()})
delta = timedelta(seconds=config['RATELIMIT_EXPIRY'])
expiry = datetime.now() - delta
accesses = coll.find({'session_id': session_id,
'timestamp': {'$gt': expiry}})
if accesses.count() > config['RATELIMIT_QUOTA']:
raise MWSServerError(429, 'Rate limit exceeded')
return f(*args, **kwargs)
return update_wrapper(wrapped_function, f)
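# Illustrative arithmetic (config values hypothetical): with
# RATELIMIT_EXPIRY = 60 and RATELIMIT_QUOTA = 120, a session may make at most
# 120 requests in any trailing 60-second window; since the access is inserted
# before counting, the current request itself is included in the count.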
@mws.route('/', methods=['POST', 'OPTIONS'])
@crossdomain()
def create_mws_resource():
session_id = session.get('session_id', str(uuid.uuid4()))
session['session_id'] = session_id
clients = get_db()[CLIENTS_COLLECTION]
cursor = clients.find({'session_id': session_id}, {'res_id': 1, '_id': 0})
if cursor.count():
# TODO: handle multiple res_id per session
res_id = cursor[0]['res_id']
is_new = False
else:
res_id = generate_res_id()
clients.insert({
'version': 1,
'res_id': res_id,
'collections': [],
'session_id': session_id,
'timestamp': datetime.now()
})
is_new = True
return to_json({'res_id': res_id, 'is_new': is_new})
@mws.route('/<res_id>/keep-alive', methods=['POST', 'OPTIONS'])
@crossdomain()
@check_session_id
def keep_mws_alive(res_id):
clients = get_db()[CLIENTS_COLLECTION]
clients.update({'session_id': session.get('session_id'), 'res_id': res_id},
{'$set': {'timestamp': datetime.now()}})
return empty_success()
@mws.route('/<res_id>/db/<collection_name>/find', methods=['GET', 'OPTIONS'])
@crossdomain()
@check_session_id
@ratelimit
def db_collection_find(res_id, collection_name):
# TODO: Should we specify a content type? Then we have to use an options
# header, and we should probably get the return type from the content-type
# header.
parse_get_json(request)
query = request.json.get('query')
projection = request.json.get('projection')
skip = request.json.get('skip', 0)
limit = request.json.get('limit', 0)
with UseResId(res_id):
coll = get_db()[collection_name]
cursor = coll.find(query, projection, skip, limit)
documents = list(cursor)
return to_json({'result': documents})
@mws.route('/<res_id>/db/<collection_name>/insert',
methods=['POST', 'OPTIONS'])
@crossdomain()
@check_session_id
@ratelimit
def db_collection_insert(res_id, collection_name):
# TODO: Ensure request.json is not None.
if 'document' in request.json:
document = request.json['document']
else:
error = '\'document\' argument not found in the insert request.'
raise MWSServerError(400, error)
# Check quota
size = get_collection_size(res_id, collection_name)
# Handle inserting both a list of docs or a single doc
if isinstance(document, list):
req_size = 0
for d in document:
req_size += len(BSON.encode(d))
else:
req_size = len(BSON.encode(document))
if size + req_size > current_app.config['QUOTA_COLLECTION_SIZE']:
raise MWSServerError(403, 'Collection size exceeded')
# Insert document
with UseResId(res_id):
get_db()[collection_name].insert(document)
return empty_success()
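# Illustrative sizing for the quota check above: BSON.encode({'a': 1}) is
# 12 bytes (4 length + 1 type + 2 key + 4 int32 + 1 terminator), so a list of
# 1000 such documents yields req_size = 12000 bytes, which must fit in
# QUOTA_COLLECTION_SIZE minus the collection's current size.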
@mws.route('/<res_id>/db/<collection_name>/remove',
methods=['DELETE', 'OPTIONS'])
@crossdomain()
@check_session_id
@ratelimit
def db_collection_remove(res_id, collection_name):
constraint = request.json.get('constraint') if request.json else {}
just_one = request.json and request.json.get('just_one', False)
with UseResId(res_id):
collection = get_db()[collection_name]
if just_one:
collection.find_and_modify(constraint, remove=True)
else:
collection.remove(constraint)
return empty_success()
@mws.route('/<res_id>/db/<collection_name>/update', methods=['PUT', 'OPTIONS'])
@crossdomain()
@check_session_id
@ratelimit
def db_collection_update(res_id, collection_name):
query = update = None
if request.json:
query = request.json.get('query')
update = request.json.get('update')
upsert = request.json.get('upsert', False)
multi = request.json.get('multi', False)
if query is None or update is None:
error = 'update requires spec and document arguments'
raise MWSServerError(400, error)
# Check quota
size = get_collection_size(res_id, collection_name)
with UseResId(res_id):
# Computation of worst case size increase - update size * docs affected
# It would be nice if we were able to make a more conservative estimate
# of the space difference that an update will cause. (especially if it
# results in smaller documents)
db = get_db()
affected = db[collection_name].find(query).count()
req_size = len(BSON.encode(update)) * affected
if size + req_size > current_app.config['QUOTA_COLLECTION_SIZE']:
raise MWSServerError(403, 'Collection size exceeded')
db[collection_name].update(query, update, upsert, multi=multi)
return empty_success()
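# Illustrative worst case for the update estimate above: an update document of
# 64 BSON bytes matching 50 documents gives req_size = 64 * 50 = 3200 bytes,
# deliberately over-counting, since a $set rarely grows every matched document
# by the full update size.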
@mws.route('/<res_id>/db/<collection_name>/aggregate',
methods=['GET', 'OPTIONS'])
@crossdomain()
@check_session_id
def db_collection_aggregate(res_id, collection_name):
parse_get_json(request)
try:
with UseResId(res_id):
coll = get_db()[collection_name]
result = coll.aggregate(request.json)
return to_json(result)
except OperationFailure as e:
raise MWSServerError(400, e.message)
@mws.route('/<res_id>/db/<collection_name>/drop',
methods=['DELETE', 'OPTIONS'])
@crossdomain()
@check_session_id
@ratelimit
def db_collection_drop(res_id, collection_name):
with UseResId(res_id):
get_db().drop_collection(collection_name)
return empty_success()
@mws.route('/<res_id>/db/<collection_name>/count', methods=['GET', 'OPTIONS'])
@crossdomain()
@check_session_id
@ratelimit
def db_collection_count(res_id, collection_name):
parse_get_json(request)
query = request.json.get('query')
skip = request.json.get('skip', 0)
limit = request.json.get('limit', 0)
use_skip_limit = bool(skip or limit)
with UseResId(res_id):
coll = get_db()[collection_name]
cursor = coll.find(query, skip=skip, limit=limit)
count = cursor.count(use_skip_limit)
return to_json({'count': count})
@mws.route('/<res_id>/db/getCollectionNames',
methods=['GET', 'OPTIONS'])
@crossdomain()
@check_session_id
def db_get_collection_names(res_id):
return to_json({'result': get_collection_names(res_id)})
@mws.route('/<res_id>/db',
methods=['DELETE', 'OPTIONS'])
@crossdomain()
@check_session_id
def db_drop(res_id):
DB = get_db()
collections = get_collection_names(res_id)
with UseResId(res_id):
for c in collections:
DB.drop_collection(c)
return empty_success()
def generate_res_id():
return str(uuid.uuid4())
def user_has_access(res_id, session_id):
query = {'res_id': res_id, 'session_id': session_id}
coll = get_db()[CLIENTS_COLLECTION]
return_value = coll.find_one(query)
    return return_value is not None
def to_json(result):
try:
return dumps(result), 200
except ValueError:
error = 'Error in find while trying to convert the results to ' + \
'JSON format.'
raise MWSServerError(500, error)
def empty_success():
return '', 204
def parse_get_json(request):
try:
request.json = loads(request.args.keys()[0])
except ValueError:
raise MWSServerError(400, 'Error parsing JSON data',
'Invalid GET parameter data')
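# Illustrative request shape: the client sends the JSON document as the sole
# query-string key, e.g.
#   GET /mws/<res_id>/db/foo/find?{"query": {"x": 1}, "limit": 10}
# so request.args.keys()[0] above is the raw JSON text to be parsed.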
def get_collection_size(res_id, collection_name):
coll = get_internal_coll_name(res_id, collection_name)
try:
return get_db().command({'collstats': coll})['size']
except OperationFailure as e:
if 'ns not found' in e.message:
return 0
else:
raise MWSServerError(500, e.message)
| apache-2.0 | -6,339,074,579,026,056,000 | 31.702128 | 79 | 0.630774 | false |
cemoody/chainer | examples/mnist/train_mnist.py | 1 | 4391 | #!/usr/bin/env python
"""Chainer example: train a multi-layer perceptron on MNIST
This is a minimal example to write a feed-forward net.
"""
from __future__ import print_function
import argparse
import numpy as np
import six
import chainer
from chainer import computational_graph
from chainer import cuda
import chainer.links as L
from chainer import optimizers
from chainer import serializers
import data
import net
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--initmodel', '-m', default='',
help='Initialize the model from given file')
parser.add_argument('--resume', '-r', default='',
help='Resume the optimization from snapshot')
parser.add_argument('--net', '-n', choices=('simple', 'parallel'),
default='simple', help='Network type')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--epoch', '-e', default=20, type=int,
help='number of epochs to learn')
parser.add_argument('--unit', '-u', default=1000, type=int,
help='number of units')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='learning minibatch size')
args = parser.parse_args()
batchsize = args.batchsize
n_epoch = args.epoch
n_units = args.unit
print('GPU: {}'.format(args.gpu))
print('# unit: {}'.format(args.unit))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('Network type: {}'.format(args.net))
print('')
# Prepare dataset
print('load MNIST dataset')
mnist = data.load_mnist_data()
mnist['data'] = mnist['data'].astype(np.float32)
mnist['data'] /= 255
mnist['target'] = mnist['target'].astype(np.int32)
N = 60000
x_train, x_test = np.split(mnist['data'], [N])
y_train, y_test = np.split(mnist['target'], [N])
N_test = y_test.size
# Prepare multi-layer perceptron model, defined in net.py
if args.net == 'simple':
model = L.Classifier(net.MnistMLP(784, n_units, 10))
if args.gpu >= 0:
cuda.get_device(args.gpu).use()
model.to_gpu()
xp = np if args.gpu < 0 else cuda.cupy
elif args.net == 'parallel':
cuda.check_cuda_available()
model = L.Classifier(net.MnistMLPParallel(784, n_units, 10))
xp = cuda.cupy
# Setup optimizer
optimizer = optimizers.Adam()
optimizer.setup(model)
# Init/Resume
if args.initmodel:
print('Load model from', args.initmodel)
serializers.load_npz(args.initmodel, model)
if args.resume:
print('Load optimizer state from', args.resume)
serializers.load_npz(args.resume, optimizer)
# Learning loop
for epoch in six.moves.range(1, n_epoch + 1):
print('epoch', epoch)
# training
perm = np.random.permutation(N)
sum_accuracy = 0
sum_loss = 0
for i in six.moves.range(0, N, batchsize):
x = chainer.Variable(xp.asarray(x_train[perm[i:i + batchsize]]))
t = chainer.Variable(xp.asarray(y_train[perm[i:i + batchsize]]))
# Pass the loss function (Classifier defines it) and its arguments
optimizer.update(model, x, t)
if epoch == 1 and i == 0:
with open('graph.dot', 'w') as o:
g = computational_graph.build_computational_graph(
(model.loss, ))
o.write(g.dump())
print('graph generated')
sum_loss += float(model.loss.data) * len(t.data)
sum_accuracy += float(model.accuracy.data) * len(t.data)
print('train mean loss={}, accuracy={}'.format(
sum_loss / N, sum_accuracy / N))
# evaluation
sum_accuracy = 0
sum_loss = 0
for i in six.moves.range(0, N_test, batchsize):
x = chainer.Variable(xp.asarray(x_test[i:i + batchsize]),
volatile='on')
t = chainer.Variable(xp.asarray(y_test[i:i + batchsize]),
volatile='on')
loss = model(x, t)
sum_loss += float(loss.data) * len(t.data)
sum_accuracy += float(model.accuracy.data) * len(t.data)
print('test mean loss={}, accuracy={}'.format(
sum_loss / N_test, sum_accuracy / N_test))
# Save the model and the optimizer
print('save the model')
serializers.save_npz('mlp.model', model)
print('save the optimizer')
serializers.save_npz('mlp.state', optimizer)
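# --- Editor's note: illustrative follow-up, not part of the original file. ---
# The computational graph dumped to 'graph.dot' during the first iteration can
# be rendered with Graphviz (assuming the 'dot' binary is installed):
#
#   dot -Tpng graph.dot -o graph.png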
| mit | 1,720,002,836,869,415,200 | 31.768657 | 74 | 0.63038 | false |
architecture-building-systems/CEAforArcGIS | cea/tests/run_all_plots.py | 2 | 1790 | """
Load all the plot classes and generate the plots for test purposes
(This is run at the end of ``cea test --workflow slow`` as it requires a scenario with all the data from the
whole workflow to be present)
"""
import shutil
import tempfile
import cea.plots
import cea.plots.cache
import cea.config
import cea.workflows.workflow
__author__ = "Daren Thomas"
__copyright__ = "Copyright 2020, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Daren Thomas"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
def main(config):
cache_folder = tempfile.mkdtemp()
plot_cache = cea.plots.cache.MemoryPlotCache(cache_folder)
with config.ignore_restrictions():
if config.plots_supply_system.system == "_sys_today_":
# BUGFIX: _sys_today_ not supported
config.plots_supply_system.system = ""
try:
for category in cea.plots.categories.list_categories(plugins=[]):
# create the new dashboard
print("Plotting category {category}".format(category=category.label))
for plot_class in category.plots:
print("- Plotting {plot_class}".format(plot_class=plot_class.__name__))
                parameters = {k: config.get(v) for k, v in plot_class.expected_parameters.items()}
plot = plot_class(config.project, parameters, plot_cache)
print(" - plotting to {output_path}".format(output_path=plot.output_path))
plot.plot()
print(" - plotting div (len={len})".format(len=len(plot.plot_div())))
finally:
shutil.rmtree(cache_folder, ignore_errors=True)
if __name__ == "__main__":
main(cea.config.Configuration()) | mit | -8,398,397,072,217,587,000 | 31.563636 | 108 | 0.637989 | false |
michaelpacer/scikit-image | doc/source/conf.py | 6 | 9627 | # -*- coding: utf-8 -*-
#
# skimage documentation build configuration file, created by
# sphinx-quickstart on Sat Aug 22 13:00:30 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
curpath = os.path.dirname(__file__)
sys.path.append(os.path.join(curpath, '..', 'ext'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
'sphinx.ext.autosummary', 'plot2rst',
'sphinx.ext.intersphinx']
# Determine if the matplotlib has a recent enough version of the
# plot_directive, otherwise use the local fork.
try:
from matplotlib.sphinxext import plot_directive
except ImportError:
use_matplotlib_plot_directive = False
else:
try:
use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
except AttributeError:
use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
extensions.append('matplotlib.sphinxext.plot_directive')
else:
extensions.append('plot_directive')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'skimage'
copyright = '2013, the scikit-image team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
with open('../../skimage/__init__.py') as f:
    setup_lines = f.readlines()
version = 'vUndefined'
for l in setup_lines:
    if l.startswith('__version__'):
        version = l.split("'")[1]
        break
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-image'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'skimage v%s docs' % version
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['navigation.html',
'localtoc.html',
'versions.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikitimagedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'scikit-image.tex', u'The scikit-image Documentation',
u'scikit-image development team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{enumitem}
\setlistdepth{100}
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
numpydoc_show_class_members = False
numpydoc_class_members_toctree = False
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_basedir = os.path.join(curpath, "plots")
plot_pre_code = """
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(0)
import matplotlib
matplotlib.rcParams.update({
'font.size': 14,
'axes.titlesize': 12,
'axes.labelsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 10,
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
})
"""
plot_include_source = True
plot_formats = [('png', 100), ('pdf', 100)]
plot2rst_index_name = 'README'
plot2rst_rcparams = {'image.cmap' : 'gray',
'image.interpolation' : 'none'}
# -----------------------------------------------------------------------------
# intersphinx
# -----------------------------------------------------------------------------
_python_doc_base = 'http://docs.python.org/2.7'
intersphinx_mapping = {
_python_doc_base: None,
'http://docs.scipy.org/doc/numpy': None,
'http://docs.scipy.org/doc/scipy/reference': None,
'http://scikit-learn.org/stable': None
}
| bsd-3-clause | 6,259,562,681,117,678,000 | 31.305369 | 80 | 0.667705 | false |
DrKylstein/Media-Gizmo | text_display.py | 1 | 2554 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Kyle Delaney
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of the project's author nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#from unidecode import unidecode
class LCD(object):
_table = [
        ('°F', 0x1B),
        ('°C', 0x1A),
        ('°', 0xDF),
        ('&sun;', 0x94),
        ('&rain;', 0xDE),
        ('&cloud;', 0x8E),
        ('&storm;', 0x8F),
        ('&', 0x26),
        ('~', 0x8E)
]
def __init__(self, tty):
self._serial = tty
def _translate_text(self, text):
#text = unidecode(text).encode('ascii', errors='ignore')
for pair in self._table:
text = text.replace(pair[0], chr(pair[1]))
return text
def change_track(self, title, artist):
self.change_title(title)
self.change_artist(artist)
def change_title(self, title):
self._serial.write('T{}\n'.format(self._translate_text(title)))
def change_artist(self, artist):
self._serial.write('A{}\n'.format(self._translate_text(artist)))
def clear(self):
self._serial.write('C\n')
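# --- Editor's note: illustrative usage sketch, not part of the original file.
# The serial port name, baud rate, and the pyserial import are assumptions.
#
#   import serial
#   lcd = LCD(serial.Serial('/dev/ttyUSB0', 9600))
#   lcd.change_track('99°F & &sun;', 'Weather Bot')  # tokens map via _table
#   lcd.clear()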
| bsd-3-clause | -5,620,178,843,297,378,000 | 36.573529 | 75 | 0.668755 | false |
jvican/dumbq | client/utils/test_utils.py | 1 | 1138 | # -*- coding: utf-8 -*-
# DumbQ 2.0 - A lightweight job scheduler
# Copyright (C) 2015-2016 Jorge Vicente Cantero, CERN
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""These utils are required by assert functions imported from unittest 2.7."""
def safe_repr(obj, short=False):
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < 80:
return result
return result[:80] + ' [truncated]...'
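# --- Editor's note: illustrative example, not part of the original file. ---
#
#   safe_repr('x' * 200, short=True)
#   # -> the first 80 characters of the repr, followed by ' [truncated]...'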
| gpl-2.0 | -4,158,798,769,107,847,000 | 36.933333 | 78 | 0.717926 | false |
horance-liu/tensorflow | tensorflow/python/ops/distributions/kullback_leibler.py | 35 | 5456 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registration and usage mechanisms for KL-divergences."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import tf_inspect
_DIVERGENCES = {}
__all__ = [
"RegisterKL",
"kl_divergence",
]
def _registered_kl(type_a, type_b):
"""Get the KL function registered for classes a and b."""
hierarchy_a = tf_inspect.getmro(type_a)
hierarchy_b = tf_inspect.getmro(type_b)
dist_to_children = None
kl_fn = None
for mro_to_a, parent_a in enumerate(hierarchy_a):
for mro_to_b, parent_b in enumerate(hierarchy_b):
candidate_dist = mro_to_a + mro_to_b
candidate_kl_fn = _DIVERGENCES.get((parent_a, parent_b), None)
if not kl_fn or (candidate_kl_fn and candidate_dist < dist_to_children):
dist_to_children = candidate_dist
kl_fn = candidate_kl_fn
return kl_fn
def kl_divergence(distribution_a, distribution_b,
allow_nan_stats=True, name=None):
"""Get the KL-divergence KL(distribution_a || distribution_b).
If there is no KL method registered specifically for `type(distribution_a)`
and `type(distribution_b)`, then the class hierarchies of these types are
searched.
If one KL method is registered between any pairs of classes in these two
parent hierarchies, it is used.
If more than one such registered method exists, the method whose registered
classes have the shortest sum MRO paths to the input types is used.
If more than one such shortest path exists, the first method
identified in the search is used (favoring a shorter MRO distance to
`type(distribution_a)`).
Args:
distribution_a: The first distribution.
distribution_b: The second distribution.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Returns:
A Tensor with the batchwise KL-divergence between `distribution_a`
and `distribution_b`.
Raises:
NotImplementedError: If no KL method is defined for distribution types
of `distribution_a` and `distribution_b`.
"""
kl_fn = _registered_kl(type(distribution_a), type(distribution_b))
if kl_fn is None:
raise NotImplementedError(
"No KL(distribution_a || distribution_b) registered for distribution_a "
"type %s and distribution_b type %s"
% (type(distribution_a).__name__, type(distribution_b).__name__))
with ops.name_scope("KullbackLeibler"):
kl_t = kl_fn(distribution_a, distribution_b, name=name)
if allow_nan_stats:
return kl_t
# Check KL for NaNs
kl_t = array_ops.identity(kl_t, name="kl")
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_not(
math_ops.reduce_any(math_ops.is_nan(kl_t))),
["KL calculation between %s and %s returned NaN values "
"(and was called with allow_nan_stats=False). Values:"
% (distribution_a.name, distribution_b.name), kl_t])]):
return array_ops.identity(kl_t, name="checked_kl")
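# --- Editor's note: illustrative usage sketch, not part of the original file.
# Assuming two distribution instances whose classes have a KL function
# registered (e.g. via the RegisterKL decorator below), resolution walks the
# MRO of both types as implemented in _registered_kl:
#
#   p = Normal(loc=0., scale=1.)   # hypothetical registered distributions
#   q = Normal(loc=1., scale=2.)
#   kl = kl_divergence(p, q)       # batchwise KL(p || q)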
class RegisterKL(object):
"""Decorator to register a KL divergence implementation function.
Usage:
@distributions.RegisterKL(distributions.Normal, distributions.Normal)
def _kl_normal_mvn(norm_a, norm_b):
# Return KL(norm_a || norm_b)
"""
def __init__(self, dist_cls_a, dist_cls_b):
"""Initialize the KL registrar.
Args:
dist_cls_a: the class of the first argument of the KL divergence.
dist_cls_b: the class of the second argument of the KL divergence.
"""
self._key = (dist_cls_a, dist_cls_b)
def __call__(self, kl_fn):
"""Perform the KL registration.
Args:
kl_fn: The function to use for the KL divergence.
Returns:
kl_fn
Raises:
TypeError: if kl_fn is not a callable.
ValueError: if a KL divergence function has already been registered for
the given argument classes.
"""
if not callable(kl_fn):
raise TypeError("kl_fn must be callable, received: %s" % kl_fn)
if self._key in _DIVERGENCES:
raise ValueError("KL(%s || %s) has already been registered to: %s"
% (self._key[0].__name__, self._key[1].__name__,
_DIVERGENCES[self._key]))
_DIVERGENCES[self._key] = kl_fn
return kl_fn
| apache-2.0 | 258,418,001,918,616,160 | 34.660131 | 80 | 0.669538 | false |
dchud/unalog2 | settings.py | 1 | 3333 | # Django settings for unalog2 project.
from os.path import abspath, dirname
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Your name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
DEFAULT_CHARSET = 'utf-8'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'SET THIS TO A LONG, RANDOM STRING, WITH SOME @#$! CHARS TOO'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'/PATH/TO/YOUR/COPY/OF/unalog2/base/templates',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'base',
)
AUTH_PROFILE_MODULE = 'base.userprofile'
LOGIN_URL = '/login'
LOGIN_REDIRECT_URL = '/'
UNALOG_ROOT = dirname(abspath(__file__))
# Set to a unique realm for authentication
REALM = "unalog CHANGEME"
# Set to your local site's host:port, *not* including trailing slash
UNALOG_URL = 'http://localhost:8000'
SOLR_URL = 'http://localhost:8983/solr'
# Be sure to create your own 'local_settings.py' file as described in README.txt
try:
from local_settings import *
except ImportError:
pass
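# --- Editor's note: illustrative sketch, not part of the original file. ---
# A minimal local_settings.py override might look like this; the exact values
# required by your deployment are assumptions:
#
#   DEBUG = False
#   SECRET_KEY = 'a-long-random-string'
#   UNALOG_URL = 'http://unalog.example.org'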
| mit | 5,551,549,108,930,240,000 | 27.487179 | 88 | 0.70207 | false |
HalcyonChimera/osf.io | osf/migrations/0069_auto_20171127_1119.py | 20 | 2240 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-27 17:19
from __future__ import unicode_literals
import logging
from django.db import migrations
from osf.utils.migrations import disable_auto_now_fields
logger = logging.getLogger(__name__)
def add_preprint_doi_created(state, schema):
"""
Sets preprint_doi_created equal to date_published for existing published preprints.
"""
PreprintService = state.get_model('osf', 'preprintservice')
null_preprint_doi_created = PreprintService.objects.filter(preprint_doi_created__isnull=True, date_published__isnull=False)
preprints_count = null_preprint_doi_created.count()
current_preprint = 0
logger.info('{} published preprints found with preprint_doi_created is null.'.format(preprints_count))
ContentType = state.get_model('contenttypes', 'ContentType')
Identifier = state.get_model('osf', 'identifier')
with disable_auto_now_fields(models=[PreprintService]):
for preprint in null_preprint_doi_created:
current_preprint += 1
content_type = ContentType.objects.get_for_model(preprint)
if Identifier.objects.filter(object_id=preprint.id, category='doi', content_type=content_type).exists():
preprint.preprint_doi_created = preprint.date_published
preprint.save()
logger.info('Preprint ID {}, {}/{} preprint_doi_created field populated.'.format(preprint.id, current_preprint, preprints_count))
else:
logger.info('Preprint ID {}, {}/{} skipped because a DOI has not been created.'.format(preprint.id, current_preprint, preprints_count))
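# --- Editor's note on the design choice above (not part of the original
# file): disable_auto_now_fields wraps the save() calls so that populating
# preprint_doi_created does not also bump any auto_now timestamp fields on
# PreprintService (e.g. a 'modified' column) as a side effect of the data
# migration; the exact field names are an assumption.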
def reverse_func(state, schema):
"""
Reverses data migration. Sets preprint_doi_created field back to null.
"""
PreprintService = state.get_model('osf', 'preprintservice')
logger.info('Reversing preprint_doi_created migration.')
PreprintService.objects.filter(preprint_doi_created__isnull=False).update(preprint_doi_created=None)
class Migration(migrations.Migration):
dependencies = [
('osf', '0068_preprintservice_preprint_doi_created'),
]
operations = [
migrations.RunPython(add_preprint_doi_created, reverse_func)
]
| apache-2.0 | 2,025,173,990,394,870,000 | 43.8 | 151 | 0.695536 | false |
jabesq/home-assistant | tests/components/demo/test_geo_location.py | 7 | 2852 | """The tests for the demo platform."""
import unittest
from unittest.mock import patch
from homeassistant.components import geo_location
from homeassistant.components.demo.geo_location import \
NUMBER_OF_DEMO_DEVICES, DEFAULT_UNIT_OF_MEASUREMENT, \
DEFAULT_UPDATE_INTERVAL
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant, assert_setup_component, \
fire_time_changed
import homeassistant.util.dt as dt_util
CONFIG = {
geo_location.DOMAIN: [
{
'platform': 'demo'
}
]
}
class TestDemoPlatform(unittest.TestCase):
"""Test the demo platform."""
def setUp(self):
"""Initialize values for this testcase class."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_platform(self):
"""Test setup of demo platform via configuration."""
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch('homeassistant.util.dt.utcnow', return_value=utcnow):
with assert_setup_component(1, geo_location.DOMAIN):
assert setup_component(self.hass, geo_location.DOMAIN, CONFIG)
self.hass.block_till_done()
# In this test, one zone and geolocation entities have been
# generated.
all_states = [self.hass.states.get(entity_id) for entity_id
in self.hass.states.entity_ids(geo_location.DOMAIN)]
assert len(all_states) == NUMBER_OF_DEMO_DEVICES
for state in all_states:
# Check a single device's attributes.
if state.domain != geo_location.DOMAIN:
# ignore home zone state
continue
assert abs(
state.attributes['latitude'] -
self.hass.config.latitude
) < 1.0
assert abs(
state.attributes['longitude'] -
self.hass.config.longitude
) < 1.0
assert state.attributes['unit_of_measurement'] == \
DEFAULT_UNIT_OF_MEASUREMENT
# Update (replaces 1 device).
fire_time_changed(self.hass, utcnow + DEFAULT_UPDATE_INTERVAL)
self.hass.block_till_done()
# Get all states again, ensure that the number of states is still
# the same, but the lists are different.
all_states_updated = [
self.hass.states.get(entity_id) for entity_id
in self.hass.states.entity_ids(geo_location.DOMAIN)]
assert len(all_states_updated) == NUMBER_OF_DEMO_DEVICES
assert all_states != all_states_updated
| apache-2.0 | 6,124,653,382,466,511,000 | 37.540541 | 78 | 0.593969 | false |
ianblenke/awsebcli | ebcli/docker/dockerrun.py | 1 | 5119 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ..core import fileoperations
from ..objects.exceptions import ValidationError
from ..resources.strings import strings
AUTH_BUCKET_KEY = 'Bucket'
AUTH_KEY = 'Authentication'
AUTHKEY_KEY = 'Key'
CONTAINER_PORT_KEY = 'ContainerPort'
IMG_NAME_KEY = 'Name'
IMG_KEY = 'Image'
IMG_UPDATE_KEY = 'Update'
JSON_FALSE = 'false'
LOGGING_KEY = 'Logging'
PORTS_KEY = 'Ports'
VERSION_ONE = '1'
VERSION_KEY = 'AWSEBDockerrunVersion'
VERSION_TWO = '2'
def validate_dockerrun_v1(dockerrun, is_used_to_make_dockerfile):
"""
    Validates the given Dockerrun.aws.json version and, when no Dockerfile
    exists, that Image.Name and Ports[0].ContainerPort exist.
:param dockerrun: dict: dictionary representation of Dockerrun.aws.json
:param is_used_to_make_dockerfile: bool: whether used to make Dockerfile
:return: None
"""
if dockerrun is None:
return
if _get_version(dockerrun) != VERSION_ONE:
raise ValidationError(strings['local.invaliddockerrunversion'])
if not is_used_to_make_dockerfile:
return
if IMG_KEY not in dockerrun or IMG_NAME_KEY not in dockerrun[IMG_KEY]:
raise ValidationError(strings['local.missingdockerrunimage'])
elif PORTS_KEY not in dockerrun:
raise ValidationError(strings['local.missingdockerrunports'])
elif CONTAINER_PORT_KEY not in dockerrun[PORTS_KEY][0]:
raise ValidationError(strings['local.missingdockerruncontainerport'])
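# --- Editor's note: illustrative sketch, not part of the original file. ---
# A minimal v1 payload that passes validate_dockerrun_v1 when the file is used
# to generate a Dockerfile; the image name and port are placeholders:
#
#   {
#       "AWSEBDockerrunVersion": "1",
#       "Image": {"Name": "example/image", "Update": "false"},
#       "Ports": [{"ContainerPort": "80"}]
#   }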
def validate_dockerrun_v2(dockerrun):
if dockerrun is None:
raise ValidationError(strings['local.missingdockerrun'])
elif _get_version(dockerrun) != VERSION_TWO:
raise ValidationError(strings['local.invaliddockerrunversion'])
def require_docker_pull(dockerrun):
"""
Whether 'docker pull' is necessary. Return True if and only if
Dockerrun.aws.json Image.Update value is not false.
:param dockerrun: dict: dictionary representation of Dockerrun.aws.json
:return: bool
"""
return (dockerrun is None or IMG_KEY not in dockerrun or
dockerrun[IMG_KEY].get(IMG_UPDATE_KEY) != JSON_FALSE)
def get_dockerrun(dockerrun_path):
"""
Return dict representation of Dockerrun.aws.json in dockerrun_path
Return None if Dockerrun doesn't exist at that path.
:param dockerrun_path: str: full path to Dockerrun.aws.json
:return: dict
"""
try:
return fileoperations.get_json_dict(dockerrun_path)
except ValueError:
raise ValidationError(strings['local.invalidjson'])
except IOError: # Dockerrun.aws.json doesn't exist
return None
def require_auth_download(dockerrun):
"""
Return whether Authentication.Key and Authentication.Bucket is provided
in Dockerrun.aws.json, in which case we have to pull down the bucket.
:param dockerrun: dict: dictionary representation of Dockerrun.aws.json
:return: bool
"""
if dockerrun is None:
return False
try:
get_auth_key(dockerrun)
get_auth_bucket_name(dockerrun)
return True
except KeyError:
return False
def get_auth_key(dockerrun):
"""
Get Authentication.Key value of dockerrun.
:param dockerrun: dict: dictionary representation of Dockerrun.aws.json
:return: str
"""
if _get_version(dockerrun) == VERSION_ONE:
authkey_key = AUTHKEY_KEY
else:
authkey_key = AUTHKEY_KEY.lower()
return _get_auth(dockerrun)[authkey_key]
def get_auth_bucket_name(dockerrun):
"""
Get Authentication.Bucket value of dockerrun.
:param dockerrun: dict: dictionary representation of Dockerrun.aws.json
:return: str
"""
if _get_version(dockerrun) == VERSION_ONE:
auth_bucket_key = AUTH_BUCKET_KEY
else:
auth_bucket_key = AUTH_BUCKET_KEY.lower()
return _get_auth(dockerrun)[auth_bucket_key]
def get_logdir(dockerrun):
"""
Get Logging value of dockerrun.
:param dockerrun: dict: dictionary representation of Dockerrun.aws.json
:return: str
"""
return dockerrun.get(LOGGING_KEY) if dockerrun else None
def get_base_img(dockerrun):
return dockerrun[IMG_KEY][IMG_NAME_KEY]
def get_exposed_port(dockerrun):
return dockerrun[PORTS_KEY][0][CONTAINER_PORT_KEY]
def _get_auth(dockerrun):
if _get_version(dockerrun) == VERSION_ONE:
auth_key = AUTH_KEY
else:
auth_key = AUTH_KEY.lower()
return dockerrun[auth_key]
def _get_version(dockerrun):
if VERSION_KEY in dockerrun:
return str(dockerrun[VERSION_KEY])
else:
return None
| apache-2.0 | 6,774,757,003,490,146,000 | 28.085227 | 77 | 0.697793 | false |
nburn42/tensorflow | tensorflow/contrib/slim/python/slim/data/parallel_reader_test.py | 51 | 7874 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.parallel_reader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.slim.python.slim import queues
from tensorflow.contrib.slim.python.slim.data import parallel_reader
from tensorflow.contrib.slim.python.slim.data import test_utils
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import supervisor
class ParallelReaderTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def _verify_all_data_sources_read(self, shared_queue):
with self.test_session():
tfrecord_paths = test_utils.create_tfrecord_files(
self.get_temp_dir(), num_files=3)
num_readers = len(tfrecord_paths)
p_reader = parallel_reader.ParallelReader(
io_ops.TFRecordReader, shared_queue, num_readers=num_readers)
data_files = parallel_reader.get_data_files(tfrecord_paths)
filename_queue = input_lib.string_input_producer(data_files)
key, value = p_reader.read(filename_queue)
count0 = 0
count1 = 0
count2 = 0
num_reads = 50
sv = supervisor.Supervisor(logdir=self.get_temp_dir())
with sv.prepare_or_wait_for_session() as sess:
sv.start_queue_runners(sess)
for _ in range(num_reads):
current_key, _ = sess.run([key, value])
if '0-of-3' in str(current_key):
count0 += 1
if '1-of-3' in str(current_key):
count1 += 1
if '2-of-3' in str(current_key):
count2 += 1
self.assertGreater(count0, 0)
self.assertGreater(count1, 0)
self.assertGreater(count2, 0)
self.assertEquals(count0 + count1 + count2, num_reads)
def _verify_read_up_to_out(self, shared_queue):
with self.test_session():
num_files = 3
num_records_per_file = 7
tfrecord_paths = test_utils.create_tfrecord_files(
self.get_temp_dir(),
num_files=num_files,
num_records_per_file=num_records_per_file)
p_reader = parallel_reader.ParallelReader(
io_ops.TFRecordReader, shared_queue, num_readers=5)
data_files = parallel_reader.get_data_files(tfrecord_paths)
filename_queue = input_lib.string_input_producer(data_files, num_epochs=1)
key, value = p_reader.read_up_to(filename_queue, 4)
count0 = 0
count1 = 0
count2 = 0
all_keys_count = 0
all_values_count = 0
sv = supervisor.Supervisor(logdir=self.get_temp_dir())
with sv.prepare_or_wait_for_session() as sess:
sv.start_queue_runners(sess)
while True:
try:
current_keys, current_values = sess.run([key, value])
self.assertEquals(len(current_keys), len(current_values))
all_keys_count += len(current_keys)
all_values_count += len(current_values)
for current_key in current_keys:
if '0-of-3' in str(current_key):
count0 += 1
if '1-of-3' in str(current_key):
count1 += 1
if '2-of-3' in str(current_key):
count2 += 1
except errors_impl.OutOfRangeError:
break
self.assertEquals(count0, num_records_per_file)
self.assertEquals(count1, num_records_per_file)
self.assertEquals(count2, num_records_per_file)
self.assertEquals(
all_keys_count,
num_files * num_records_per_file)
self.assertEquals(all_values_count, all_keys_count)
self.assertEquals(
count0 + count1 + count2,
all_keys_count)
def testRandomShuffleQueue(self):
shared_queue = data_flow_ops.RandomShuffleQueue(
capacity=256,
min_after_dequeue=128,
dtypes=[dtypes_lib.string, dtypes_lib.string])
self._verify_all_data_sources_read(shared_queue)
def testFIFOSharedQueue(self):
shared_queue = data_flow_ops.FIFOQueue(
capacity=256, dtypes=[dtypes_lib.string, dtypes_lib.string])
self._verify_all_data_sources_read(shared_queue)
def testReadUpToFromRandomShuffleQueue(self):
shared_queue = data_flow_ops.RandomShuffleQueue(
capacity=55,
min_after_dequeue=28,
dtypes=[dtypes_lib.string, dtypes_lib.string],
shapes=[tensor_shape.scalar(), tensor_shape.scalar()])
self._verify_read_up_to_out(shared_queue)
def testReadUpToFromFIFOQueue(self):
shared_queue = data_flow_ops.FIFOQueue(
capacity=99,
dtypes=[dtypes_lib.string, dtypes_lib.string],
shapes=[tensor_shape.scalar(), tensor_shape.scalar()])
self._verify_read_up_to_out(shared_queue)
class ParallelReadTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testTFRecordReader(self):
with self.test_session():
self._tfrecord_paths = test_utils.create_tfrecord_files(
self.get_temp_dir(), num_files=3)
key, value = parallel_reader.parallel_read(
self._tfrecord_paths, reader_class=io_ops.TFRecordReader, num_readers=3)
sv = supervisor.Supervisor(logdir=self.get_temp_dir())
with sv.prepare_or_wait_for_session() as sess:
sv.start_queue_runners(sess)
flowers = 0
num_reads = 100
for _ in range(num_reads):
current_key, _ = sess.run([key, value])
if 'flowers' in str(current_key):
flowers += 1
self.assertGreater(flowers, 0)
self.assertEquals(flowers, num_reads)
class SinglePassReadTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testOutOfRangeError(self):
with self.test_session():
[tfrecord_path] = test_utils.create_tfrecord_files(
self.get_temp_dir(), num_files=1)
key, value = parallel_reader.single_pass_read(
tfrecord_path, reader_class=io_ops.TFRecordReader)
init_op = variables.local_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
with queues.QueueRunners(sess):
num_reads = 11
with self.assertRaises(errors_impl.OutOfRangeError):
for _ in range(num_reads):
sess.run([key, value])
def testTFRecordReader(self):
with self.test_session():
[tfrecord_path] = test_utils.create_tfrecord_files(
self.get_temp_dir(), num_files=1)
key, value = parallel_reader.single_pass_read(
tfrecord_path, reader_class=io_ops.TFRecordReader)
init_op = variables.local_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
with queues.QueueRunners(sess):
flowers = 0
num_reads = 9
for _ in range(num_reads):
current_key, _ = sess.run([key, value])
if 'flowers' in str(current_key):
flowers += 1
self.assertGreater(flowers, 0)
self.assertEquals(flowers, num_reads)
if __name__ == '__main__':
test.main()
| apache-2.0 | 4,977,076,060,767,239,000 | 33.234783 | 80 | 0.661798 | false |
TangXT/edx-platform | lms/djangoapps/instructor/tests/test_legacy_xss.py | 13 | 2296 | """
Tests of various instructor dashboard features that include lists of students
"""
from django.conf import settings
from django.test.client import RequestFactory
from django.test.utils import override_settings
from markupsafe import escape
from courseware.tests.tests import TEST_DATA_MIXED_MODULESTORE
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from instructor.views import legacy
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestXss(ModuleStoreTestCase):
def setUp(self):
self._request_factory = RequestFactory()
self._course = CourseFactory.create()
self._evil_student = UserFactory.create(
email="[email protected]",
username="evil-robot",
profile__name='<span id="evil">Evil Robot</span>',
)
self._instructor = UserFactory.create(
email="[email protected]",
username="instructor",
is_staff=True
)
CourseEnrollmentFactory.create(
user=self._evil_student,
course_id=self._course.id
)
def _test_action(self, action):
"""
Test for XSS vulnerability in the given action
Build a request with the given action, call the instructor dashboard
view, and check that HTML code in a user's name is properly escaped.
"""
req = self._request_factory.post(
"dummy_url",
data={"action": action}
)
req.user = self._instructor
req.session = {}
resp = legacy.instructor_dashboard(req, self._course.id.to_deprecated_string())
respUnicode = resp.content.decode(settings.DEFAULT_CHARSET)
self.assertNotIn(self._evil_student.profile.name, respUnicode)
self.assertIn(escape(self._evil_student.profile.name), respUnicode)
def test_list_enrolled(self):
self._test_action("List enrolled students")
def test_dump_list_of_enrolled(self):
self._test_action("Dump list of enrolled students")
def test_dump_grades(self):
self._test_action("Dump Grades for all students in this course")
| agpl-3.0 | -4,525,045,904,103,281,000 | 35.444444 | 87 | 0.675087 | false |
Khan/reviewboard | reviewboard/site/models.py | 2 | 3213 | #
# models.py -- Models for the "reviewboard.site" app.
#
# Copyright (c) 2010 David Trowbridge
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
class LocalSite(models.Model):
"""
A division within a Review Board installation.
This allows the creation of independent, isolated divisions within a given
server. Users can be designated as members of a LocalSite, and optionally
as admins (which allows them to manipulate the repositories, groups and
users in the site).
    Pretty much every other model in this module can be assigned to a single
LocalSite, at which point only members will be able to see or manipulate
these objects. Access control is performed at every level, and consistency
is enforced through a liberal sprinkling of assertions and unit tests.
"""
name = models.SlugField(_('name'), max_length=32, blank=False, unique=True)
users = models.ManyToManyField(User, blank=True,
related_name='local_site')
admins = models.ManyToManyField(User, blank=True,
related_name='local_site_admins')
def is_accessible_by(self, user):
"""Returns whether or not the user has access to this LocalSite.
This checks that the user is logged in, and that they're listed in the
'users' field.
"""
return (user.is_authenticated() and
(user.is_staff or self.users.filter(pk=user.pk).exists()))
def is_mutable_by(self, user, perm='site.change_localsite'):
"""Returns whether or not a user can modify settings in a LocalSite.
This checks that the user is either staff with the proper permissions,
or that they're listed in the 'admins' field.
By default, this is checking whether the LocalSite itself can be
modified, but a different permission can be passed to check for
another object.
"""
return user.has_perm(perm) or self.admins.filter(pk=user.pk).exists()
def __unicode__(self):
return self.name
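# --- Editor's note: illustrative usage sketch, not part of the original file.
# 'some_user' is an assumption; only methods defined above are used:
#
#   site = LocalSite.objects.create(name='team-a')
#   site.users.add(some_user)
#   site.is_accessible_by(some_user)   # True once the user is a member
#   site.is_mutable_by(some_user)      # False unless admin or has permission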
| mit | 5,471,718,910,881,945,000 | 43.013699 | 80 | 0.709617 | false |
xodus7/tensorflow | tensorflow/contrib/framework/python/ops/variables_test.py | 2 | 59220 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""variables tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.framework.python.ops import variables as variables_lib2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import device_setter
from tensorflow.python.training import saver as saver_lib
class LocalVariableTest(test.TestCase):
def test_local_variable(self):
with self.cached_session() as sess:
self.assertEquals([], variables_lib.local_variables())
value0 = 42
variables_lib2.local_variable(value0)
value1 = 43
variables_lib2.local_variable(value1)
variables = variables_lib.local_variables()
self.assertEquals(2, len(variables))
self.assertRaises(errors_impl.OpError, sess.run, variables)
variables_lib.variables_initializer(variables).run()
self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
def testLocalVariableNameAndShape(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.local_variable([1, 1, 1, 1, 1], name='a')
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
self.assertListEqual([a], variables_lib2.get_local_variables())
def testLocalVariableNotInAllVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.local_variable(0)
self.assertFalse(a in variables_lib.global_variables())
self.assertTrue(a in variables_lib.local_variables())
def testLocalVariableNotInVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.local_variable(0)
self.assertFalse(a in variables_lib2.get_variables_to_restore())
self.assertTrue(a in variables_lib.local_variables())
def testGetVariablesDontReturnsTransients(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
variables_lib2.local_variable(0)
with variable_scope.variable_scope('B'):
variables_lib2.local_variable(0)
self.assertEquals([], variables_lib2.get_variables('A'))
self.assertEquals([], variables_lib2.get_variables('B'))
def testGetLocalVariablesReturnsTransients(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.local_variable(0)
with variable_scope.variable_scope('B'):
b = variables_lib2.local_variable(0)
self.assertEquals([a], variables_lib2.get_local_variables('A'))
self.assertEquals([b], variables_lib2.get_local_variables('B'))
def testInitializedVariableValue(self):
with self.cached_session() as sess:
a = variables_lib2.local_variable([0, 0, 0, 0, 0], name='a')
sess.run(variables_lib.local_variables_initializer())
self.assertAllEqual(a.eval(), [0] * 5)
def testResourceVariable(self):
a = variables_lib2.local_variable(0)
b = variables_lib2.local_variable(0, use_resource=True)
self.assertTrue(isinstance(a, variables_lib.Variable))
self.assertFalse(isinstance(a, resource_variable_ops.ResourceVariable))
self.assertTrue(isinstance(b, resource_variable_ops.ResourceVariable))
class GlobalVariableTest(test.TestCase):
def test_global_variable(self):
with self.cached_session() as sess:
self.assertEquals([], variables_lib.global_variables())
value0 = 42
variables_lib2.global_variable(value0)
value1 = 43
variables_lib2.global_variable(value1)
variables = variables_lib.global_variables()
self.assertEquals(2, len(variables))
with self.assertRaisesOpError(
'Attempting to use uninitialized value Variable'):
sess.run(variables)
variables_lib.variables_initializer(variables).run()
self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
def testVariableNameAndShape(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.global_variable([1, 1, 1, 1, 1], name='a')
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
self.assertListEqual([a], variables_lib.global_variables())
def testGlobalVariableNotInLocalVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.global_variable(0)
self.assertFalse(a in variables_lib.local_variables())
self.assertTrue(a in variables_lib.global_variables())
def testGlobalVariableInVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.global_variable(0)
self.assertFalse(a in variables_lib.local_variables())
self.assertTrue(a in variables_lib2.get_variables_to_restore())
def testGetVariablesReturnsThem(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.global_variable(0)
with variable_scope.variable_scope('B'):
b = variables_lib2.global_variable(0)
self.assertEquals([a], variables_lib2.get_variables('A'))
self.assertEquals([b], variables_lib2.get_variables('B'))
def testGetLocalVariablesDontReturnsThem(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
variables_lib2.global_variable(0)
with variable_scope.variable_scope('B'):
variables_lib2.global_variable(0)
self.assertEquals([], variables_lib2.get_local_variables('A'))
self.assertEquals([], variables_lib2.get_local_variables('B'))
def testInitializedVariableValue(self):
with self.cached_session() as sess:
a = variables_lib2.global_variable([0, 0, 0, 0, 0], name='a')
sess.run(variables_lib.global_variables_initializer())
self.assertAllEqual(a.eval(), [0] * 5)
def testResourceVariable(self):
a = variables_lib2.global_variable(0)
b = variables_lib2.global_variable(0, use_resource=True)
self.assertTrue(isinstance(a, variables_lib.Variable))
self.assertFalse(isinstance(a, resource_variable_ops.ResourceVariable))
self.assertTrue(isinstance(b, resource_variable_ops.ResourceVariable))
class GlobalStepTest(test.TestCase):
def _assert_global_step(self, global_step, expected_dtype=dtypes.int64):
self.assertEquals('%s:0' % ops.GraphKeys.GLOBAL_STEP, global_step.name)
self.assertEquals(expected_dtype, global_step.dtype.base_dtype)
self.assertEquals([], global_step.get_shape().as_list())
def test_invalid_dtype(self):
with ops.Graph().as_default() as g:
self.assertEquals(None, variables_lib2.get_global_step())
variables_lib.Variable(
0.0,
trainable=False,
dtype=dtypes.float32,
name=ops.GraphKeys.GLOBAL_STEP)
self.assertRaisesRegexp(TypeError, 'does not have integer type',
variables_lib2.get_global_step)
self.assertRaisesRegexp(TypeError, 'does not have integer type',
variables_lib2.get_global_step, g)
def test_invalid_shape(self):
with ops.Graph().as_default() as g:
self.assertEquals(None, variables_lib2.get_global_step())
variables_lib.Variable(
[0],
trainable=False,
dtype=dtypes.int32,
name=ops.GraphKeys.GLOBAL_STEP)
self.assertRaisesRegexp(TypeError, 'not scalar',
variables_lib2.get_global_step)
self.assertRaisesRegexp(TypeError, 'not scalar',
variables_lib2.get_global_step, g)
def test_create_global_step(self):
self.assertEquals(None, variables_lib2.get_global_step())
with ops.Graph().as_default() as g:
global_step = variables_lib2.create_global_step()
self._assert_global_step(global_step)
self.assertRaisesRegexp(ValueError, 'already exists',
variables_lib2.create_global_step)
self.assertRaisesRegexp(ValueError, 'already exists',
variables_lib2.create_global_step, g)
self._assert_global_step(variables_lib2.create_global_step(ops.Graph()))
def test_get_global_step(self):
with ops.Graph().as_default() as g:
self.assertEquals(None, variables_lib2.get_global_step())
variables_lib.Variable(
0,
trainable=False,
dtype=dtypes.int32,
name=ops.GraphKeys.GLOBAL_STEP)
self._assert_global_step(
variables_lib2.get_global_step(), expected_dtype=dtypes.int32)
self._assert_global_step(
variables_lib2.get_global_step(g), expected_dtype=dtypes.int32)
def test_get_or_create_global_step(self):
with ops.Graph().as_default() as g:
self.assertEquals(None, variables_lib2.get_global_step())
self._assert_global_step(variables_lib2.get_or_create_global_step())
self._assert_global_step(variables_lib2.get_or_create_global_step(g))
class VariablesTest(test.TestCase):
def testCreateVariable(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
self.assertTrue(a in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
self.assertFalse(a in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES))
self.assertFalse(a in variables_lib.local_variables())
def testGetVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.variable('a', [5])
self.assertEquals([a, b], variables_lib2.get_variables())
self.assertEquals([a], variables_lib2.get_variables('A'))
self.assertEquals([b], variables_lib2.get_variables('B'))
def testGetVariablesWithScope(self):
with self.cached_session():
with variable_scope.variable_scope('A') as var_scope:
a = variables_lib2.variable('a', [5])
b = variables_lib2.variable('b', [5])
self.assertSetEqual(
set([a, b]), set(variables_lib2.get_variables(var_scope)))
def testGetVariablesSuffix(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('A'):
b = variables_lib2.variable('b', [5])
self.assertEquals([a], variables_lib2.get_variables(suffix='a'))
self.assertEquals([b], variables_lib2.get_variables(suffix='b'))
def testGetVariableWithSingleVar(self):
with self.cached_session():
with variable_scope.variable_scope('parent'):
a = variables_lib2.variable('child', [5])
self.assertEquals(a, variables_lib2.get_unique_variable('parent/child'))
def testGetVariableWithDistractors(self):
with self.cached_session():
with variable_scope.variable_scope('parent'):
a = variables_lib2.variable('child', [5])
with variable_scope.variable_scope('child'):
variables_lib2.variable('grandchild1', [7])
variables_lib2.variable('grandchild2', [9])
self.assertEquals(a, variables_lib2.get_unique_variable('parent/child'))
def testGetVariableThrowsExceptionWithNoMatch(self):
var_name = 'cant_find_me'
with self.cached_session():
with self.assertRaises(ValueError):
variables_lib2.get_unique_variable(var_name)
def testGetThrowsExceptionWithChildrenButNoMatch(self):
var_name = 'parent/child'
with self.cached_session():
with variable_scope.variable_scope(var_name):
variables_lib2.variable('grandchild1', [7])
variables_lib2.variable('grandchild2', [9])
with self.assertRaises(ValueError):
variables_lib2.get_unique_variable(var_name)
def testGetVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.variable('a', [5])
self.assertEquals([a, b], variables_lib2.get_variables_to_restore())
def testIncludeGetVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.variable('a', [5])
self.assertEquals([a, b], variables_lib2.get_variables())
self.assertEquals([a], variables_lib2.get_variables_to_restore(['A']))
def testExcludeGetVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.variable('a', [5])
self.assertEquals([a, b], variables_lib2.get_variables())
self.assertEquals(
[a], variables_lib2.get_variables_to_restore(exclude=['B']))
def testWrongIncludeGetVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.variable('a', [5])
self.assertEquals([a, b], variables_lib2.get_variables())
self.assertEquals([], variables_lib2.get_variables_to_restore(['a']))
def testGetMixedVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
b = variables_lib2.variable('b', [5])
with variable_scope.variable_scope('B'):
c = variables_lib2.variable('c', [5])
d = variables_lib2.variable('d', [5])
self.assertEquals([a, b, c, d], variables_lib2.get_variables())
self.assertEquals(
[a, c],
variables_lib2.get_variables_to_restore(include=['A/a', 'B/c']))
def testExcludeGetMixedVariablesToRestore(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
b = variables_lib2.variable('b', [5])
with variable_scope.variable_scope('B'):
c = variables_lib2.variable('c', [5])
d = variables_lib2.variable('d', [5])
self.assertEquals([a, b, c, d], variables_lib2.get_variables())
self.assertEquals(
[b, d],
variables_lib2.get_variables_to_restore(exclude=['A/a', 'B/c']))
def testReuseVariable(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [])
with variable_scope.variable_scope('A', reuse=True):
b = variables_lib2.variable('a', [])
self.assertEquals(a, b)
self.assertListEqual([a], variables_lib2.get_variables())
def testVariableWithRegularizer(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [], regularizer=nn_ops.l2_loss)
loss = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertDeviceEqual(loss.device, a.device)
def testVariableWithRegularizerColocate(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable(
'a', [], device='gpu:0', regularizer=nn_ops.l2_loss)
loss = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0]
self.assertDeviceEqual(loss.device, a.device)
def testVariableWithDevice(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [], device='cpu:0')
b = variables_lib2.variable('b', [], device='cpu:1')
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertDeviceEqual(b.device, 'cpu:1')
def testVariableWithDeviceFromScope(self):
with self.cached_session():
with ops.device('/cpu:0'):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [], device='cpu:1')
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertDeviceEqual(b.device, 'cpu:1')
def testVariableWithDeviceFunction(self):
class DevFn(object):
def __init__(self):
self.counter = -1
def __call__(self, op):
self.counter += 1
return 'cpu:%d' % self.counter
with self.cached_session():
with arg_scope([variables_lib2.variable], device=DevFn()):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], device='cpu:12')
d = variables_lib2.variable('d', [])
with ops.device('cpu:99'):
e_init = constant_op.constant(12)
e = variables_lib2.variable('e', initializer=e_init)
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, 'cpu:1')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
self.assertDeviceEqual(c.device, 'cpu:12')
self.assertEqual(c.initial_value.op.colocation_groups(),
c.op.colocation_groups())
self.assertDeviceEqual(d.device, 'cpu:2')
self.assertEqual(d.initial_value.op.colocation_groups(),
d.op.colocation_groups())
self.assertDeviceEqual(e.device, 'cpu:3')
self.assertDeviceEqual(e.initial_value.device, 'cpu:99')
def testVariableWithReplicaDeviceSetter(self):
with self.cached_session():
with ops.device(device_setter.replica_device_setter(ps_tasks=2)):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], device='cpu:12')
d = variables_lib2.variable('d', [])
with ops.device('cpu:99'):
e_init = constant_op.constant(12)
e = variables_lib2.variable('e', initializer=e_init)
# The values below highlight how the replica_device_setter puts initial
# values on the worker job, and how it merges explicit devices.
self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
self.assertDeviceEqual(c.device, '/job:ps/task:0/cpu:12')
self.assertEqual(c.initial_value.op.colocation_groups(),
c.op.colocation_groups())
self.assertDeviceEqual(d.device, '/job:ps/task:1/cpu:0')
self.assertEqual(d.initial_value.op.colocation_groups(),
d.op.colocation_groups())
self.assertDeviceEqual(e.device, '/job:ps/task:0/cpu:0')
self.assertDeviceEqual(e.initial_value.device, '/job:worker/cpu:99')
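  # Added note: the assertions above capture replica_device_setter's behavior
  # in one place -- variables round-robin across the ps tasks
  # ('/job:ps/task:0', '/job:ps/task:1', ...), an explicit device such as
  # 'cpu:12' is merged into the chosen ps device rather than replaced, and
  # initializer constants stay on the worker job.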
def testVariableWithVariableDeviceChooser(self):
with ops.Graph().as_default():
device_fn = variables_lib2.VariableDeviceChooser(num_tasks=2)
with arg_scope([variables_lib2.variable], device=device_fn):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], device='cpu:12')
d = variables_lib2.variable('d', [])
with ops.device('cpu:99'):
e_init = constant_op.constant(12)
e = variables_lib2.variable('e', initializer=e_init)
# The values below highlight how the VariableDeviceChooser puts initial
# values on the same device as the variable job.
self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
self.assertDeviceEqual(c.device, '/cpu:12')
self.assertEqual(c.initial_value.op.colocation_groups(),
c.op.colocation_groups())
self.assertDeviceEqual(d.device, '/job:ps/task:0/cpu:0')
self.assertEqual(d.initial_value.op.colocation_groups(),
d.op.colocation_groups())
self.assertDeviceEqual(e.device, '/job:ps/task:1/cpu:0')
self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
def testVariableWithVariableDeviceChooserWithReplica(self):
with ops.Graph().as_default():
device_fn = variables_lib2.VariableDeviceChooser(replica=3, num_tasks=2)
with arg_scope([variables_lib2.variable], device=device_fn):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], device='cpu:12')
d = variables_lib2.variable('d', [])
with ops.device('cpu:99'):
e_init = constant_op.constant(12)
e = variables_lib2.variable('e', initializer=e_init)
# The values below highlight how the VariableDeviceChooser puts initial
# values on the same device as the variable job.
self.assertDeviceEqual(a.device, '/job:ps/replica:3/task:0/cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, '/job:ps/replica:3/task:1/cpu:0')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
self.assertDeviceEqual(c.device, '/cpu:12')
self.assertEqual(c.initial_value.op.colocation_groups(),
c.op.colocation_groups())
self.assertDeviceEqual(d.device, '/job:ps/replica:3/task:0/cpu:0')
self.assertEqual(d.initial_value.op.colocation_groups(),
d.op.colocation_groups())
self.assertDeviceEqual(e.device, '/job:ps/replica:3/task:1/cpu:0')
self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
def testVariableGPUPlacement(self):
with ops.Graph().as_default():
device_fn = variables_lib2.VariableDeviceChooser(device_type='GPU')
with arg_scope([variables_lib2.variable], device=device_fn):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], device='cpu:12')
d = variables_lib2.variable('d', [])
with ops.device('cpu:99'):
e_init = constant_op.constant(12)
e = variables_lib2.variable('e', initializer=e_init)
# The values below highlight how the VariableDeviceChooser puts initial
# values on the same device as the variable job.
self.assertDeviceEqual(a.device, '/device:GPU:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, '/device:GPU:0')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
self.assertDeviceEqual(c.device, '/cpu:12')
self.assertEqual(c.initial_value.op.colocation_groups(),
c.op.colocation_groups())
self.assertDeviceEqual(d.device, '/device:GPU:0')
self.assertEqual(d.initial_value.op.colocation_groups(),
d.op.colocation_groups())
self.assertDeviceEqual(e.device, '/device:GPU:0')
self.assertDeviceEqual(e.initial_value.device, '/cpu:99')
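# Added sketch: VariableDeviceChooser is just a callable device function, so
# routing it through arg_scope (as the tests above do) is equivalent to
# calling variable(..., device=chooser) directly.
def _example_device_chooser():
  with ops.Graph().as_default():
    chooser = variables_lib2.VariableDeviceChooser(num_tasks=2)
    with arg_scope([variables_lib2.variable], device=chooser):
      v = variables_lib2.variable('w', [])  # '/job:ps/task:0/cpu:0' per tests
    return v.device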
class ModelVariablesTest(test.TestCase):
def testNameAndShape(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.model_variable('a', [5])
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
self.assertListEqual([a], variables_lib2.get_model_variables('A'))
def testNotInLocalVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.model_variable('a', [5])
self.assertTrue(a in variables_lib.global_variables())
self.assertTrue(a in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES))
self.assertFalse(a in variables_lib.local_variables())
def testGetVariablesReturns(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.model_variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.model_variable('a', [5])
self.assertEquals([a], variables_lib2.get_variables('A'))
self.assertEquals([b], variables_lib2.get_variables('B'))
def testGetModelVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.model_variable('a', [5])
with variable_scope.variable_scope('B'):
b = variables_lib2.model_variable('a', [5])
self.assertEquals([a], variables_lib2.get_model_variables('A'))
self.assertEquals([b], variables_lib2.get_model_variables('B'))
def testGetTrainableVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
variables_lib2.local_variable([5])
a = variables_lib.Variable([5])
with variable_scope.variable_scope('B'):
variables_lib2.local_variable([5])
b = variables_lib.Variable([5])
self.assertEquals([a], variables_lib2.get_trainable_variables('A'))
self.assertEquals([b], variables_lib2.get_trainable_variables('B'))
def testGetLocalVariables(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
_ = variables_lib2.model_variable('a', [5])
with variable_scope.variable_scope('B'):
_ = variables_lib2.model_variable('a', [5])
self.assertEquals([], variables_lib2.get_local_variables('A'))
self.assertEquals([], variables_lib2.get_local_variables('B'))
def testInitializedVariableValue(self):
with self.cached_session() as sess:
a = variables_lib2.model_variable(
'a', [5], initializer=init_ops.ones_initializer())
sess.run(variables_lib.global_variables_initializer())
self.assertAllEqual(a.eval(), [1] * 5)
def testDeviceFn(self):
class DevFn(object):
def __init__(self):
self.counter = -1
def __call__(self, op):
self.counter += 1
return '/cpu:%d' % self.counter
with ops.Graph().as_default():
with arg_scope([variables_lib2.model_variable], device=DevFn()):
a = variables_lib2.model_variable('a', [5])
b = variables_lib2.model_variable('b', [20])
self.assertDeviceEqual(a.device, '/cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, '/cpu:1')
self.assertEqual(b.initial_value.op.colocation_groups(),
b.op.colocation_groups())
def testVariableWithVariableDeviceChooser(self):
with ops.Graph().as_default():
device_fn = variables_lib2.VariableDeviceChooser()
with arg_scope([variables_lib2.model_variable], device=device_fn):
a = variables_lib2.model_variable('a', [5])
b = variables_lib2.model_variable('b', [20])
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertEqual(a.initial_value.op.colocation_groups(),
a.op.colocation_groups())
self.assertDeviceEqual(b.device, 'cpu:0')
      self.assertEqual(b.initial_value.op.colocation_groups(),
                       b.op.colocation_groups())
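# Added sketch: model_variable registers the variable in both GLOBAL_VARIABLES
# and MODEL_VARIABLES (see testNotInLocalVariables above); MODEL_VARIABLES is
# the collection get_model_variables() filters on.
def _example_model_variable():
  with ops.Graph().as_default():
    m = variables_lib2.model_variable('m', [3])
    return m in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES)  # True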
class GetVariablesCollections(test.TestCase):
def testVariableCollection(self):
with self.cached_session():
a = variables_lib2.variable('a', [], collections='A')
b = variables_lib2.variable('b', [], collections='B')
self.assertEquals(a, ops.get_collection('A')[0])
self.assertEquals(b, ops.get_collection('B')[0])
def testVariableCollections(self):
with self.cached_session():
a = variables_lib2.variable('a', [], collections=['A', 'C'])
b = variables_lib2.variable('b', [], collections=['B', 'C'])
self.assertEquals(a, ops.get_collection('A')[0])
self.assertEquals(b, ops.get_collection('B')[0])
self.assertListEqual([a, b], ops.get_collection('C'))
def testVariableCollectionsWithArgScope(self):
with self.cached_session():
with arg_scope([variables_lib2.variable], collections='A'):
a = variables_lib2.variable('a', [])
b = variables_lib2.variable('b', [])
self.assertListEqual([a, b], ops.get_collection('A'))
def testVariableCollectionsWithArgScopeNested(self):
with self.cached_session():
with arg_scope([variables_lib2.variable], collections='A'):
a = variables_lib2.variable('a', [])
with arg_scope([variables_lib2.variable], collections='B'):
b = variables_lib2.variable('b', [])
self.assertEquals(a, ops.get_collection('A')[0])
self.assertEquals(b, ops.get_collection('B')[0])
def testVariableCollectionsWithArgScopeNonNested(self):
with self.cached_session():
with arg_scope([variables_lib2.variable], collections='A'):
a = variables_lib2.variable('a', [])
with arg_scope([variables_lib2.variable], collections='B'):
b = variables_lib2.variable('b', [])
variables_lib2.variable('c', [])
self.assertListEqual([a], ops.get_collection('A'))
self.assertListEqual([b], ops.get_collection('B'))
def testVariableRestoreWithArgScopeNested(self):
with self.cached_session():
a = variables_lib2.variable('a', [])
with arg_scope(
[variables_lib2.variable], trainable=False, collections=['A', 'B']):
b = variables_lib2.variable('b', [])
c = variables_lib2.variable('c', [], trainable=False)
self.assertEquals([a, c], variables_lib2.get_variables_to_restore())
self.assertEquals([a], variables_lib.trainable_variables())
self.assertEquals([b], ops.get_collection('A'))
self.assertEquals([b], ops.get_collection('B'))
class GetVariablesBySuffixTest(test.TestCase):
def testGetVariableGivenNameScoped(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
b = variables_lib2.variable('b', [5])
self.assertEquals([a], variables_lib2.get_variables_by_suffix('a'))
self.assertEquals([b], variables_lib2.get_variables_by_suffix('b'))
def testGetVariableWithScope(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
fooa = variables_lib2.variable('fooa', [5])
with variable_scope.variable_scope('B'):
a2 = variables_lib2.variable('a', [5])
matched_variables = variables_lib2.get_variables_by_suffix('a')
self.assertEquals([a, fooa, a2], matched_variables)
matched_variables = variables_lib2.get_variables_by_suffix('/a')
self.assertEquals([a, a2], matched_variables)
matched_variables = variables_lib2.get_variables_by_suffix('a', scope='A')
self.assertEquals([a, fooa], matched_variables)
def testGetVariableWithoutScope(self):
with self.cached_session():
a = variables_lib2.variable('a', [5])
fooa = variables_lib2.variable('fooa', [5])
b_a = variables_lib2.variable('B/a', [5])
matched_variables = variables_lib2.get_variables_by_suffix('a')
self.assertEquals([a, fooa, b_a], matched_variables)
matched_variables = variables_lib2.get_variables_by_suffix('fooa')
self.assertEquals([fooa], matched_variables)
class GetVariablesByNameTest(test.TestCase):
def testGetVariableGivenNameScoped(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
b = variables_lib2.variable('b', [5])
self.assertEquals([a], variables_lib2.get_variables_by_name('a'))
self.assertEquals([b], variables_lib2.get_variables_by_name('b'))
def testGetVariableWithScope(self):
with self.cached_session():
with variable_scope.variable_scope('A'):
a = variables_lib2.variable('a', [5])
fooa = variables_lib2.variable('fooa', [5])
with variable_scope.variable_scope('B'):
a2 = variables_lib2.variable('a', [5])
matched_variables = variables_lib2.get_variables_by_name('a')
self.assertEquals([a, a2], matched_variables)
matched_variables = variables_lib2.get_variables_by_name('fooa')
self.assertEquals([fooa], matched_variables)
matched_variables = variables_lib2.get_variables_by_name('/a')
self.assertEquals([], matched_variables)
matched_variables = variables_lib2.get_variables_by_name('a', scope='A')
self.assertEquals([a], matched_variables)
def testGetVariableWithoutScope(self):
with self.cached_session():
a = variables_lib2.variable('a', [5])
fooa = variables_lib2.variable('fooa', [5])
b_a = variables_lib2.variable('B/a', [5])
matched_variables = variables_lib2.get_variables_by_name('a')
self.assertEquals([a, b_a], matched_variables)
matched_variables = variables_lib2.get_variables_by_name('fooa')
self.assertEquals([fooa], matched_variables)
class GetVariableFullNameTest(test.TestCase):
def testVariable(self):
my_var0 = variables_lib2.variable('my_var0', shape=[])
full_name = variables_lib2.get_variable_full_name(my_var0)
self.assertEquals(full_name, my_var0.op.name)
def testPartitionedVariable(self):
input_full_name = 'my_var0'
partitioner = partitioned_variables.variable_axis_size_partitioner(2)
my_var0 = variables_lib2.variable(
'my_var0', shape=[2, 2], partitioner=partitioner)
for part_var in list(my_var0):
computed_full_name = variables_lib2.get_variable_full_name(part_var)
self.assertEquals(input_full_name, computed_full_name)
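# Added note: for a PartitionedVariable, every concrete slice reports the
# logical (unpartitioned) name -- 'my_var0' above -- which is what the
# checkpoint-assignment helpers tested below rely on when matching names.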
class AssignFromValuesTest(test.TestCase):
def testNoScopes(self):
init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))
with self.cached_session() as sess:
initializer = init_ops.truncated_normal_initializer(stddev=.1)
var0 = variables_lib2.variable(
'my_var0', shape=[1, 3, 1], initializer=initializer)
var1 = variables_lib2.variable(
'my_var1', shape=[2, 1, 2], initializer=initializer)
var_names_to_values = {'my_var0': init_value0, 'my_var1': init_value1}
assign_op, feed_dict = variables_lib2.assign_from_values(
var_names_to_values)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
sess.run(assign_op, feed_dict)
# Request and test the variable values:
var0, var1 = sess.run([var0, var1])
self.assertAllEqual(init_value0, var0)
self.assertAllEqual(init_value1, var1)
def testWithScopes(self):
init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))
with self.cached_session() as sess:
initializer = init_ops.truncated_normal_initializer(stddev=.1)
with variable_scope.variable_scope('my_model/my_layer0'):
var0 = variables_lib2.variable(
'my_var0', shape=[1, 3, 1], initializer=initializer)
with variable_scope.variable_scope('my_model/my_layer1'):
var1 = variables_lib2.variable(
'my_var1', shape=[2, 1, 2], initializer=initializer)
var_names_to_values = {
'my_model/my_layer0/my_var0': init_value0,
'my_model/my_layer1/my_var1': init_value1
}
assign_op, feed_dict = variables_lib2.assign_from_values(
var_names_to_values)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
sess.run(assign_op, feed_dict)
# Request and test the variable values:
var0, var1 = sess.run([var0, var1])
self.assertAllEqual(init_value0, var0)
self.assertAllEqual(init_value1, var1)
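# Added note: assign_from_values returns an (assign_op, feed_dict) pair that
# the caller runs explicitly; the *_fn variant tested next wraps the same
# mechanics in a single init_fn(sess) callable.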
class AssignFromValuesFnTest(test.TestCase):
def testNoScopes(self):
init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))
with self.cached_session() as sess:
initializer = init_ops.truncated_normal_initializer(stddev=.1)
var0 = variables_lib2.variable(
'my_var0', shape=[1, 3, 1], initializer=initializer)
var1 = variables_lib2.variable(
'my_var1', shape=[2, 1, 2], initializer=initializer)
var_names_to_values = {'my_var0': init_value0, 'my_var1': init_value1}
init_fn = variables_lib2.assign_from_values_fn(var_names_to_values)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
var0, var1 = sess.run([var0, var1])
self.assertAllEqual(init_value0, var0)
self.assertAllEqual(init_value1, var1)
def testWithScopes(self):
init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))
with self.cached_session() as sess:
initializer = init_ops.truncated_normal_initializer(stddev=.1)
with variable_scope.variable_scope('my_model/my_layer0'):
var0 = variables_lib2.variable(
'my_var0', shape=[1, 3, 1], initializer=initializer)
with variable_scope.variable_scope('my_model/my_layer1'):
var1 = variables_lib2.variable(
'my_var1', shape=[2, 1, 2], initializer=initializer)
var_names_to_values = {
'my_model/my_layer0/my_var0': init_value0,
'my_model/my_layer1/my_var1': init_value1
}
init_fn = variables_lib2.assign_from_values_fn(var_names_to_values)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
var0, var1 = sess.run([var0, var1])
self.assertAllEqual(init_value0, var0)
self.assertAllEqual(init_value1, var1)
class AssignFromCheckpointTest(test.TestCase):
def create_checkpoint_from_values(self,
var_names_to_values,
checkpoint_dir,
global_step=None):
"""Creates a checkpoint from a mapping of name to values in model_dir.
Args:
var_names_to_values: a map from variable names to values.
checkpoint_dir: the directory where the checkpoint will be saved.
global_step: the global step used to save the checkpoint.
Returns:
the model_path to the checkpoint.
"""
var_list = []
with session.Session('', graph=ops.Graph()) as sess:
# Create a set of variables to save in the checkpoint.
for var_name in var_names_to_values:
var_value = var_names_to_values[var_name]
var_list.append(variables_lib.Variable(var_value, name=var_name))
saver = saver_lib.Saver(var_list)
init_op = variables_lib.variables_initializer(var_list)
sess.run(init_op)
# Save the initialized values in the file at 'checkpoint_dir'
return saver.save(sess, checkpoint_dir, global_step=global_step)
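  # Added sketch of the restore side exercised by the tests below (names taken
  # from the tests themselves):
  #   op, feed_dict = variables_lib2.assign_from_checkpoint(
  #       model_path, {'v0': var0, 'v1': var1})
  #   sess.run(op, feed_dict)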
def testLoadExistingVariables(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'load_existing_variables'))
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[])
var1 = variables_lib2.variable('my_var1', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1}
op, feed_dict = variables_lib2.assign_from_checkpoint(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
sess.run(op, feed_dict)
# Request and test the variable values:
self.assertEqual(init_value0, var0.eval())
self.assertEqual(init_value1, var1.eval())
# Tests restoring PartitionedVariables and tests using a dictionary
# of lists as the assign_from_checkpoint() var_list param.
def testLoadPartitionedVariables(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'load_partitioned_variables'))
init_value0 = np.array([[10.0, 11.0], [12.0, 13.0]])
init_value1 = np.array([20.0]) # Partitioned into 1 part, edge case.
var_names_to_values = {'var0': init_value0, 'var1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
# var0 and var1 are PartitionedVariables.
partitioner = partitioned_variables.variable_axis_size_partitioner(2)
var0 = variables_lib2.variable(
'var0', shape=init_value0.shape, partitioner=partitioner)
var0full = variables_lib2.variable('var0full', shape=init_value0.shape)
var1 = variables_lib2.variable(
'var1', shape=init_value1.shape, partitioner=partitioner)
# Convert var0 and var1 into a list of underlying variables.
vars_to_restore = {'var0': list(var0) + [var0full], 'var1': list(var1)}
op, feed_dict = variables_lib2.assign_from_checkpoint(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
sess.run(op, feed_dict)
      # Request and test the variable values. PartitionedVariables can't
      # be evaluated directly, so we wrap them in an identity op.
self.assertTrue(
np.array_equal(init_value0,
array_ops.identity(var0).eval()))
self.assertTrue(np.array_equal(init_value0, var0full.eval()))
self.assertTrue(
np.array_equal(init_value1,
array_ops.identity(var1).eval()))
def testRaisesValueErrorIfAVariableIsntFound(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(),
'raises_value_error_if_var_isnt_found'))
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session():
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[])
var1 = variables_lib2.variable('my_var1', shape=[])
vars_to_restore = {'v0_fake': var0, 'v1': var1}
with self.assertRaises(ValueError):
variables_lib2.assign_from_checkpoint(model_path, vars_to_restore)
def testInitFromCheckpointWithScopes(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(),
'init_from_checkpoint_with_scopes'))
init_value0 = np.asarray(
[1.0, 3.0, 9.0], dtype=np.float32).reshape((1, 3, 1))
init_value1 = np.asarray(
[2.0, 4.0, 6.0, 8.0], dtype=np.float32).reshape((2, 1, 2))
var_names_to_values = {'layer0/v0': init_value0, 'layer1/v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
with variable_scope.variable_scope('my_model/my_layer0'):
var0 = variables_lib2.variable('my_var0', shape=init_value0.shape)
with variable_scope.variable_scope('my_model/my_layer1'):
var1 = variables_lib2.variable('my_var1', shape=init_value1.shape)
vars_to_restore = {'layer0/v0': var0, 'layer1/v1': var1}
op, feed_dict = variables_lib2.assign_from_checkpoint(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
sess.run(op, feed_dict)
# Request and test the variable values:
self.assertAllEqual(init_value0, var0.eval())
self.assertAllEqual(init_value1, var1.eval())
class AssignFromCheckpointFnTest(test.TestCase):
def create_checkpoint_from_values(self,
var_names_to_values,
checkpoint_dir,
global_step=None):
"""Creates a checkpoint from a mapping of name to values in model_dir.
Args:
var_names_to_values: a map from variable names to values.
checkpoint_dir: the directory where the checkpoint will be saved.
global_step: the global step used to save the checkpoint.
Returns:
the model_path to the checkpoint.
"""
var_list = []
with session.Session('', graph=ops.Graph()) as sess:
# Create a set of variables to save in the checkpoint.
for var_name in var_names_to_values:
var_value = var_names_to_values[var_name]
var_list.append(variables_lib.Variable(var_value, name=var_name))
saver = saver_lib.Saver(var_list)
init_op = variables_lib.variables_initializer(var_list)
sess.run(init_op)
# Save the initialized values in the file at 'checkpoint_dir'
return saver.save(sess, checkpoint_dir, global_step=global_step)
def testLoadExistingVariables(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'load_existing_variables'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[])
var1 = variables_lib2.variable('my_var1', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1}
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
self.assertEqual(init_value0, var0.eval())
self.assertEqual(init_value1, var1.eval())
def testLoadExistingVariablesDifferentShapeDefaultDoesNotAllowReshape(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(),
'load_existing_vars_no_reshape'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = [[10.0, 11.0]]
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[2, 1])
var1 = variables_lib2.variable('my_var1', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1}
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
with self.assertRaises(errors_impl.InvalidArgumentError):
init_fn(sess)
def testLoadExistingVariablesDifferentShapeAllowReshape(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(
self.get_temp_dir(),
'load_existing_variables_different_shape_allow_reshape'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = [[10.0, 11.0]]
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[2, 1])
var1 = variables_lib2.variable('my_var1', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1}
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore, reshape_variables=True)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
self.assertAllEqual(np.transpose(np.array(init_value0)), var0.eval())
self.assertEqual(init_value1, var1.eval())
def testNotFoundError(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'not_found_error'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[])
var1 = variables_lib2.variable('my_var1', shape=[])
var2 = variables_lib2.variable('my_var2', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1, 'v2': var2}
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
with self.assertRaises(errors_impl.NotFoundError):
init_fn(sess)
def testMissingVariablesList(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'missing_variables_list'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('v0', shape=[])
var1 = variables_lib2.variable('v1', shape=[])
var2 = variables_lib2.variable('v2', shape=[])
vars_to_restore = [var0, var1, var2]
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore, ignore_missing_vars=True)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
self.assertEqual(init_value0, var0.eval())
self.assertEqual(init_value1, var1.eval())
def testMissingVariablesDict(self):
model_dir = tempfile.mkdtemp(
prefix=os.path.join(self.get_temp_dir(), 'missing_variables_dict'))
if gfile.Exists(model_dir):
gfile.DeleteRecursively(model_dir)
init_value0 = 10.0
init_value1 = 20.0
var_names_to_values = {'v0': init_value0, 'v1': init_value1}
with self.cached_session() as sess:
model_path = self.create_checkpoint_from_values(var_names_to_values,
model_dir)
var0 = variables_lib2.variable('my_var0', shape=[])
var1 = variables_lib2.variable('my_var1', shape=[])
var2 = variables_lib2.variable('my_var2', shape=[])
vars_to_restore = {'v0': var0, 'v1': var1, 'v2': var2}
init_fn = variables_lib2.assign_from_checkpoint_fn(
model_path, vars_to_restore, ignore_missing_vars=True)
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
# Perform the assignment.
init_fn(sess)
# Request and test the variable values:
self.assertEqual(init_value0, var0.eval())
self.assertEqual(init_value1, var1.eval())
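# Added note: the two ignore_missing_vars=True tests above show that entries
# absent from the checkpoint ('v2') are skipped silently, and that
# vars_to_restore may be either a list of variables or a
# {checkpoint_name: variable} dict.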
class ZeroInitializerOpTest(test.TestCase):
def _testZeroInitializer(self, shape, initializer, use_init):
var = variables_lib.Variable(initializer)
var_zero = variables_lib2.zero_initializer(var)
with self.cached_session() as sess:
with self.assertRaisesOpError('Attempting to use uninitialized value'):
var.eval()
if use_init:
sess.run(var.initializer)
with self.assertRaisesOpError('input is already initialized'):
var_zero.eval()
self.assertAllClose(np.ones(shape), var.eval())
else:
var_zero.eval()
self.assertAllClose(np.zeros(shape), var.eval())
def testZeroInitializer(self):
for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64):
for use_init in (False, True):
self._testZeroInitializer([10, 20], array_ops.ones(
[10, 20], dtype=dtype), use_init)
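# Added note: zero_initializer(var) zero-fills an *uninitialized* variable and
# raises 'input is already initialized' otherwise; the resource-variable test
# below behaves the same apart from the error raised when reading an
# uninitialized value.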
class ZeroVarInitializerOpTest(test.TestCase):
def _testZeroVarInitializer(self, shape, initializer, use_init):
var = resource_variable_ops.ResourceVariable(initializer)
var_zero = variables_lib2.zero_initializer(var)
with self.cached_session() as sess:
with self.assertRaisesOpError('Error while reading resource variable'):
var.eval()
if use_init:
sess.run(var.initializer)
with self.assertRaisesOpError('input is already initialized'):
var_zero.eval()
self.assertAllClose(np.ones(shape), var.eval())
else:
var_zero.eval()
self.assertAllClose(np.zeros(shape), var.eval())
def testZeroVarInitializer(self):
for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64):
for use_init in (False, True):
self._testZeroVarInitializer([10, 20],
array_ops.ones([10, 20], dtype=dtype),
use_init)
class FilterVariablesTest(test.TestCase):
def setUp(self):
g = ops.Graph()
with g.as_default():
var_list = []
var_list.append(variables_lib.Variable(0, name='conv1/weights'))
var_list.append(variables_lib.Variable(0, name='conv1/biases'))
var_list.append(variables_lib.Variable(0, name='conv2/weights'))
var_list.append(variables_lib.Variable(0, name='conv2/biases'))
var_list.append(variables_lib.Variable(0, name='clfs/weights'))
var_list.append(variables_lib.Variable(0, name='clfs/biases'))
self._var_list = var_list
def _test_filter_variables(self,
expected_var_names,
include_patterns=None,
exclude_patterns=None,
reg_search=True):
filtered_var_list = variables_lib2.filter_variables(
self._var_list,
include_patterns=include_patterns,
exclude_patterns=exclude_patterns,
reg_search=reg_search)
filtered_var_names = [var.op.name for var in filtered_var_list]
for name in filtered_var_names:
self.assertIn(name, expected_var_names)
for name in expected_var_names:
self.assertIn(name, filtered_var_names)
self.assertEqual(len(filtered_var_names), len(expected_var_names))
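  # Added note (an inference from the tests below): with reg_search=True the
  # patterns may match anywhere in the name (re.search-style), while
  # reg_search=False anchors them at the start of the name (re.match-style) --
  # which is why the bare 'biases' pattern matches nothing in the final test.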
def testNoFiltering(self):
self._test_filter_variables(expected_var_names=[
'conv1/weights', 'conv1/biases', 'conv2/weights', 'conv2/biases',
'clfs/weights', 'clfs/biases'
])
def testIncludeBiases(self):
self._test_filter_variables(
expected_var_names=['conv1/biases', 'conv2/biases', 'clfs/biases'],
include_patterns=['biases'])
def testExcludeWeights(self):
self._test_filter_variables(
expected_var_names=['conv1/biases', 'conv2/biases', 'clfs/biases'],
exclude_patterns=['weights'])
def testExcludeWeightsAndConv1(self):
self._test_filter_variables(
expected_var_names=['conv2/biases', 'clfs/biases'],
exclude_patterns=['weights', 'conv1'])
def testTwoIncludePatternsEnsureNoVariablesTwiceInFilteredList(self):
self._test_filter_variables(
expected_var_names=[
'conv1/weights', 'conv1/biases', 'conv2/weights', 'clfs/weights'
],
include_patterns=['conv1', 'weights'])
def testIncludeConv1ExcludeBiases(self):
self._test_filter_variables(
expected_var_names=['conv1/weights'],
include_patterns=['conv1'],
exclude_patterns=['biases'])
def testRegMatchIncludeBiases(self):
self._test_filter_variables(
expected_var_names=['conv1/biases', 'conv2/biases', 'clfs/biases'],
include_patterns=['.*biases'],
reg_search=False)
def testRegMatchIncludeBiasesWithIncompleteRegExpHasNoMatches(self):
self._test_filter_variables(
expected_var_names=[], include_patterns=['biases'], reg_search=False)
if __name__ == '__main__':
test.main()
| apache-2.0 | -6,440,173,154,006,627,000 | 40.528752 | 80 | 0.647619 | false |
gangadhar-kadam/sapphire_app | selling/report/customers_not_buying_since_long_time/customers_not_buying_since_long_time.py | 2 | 1965 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import getdate, cint
def execute(filters=None):
	if not filters: filters = {}
	days_since_last_order = cint(filters.get("days_since_last_order"))
	if days_since_last_order <= 0:
		webnotes.msgprint("Please mention a positive value in the 'Days Since Last Order' field", raise_exception=1)
columns = get_columns()
customers = get_so_details()
data = []
for cust in customers:
if cust[8] >= days_since_last_order:
			cust.insert(7, get_last_so_amt(cust[0]))
data.append(cust)
return columns, data
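# Added note: this follows the standard Frappe/ERPNext script-report contract,
# where execute(filters) returns (columns, data) and each column uses the
# "Label:Fieldtype/Options:Width" string format built by get_columns() below.
# cust[8] is 'days_since_last_order' from the SELECT in get_so_details(), and
# insert(7, ...) slots the last-order amount just before the last order date
# to match the column order.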
def get_so_details():
return webnotes.conn.sql("""select
cust.name,
cust.customer_name,
cust.territory,
cust.customer_group,
count(distinct(so.name)) as 'num_of_order',
sum(net_total) as 'total_order_value',
sum(if(so.status = "Stopped",
so.net_total * so.per_delivered/100,
so.net_total)) as 'total_order_considered',
max(so.transaction_date) as 'last_sales_order_date',
DATEDIFF(CURDATE(), max(so.transaction_date)) as 'days_since_last_order'
from `tabCustomer` cust, `tabSales Order` so
where cust.name = so.customer and so.docstatus = 1
group by cust.name
order by 'days_since_last_order' desc """,as_list=1)
def get_last_so_amt(customer):
res = webnotes.conn.sql("""select net_total from `tabSales Order`
		where customer = '%(customer)s' and docstatus = 1 order by transaction_date desc
		limit 1""" % {'customer': customer})
return res and res[0][0] or 0
def get_columns():
return [
"Customer:Link/Customer:120",
"Customer Name:Data:120",
"Territory::120",
"Customer Group::120",
"Number of Order::120",
"Total Order Value:Currency:120",
"Total Order Considered:Currency:160",
"Last Order Amount:Currency:160",
"Last Sales Order Date:Date:160",
"Days Since Last Order::160"
] | agpl-3.0 | 3,845,790,205,213,018,000 | 30.709677 | 103 | 0.69313 | false |
DiamondLightSource/auto_tomo_calibration-experimental | old_code_scripts/lmfit-py/lmfit/printfuncs.py | 7 | 6963 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 20 19:24:21 2012
@author: Tillsten
Changes:
- 13-Feb-2013 M Newville
complemented "report_errors" and "report_ci" with
"error_report" and "ci_report" (respectively) which
return the text of the report. Thus report_errors()
is simply:
def report_errors(params, modelpars=None, show_correl=True):
print error_report(params, modelpars=modelpars,
show_correl=show_correl)
and similar for report_ci() / ci_report()
"""
from __future__ import print_function
from .parameter import Parameters
import re
def alphanumeric_sort(s, _nsre=re.compile('([0-9]+)')):
return [int(text) if text.isdigit() else text.lower()
for text in re.split(_nsre, s)]
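# Added example (not in the original module): alphanumeric_sort is meant to be
# used as a sort key, ordering embedded integers numerically.
def _alphanumeric_sort_example():
    return sorted(['a10', 'a2', 'b1'], key=alphanumeric_sort)  # ['a2', 'a10', 'b1']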
def getfloat_attr(obj, attr, fmt='%.3f'):
"format an attribute of an object for printing"
val = getattr(obj, attr, None)
if val is None:
return 'unknown'
if isinstance(val, int):
return '%d' % val
if isinstance(val, float):
return fmt % val
else:
return repr(val)
def gformat(val, length=11):
"""format a number with '%g'-like format, except that
the return will be length ``length`` (default=12)
and have at least length-6 significant digits
"""
length = max(length, 7)
fmt = '{: .%ig}' % (length-6)
if isinstance(val, int):
out = ('{: .%ig}' % (length-2)).format(val)
if len(out) > length:
out = fmt.format(val)
else:
out = fmt.format(val)
if len(out) < length:
if 'e' in out:
ie = out.find('e')
if '.' not in out[:ie]:
out = out[:ie] + '.' + out[ie:]
out = out.replace('e', '0'*(length-len(out))+'e')
else:
fmt = '{: .%ig}' % (length-1)
out = fmt.format(val)[:length]
if len(out) < length:
pad = '0' if '.' in out else ' '
out += pad*(length-len(out))
return out
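# Added examples for gformat (hand-checked against the code above, with the
# default length=11):
#   gformat(1.25) -> ' 1.25000000'   (zero-padded to 11 characters)
#   gformat(1e12) -> ' 1.0000e+12'   (exponent form, also width 11)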
CORREL_HEAD = '[[Correlations]] (unreported correlations are < % .3f)'
def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1,
sort_pars=False):
"""return text of a report for fitted params best-fit values,
uncertainties and correlations
arguments
----------
inpars Parameters from fit or Minizer object returned from a fit.
modelpars Optional Known Model Parameters [None]
show_correl whether to show list of sorted correlations [True]
min_correl smallest correlation absolute value to show [0.1]
sort_pars If True, then fit_report will show parameter names
sorted in alphanumerical order. If False, then the
parameters will be listed in the order they were added to
the Parameters dictionary. If sort_pars is callable, then
this (one argument) function is used to extract a
comparison key from each list element.
"""
if isinstance(inpars, Parameters):
result, params = None, inpars
if hasattr(inpars, 'params'):
result = inpars
params = inpars.params
if sort_pars:
if callable(sort_pars):
key = sort_pars
else:
key = alphanumeric_sort
parnames = sorted(params, key=key)
else:
# dict.keys() returns a KeysView in py3, and they're indexed further
# down
parnames = list(params.keys())
buff = []
add = buff.append
if result is not None:
add("[[Fit Statistics]]")
add(" # function evals = %s" % getfloat_attr(result, 'nfev'))
add(" # data points = %s" % getfloat_attr(result, 'ndata'))
add(" # variables = %s" % getfloat_attr(result, 'nvarys'))
add(" chi-square = %s" % getfloat_attr(result, 'chisqr'))
add(" reduced chi-square = %s" % getfloat_attr(result, 'redchi'))
namelen = max([len(n) for n in parnames])
add("[[Variables]]")
for name in parnames:
par = params[name]
space = ' '*(namelen+1-len(name))
nout = "%s:%s" % (name, space)
inval = '(init= ?)'
if par.init_value is not None:
inval = '(init=% .7g)' % par.init_value
if modelpars is not None and name in modelpars:
inval = '%s, model_value =% .7g' % (inval, modelpars[name].value)
try:
sval = gformat(par.value)
except (TypeError, ValueError):
sval = 'Non Numeric Value?'
if par.stderr is not None:
serr = gformat(par.stderr, length=9)
try:
spercent = '({:.2%})'.format(abs(par.stderr/par.value))
except ZeroDivisionError:
spercent = ''
sval = '%s +/-%s %s' % (sval, serr, spercent)
if par.vary:
add(" %s %s %s" % (nout, sval, inval))
elif par.expr is not None:
add(" %s %s == '%s'" % (nout, sval, par.expr))
else:
add(" %s % .7g (fixed)" % (nout, par.value))
if show_correl:
add(CORREL_HEAD % min_correl)
correls = {}
for i, name in enumerate(parnames):
par = params[name]
if not par.vary:
continue
if hasattr(par, 'correl') and par.correl is not None:
for name2 in parnames[i+1:]:
if (name != name2 and name2 in par.correl and
abs(par.correl[name2]) > min_correl):
correls["%s, %s" % (name, name2)] = par.correl[name2]
sort_correl = sorted(correls.items(), key=lambda it: abs(it[1]))
sort_correl.reverse()
for name, val in sort_correl:
lspace = max(1, 25 - len(name))
add(' C(%s)%s = % .3f ' % (name, (' '*30)[:lspace], val))
return '\n'.join(buff)
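# Added usage sketch: fit_report accepts either a Parameters instance or a
# minimizer result carrying .params, e.g.
#   text = fit_report(result, min_correl=0.25, sort_pars=True)
# report_errors()/report_fit() below simply print this text.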
def report_errors(params, **kws):
"""print a report for fitted params: see error_report()"""
print(fit_report(params, **kws))
def report_fit(params, **kws):
"""print a report for fitted params: see error_report()"""
print(fit_report(params, **kws))
def ci_report(ci):
"""return text of a report for confidence intervals"""
maxlen = max([len(i) for i in ci])
buff = []
add = buff.append
convp = lambda x: ("%.2f" % (x[0]*100))+'%'
conv = lambda x: "%.5f" % x[1]
title_shown = False
for name, row in ci.items():
if not title_shown:
add("".join([''.rjust(maxlen)]+[i.rjust(10)
for i in map(convp, row)]))
title_shown = True
add("".join([name.rjust(maxlen)]+[i.rjust(10)
for i in map(conv, row)]))
return '\n'.join(buff)
def report_ci(ci):
"""print a report for confidence intervals"""
print(ci_report(ci))
| apache-2.0 | 7,645,644,019,731,446,000 | 34.345178 | 78 | 0.537125 | false |
chromium/chromium | net/tools/build_hpack_constants.py | 43 | 18730 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script builds a table that has to be manully inserted into
net/spdy/hpack/hpack_constants.cc. It also contains data that potentially has to
be updated if the http hpack algorithm changes its constants."""
import re
# This is from
# http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-08
# It may include dummy rows as long as those don't look too much like real
# data.
SPEC_DATA_DRAFT_08 = r"""
code
code as bits as hex len
sym aligned to MSB aligned in
to LSB bits
( 0) |11111111|11000 1ff8 [13]
( 1) |11111111|11111111|1011000 7fffd8 [23]
( 2) |11111111|11111111|11111110|0010 fffffe2 [28]
( 3) |11111111|11111111|11111110|0011 fffffe3 [28]
( 4) |11111111|11111111|11111110|0100 fffffe4 [28]
( 5) |11111111|11111111|11111110|0101 fffffe5 [28]
( 6) |11111111|11111111|11111110|0110 fffffe6 [28]
( 7) |11111111|11111111|11111110|0111 fffffe7 [28]
( 8) |11111111|11111111|11111110|1000 fffffe8 [28]
( 9) |11111111|11111111|11101010 ffffea [24]
( 10) |11111111|11111111|11111111|111100 3ffffffc [30]
( 11) |11111111|11111111|11111110|1001 fffffe9 [28]
( 12) |11111111|11111111|11111110|1010 fffffea [28]
( 13) |11111111|11111111|11111111|111101 3ffffffd [30]
( 14) |11111111|11111111|11111110|1011 fffffeb [28]
( 15) |11111111|11111111|11111110|1100 fffffec [28]
( 16) |11111111|11111111|11111110|1101 fffffed [28]
( 17) |11111111|11111111|11111110|1110 fffffee [28]
( 18) |11111111|11111111|11111110|1111 fffffef [28]
( 19) |11111111|11111111|11111111|0000 ffffff0 [28]
( 20) |11111111|11111111|11111111|0001 ffffff1 [28]
( 21) |11111111|11111111|11111111|0010 ffffff2 [28]
( 22) |11111111|11111111|11111111|111110 3ffffffe [30]
( 23) |11111111|11111111|11111111|0011 ffffff3 [28]
( 24) |11111111|11111111|11111111|0100 ffffff4 [28]
( 25) |11111111|11111111|11111111|0101 ffffff5 [28]
( 26) |11111111|11111111|11111111|0110 ffffff6 [28]
( 27) |11111111|11111111|11111111|0111 ffffff7 [28]
( 28) |11111111|11111111|11111111|1000 ffffff8 [28]
( 29) |11111111|11111111|11111111|1001 ffffff9 [28]
( 30) |11111111|11111111|11111111|1010 ffffffa [28]
( 31) |11111111|11111111|11111111|1011 ffffffb [28]
' ' ( 32) |010100 14 [ 6]
'!' ( 33) |11111110|00 3f8 [10]
'"' ( 34) |11111110|01 3f9 [10]
'#' ( 35) |11111111|1010 ffa [12]
'$' ( 36) |11111111|11001 1ff9 [13]
'%' ( 37) |010101 15 [ 6]
'&' ( 38) |11111000 f8 [ 8]
''' ( 39) |11111111|010 7fa [11]
'(' ( 40) |11111110|10 3fa [10]
')' ( 41) |11111110|11 3fb [10]
'*' ( 42) |11111001 f9 [ 8]
'+' ( 43) |11111111|011 7fb [11]
',' ( 44) |11111010 fa [ 8]
'-' ( 45) |010110 16 [ 6]
'.' ( 46) |010111 17 [ 6]
'/' ( 47) |011000 18 [ 6]
'0' ( 48) |00000 0 [ 5]
'1' ( 49) |00001 1 [ 5]
'2' ( 50) |00010 2 [ 5]
'3' ( 51) |011001 19 [ 6]
'4' ( 52) |011010 1a [ 6]
'5' ( 53) |011011 1b [ 6]
'6' ( 54) |011100 1c [ 6]
'7' ( 55) |011101 1d [ 6]
'8' ( 56) |011110 1e [ 6]
'9' ( 57) |011111 1f [ 6]
':' ( 58) |1011100 5c [ 7]
';' ( 59) |11111011 fb [ 8]
'<' ( 60) |11111111|1111100 7ffc [15]
'=' ( 61) |100000 20 [ 6]
'>' ( 62) |11111111|1011 ffb [12]
'?' ( 63) |11111111|00 3fc [10]
'@' ( 64) |11111111|11010 1ffa [13]
'A' ( 65) |100001 21 [ 6]
'B' ( 66) |1011101 5d [ 7]
'C' ( 67) |1011110 5e [ 7]
'D' ( 68) |1011111 5f [ 7]
'E' ( 69) |1100000 60 [ 7]
'F' ( 70) |1100001 61 [ 7]
'G' ( 71) |1100010 62 [ 7]
'H' ( 72) |1100011 63 [ 7]
'I' ( 73) |1100100 64 [ 7]
'J' ( 74) |1100101 65 [ 7]
'K' ( 75) |1100110 66 [ 7]
'L' ( 76) |1100111 67 [ 7]
'M' ( 77) |1101000 68 [ 7]
'N' ( 78) |1101001 69 [ 7]
'O' ( 79) |1101010 6a [ 7]
'P' ( 80) |1101011 6b [ 7]
'Q' ( 81) |1101100 6c [ 7]
'R' ( 82) |1101101 6d [ 7]
'S' ( 83) |1101110 6e [ 7]
'T' ( 84) |1101111 6f [ 7]
'U' ( 85) |1110000 70 [ 7]
'V' ( 86) |1110001 71 [ 7]
'W' ( 87) |1110010 72 [ 7]
'X' ( 88) |11111100 fc [ 8]
'Y' ( 89) |1110011 73 [ 7]
'Z' ( 90) |11111101 fd [ 8]
'[' ( 91) |11111111|11011 1ffb [13]
'\' ( 92) |11111111|11111110|000 7fff0 [19]
']' ( 93) |11111111|11100 1ffc [13]
'^' ( 94) |11111111|111100 3ffc [14]
'_' ( 95) |100010 22 [ 6]
'`' ( 96) |11111111|1111101 7ffd [15]
'a' ( 97) |00011 3 [ 5]
'b' ( 98) |100011 23 [ 6]
'c' ( 99) |00100 4 [ 5]
'd' (100) |100100 24 [ 6]
'e' (101) |00101 5 [ 5]
'f' (102) |100101 25 [ 6]
'g' (103) |100110 26 [ 6]
'h' (104) |100111 27 [ 6]
'i' (105) |00110 6 [ 5]
'j' (106) |1110100 74 [ 7]
'k' (107) |1110101 75 [ 7]
'l' (108) |101000 28 [ 6]
'm' (109) |101001 29 [ 6]
'n' (110) |101010 2a [ 6]
'o' (111) |00111 7 [ 5]
'p' (112) |101011 2b [ 6]
'q' (113) |1110110 76 [ 7]
'r' (114) |101100 2c [ 6]
's' (115) |01000 8 [ 5]
't' (116) |01001 9 [ 5]
'u' (117) |101101 2d [ 6]
'v' (118) |1110111 77 [ 7]
'w' (119) |1111000 78 [ 7]
'x' (120) |1111001 79 [ 7]
'y' (121) |1111010 7a [ 7]
'z' (122) |1111011 7b [ 7]
'{' (123) |11111111|1111110 7ffe [15]
'|' (124) |11111111|100 7fc [11]
'}' (125) |11111111|111101 3ffd [14]
'~' (126) |11111111|11101 1ffd [13]
(127) |11111111|11111111|11111111|1100 ffffffc [28]
(128) |11111111|11111110|0110 fffe6 [20]
(129) |11111111|11111111|010010 3fffd2 [22]
(130) |11111111|11111110|0111 fffe7 [20]
(131) |11111111|11111110|1000 fffe8 [20]
(132) |11111111|11111111|010011 3fffd3 [22]
(133) |11111111|11111111|010100 3fffd4 [22]
(134) |11111111|11111111|010101 3fffd5 [22]
(135) |11111111|11111111|1011001 7fffd9 [23]
(136) |11111111|11111111|010110 3fffd6 [22]
(137) |11111111|11111111|1011010 7fffda [23]
(138) |11111111|11111111|1011011 7fffdb [23]
(139) |11111111|11111111|1011100 7fffdc [23]
(140) |11111111|11111111|1011101 7fffdd [23]
(141) |11111111|11111111|1011110 7fffde [23]
(142) |11111111|11111111|11101011 ffffeb [24]
(143) |11111111|11111111|1011111 7fffdf [23]
(144) |11111111|11111111|11101100 ffffec [24]
(145) |11111111|11111111|11101101 ffffed [24]
(146) |11111111|11111111|010111 3fffd7 [22]
(147) |11111111|11111111|1100000 7fffe0 [23]
(148) |11111111|11111111|11101110 ffffee [24]
(149) |11111111|11111111|1100001 7fffe1 [23]
(150) |11111111|11111111|1100010 7fffe2 [23]
(151) |11111111|11111111|1100011 7fffe3 [23]
(152) |11111111|11111111|1100100 7fffe4 [23]
(153) |11111111|11111110|11100 1fffdc [21]
(154) |11111111|11111111|011000 3fffd8 [22]
(155) |11111111|11111111|1100101 7fffe5 [23]
(156) |11111111|11111111|011001 3fffd9 [22]
(157) |11111111|11111111|1100110 7fffe6 [23]
(158) |11111111|11111111|1100111 7fffe7 [23]
(159) |11111111|11111111|11101111 ffffef [24]
(160) |11111111|11111111|011010 3fffda [22]
(161) |11111111|11111110|11101 1fffdd [21]
(162) |11111111|11111110|1001 fffe9 [20]
(163) |11111111|11111111|011011 3fffdb [22]
(164) |11111111|11111111|011100 3fffdc [22]
(165) |11111111|11111111|1101000 7fffe8 [23]
(166) |11111111|11111111|1101001 7fffe9 [23]
(167) |11111111|11111110|11110 1fffde [21]
(168) |11111111|11111111|1101010 7fffea [23]
(169) |11111111|11111111|011101 3fffdd [22]
(170) |11111111|11111111|011110 3fffde [22]
(171) |11111111|11111111|11110000 fffff0 [24]
(172) |11111111|11111110|11111 1fffdf [21]
(173) |11111111|11111111|011111 3fffdf [22]
(174) |11111111|11111111|1101011 7fffeb [23]
(175) |11111111|11111111|1101100 7fffec [23]
(176) |11111111|11111111|00000 1fffe0 [21]
(177) |11111111|11111111|00001 1fffe1 [21]
(178) |11111111|11111111|100000 3fffe0 [22]
(179) |11111111|11111111|00010 1fffe2 [21]
(180) |11111111|11111111|1101101 7fffed [23]
(181) |11111111|11111111|100001 3fffe1 [22]
(182) |11111111|11111111|1101110 7fffee [23]
(183) |11111111|11111111|1101111 7fffef [23]
(184) |11111111|11111110|1010 fffea [20]
(185) |11111111|11111111|100010 3fffe2 [22]
(186) |11111111|11111111|100011 3fffe3 [22]
(187) |11111111|11111111|100100 3fffe4 [22]
(188) |11111111|11111111|1110000 7ffff0 [23]
(189) |11111111|11111111|100101 3fffe5 [22]
(190) |11111111|11111111|100110 3fffe6 [22]
(191) |11111111|11111111|1110001 7ffff1 [23]
(192) |11111111|11111111|11111000|00 3ffffe0 [26]
(193) |11111111|11111111|11111000|01 3ffffe1 [26]
(194) |11111111|11111110|1011 fffeb [20]
(195) |11111111|11111110|001 7fff1 [19]
(196) |11111111|11111111|100111 3fffe7 [22]
(197) |11111111|11111111|1110010 7ffff2 [23]
(198) |11111111|11111111|101000 3fffe8 [22]
(199) |11111111|11111111|11110110|0 1ffffec [25]
(200) |11111111|11111111|11111000|10 3ffffe2 [26]
(201) |11111111|11111111|11111000|11 3ffffe3 [26]
(202) |11111111|11111111|11111001|00 3ffffe4 [26]
(203) |11111111|11111111|11111011|110 7ffffde [27]
(204) |11111111|11111111|11111011|111 7ffffdf [27]
(205) |11111111|11111111|11111001|01 3ffffe5 [26]
(206) |11111111|11111111|11110001 fffff1 [24]
(207) |11111111|11111111|11110110|1 1ffffed [25]
(208) |11111111|11111110|010 7fff2 [19]
(209) |11111111|11111111|00011 1fffe3 [21]
(210) |11111111|11111111|11111001|10 3ffffe6 [26]
(211) |11111111|11111111|11111100|000 7ffffe0 [27]
(212) |11111111|11111111|11111100|001 7ffffe1 [27]
(213) |11111111|11111111|11111001|11 3ffffe7 [26]
(214) |11111111|11111111|11111100|010 7ffffe2 [27]
(215) |11111111|11111111|11110010 fffff2 [24]
(216) |11111111|11111111|00100 1fffe4 [21]
(217) |11111111|11111111|00101 1fffe5 [21]
(218) |11111111|11111111|11111010|00 3ffffe8 [26]
(219) |11111111|11111111|11111010|01 3ffffe9 [26]
(220) |11111111|11111111|11111111|1101 ffffffd [28]
(221) |11111111|11111111|11111100|011 7ffffe3 [27]
(222) |11111111|11111111|11111100|100 7ffffe4 [27]
(223) |11111111|11111111|11111100|101 7ffffe5 [27]
(224) |11111111|11111110|1100 fffec [20]
(225) |11111111|11111111|11110011 fffff3 [24]
(226) |11111111|11111110|1101 fffed [20]
(227) |11111111|11111111|00110 1fffe6 [21]
(228) |11111111|11111111|101001 3fffe9 [22]
(229) |11111111|11111111|00111 1fffe7 [21]
(230) |11111111|11111111|01000 1fffe8 [21]
(231) |11111111|11111111|1110011 7ffff3 [23]
(232) |11111111|11111111|101010 3fffea [22]
(233) |11111111|11111111|101011 3fffeb [22]
(234) |11111111|11111111|11110111|0 1ffffee [25]
(235) |11111111|11111111|11110111|1 1ffffef [25]
(236) |11111111|11111111|11110100 fffff4 [24]
(237) |11111111|11111111|11110101 fffff5 [24]
(238) |11111111|11111111|11111010|10 3ffffea [26]
(239) |11111111|11111111|1110100 7ffff4 [23]
(240) |11111111|11111111|11111010|11 3ffffeb [26]
(241) |11111111|11111111|11111100|110 7ffffe6 [27]
(242) |11111111|11111111|11111011|00 3ffffec [26]
(243) |11111111|11111111|11111011|01 3ffffed [26]
(244) |11111111|11111111|11111100|111 7ffffe7 [27]
(245) |11111111|11111111|11111101|000 7ffffe8 [27]
(246) |11111111|11111111|11111101|001 7ffffe9 [27]
(247) |11111111|11111111|11111101|010 7ffffea [27]
(248) |11111111|11111111|11111101|011 7ffffeb [27]
(249) |11111111|11111111|11111111|1110 ffffffe [28]
(250) |11111111|11111111|11111101|100 7ffffec [27]
(251) |11111111|11111111|11111101|101 7ffffed [27]
(252) |11111111|11111111|11111101|110 7ffffee [27]
(253) |11111111|11111111|11111101|111 7ffffef [27]
(254) |11111111|11111111|11111110|000 7fffff0 [27]
(255) |11111111|11111111|11111011|10 3ffffee [26]
EOS (256) |11111111|11111111|11111111|111111 3fffffff [30]
"""
count = 0
spec_lines = SPEC_DATA_DRAFT_08.splitlines()
for l in spec_lines:
m = re.match(
r"^\s*('.+'|EOS)? \( *(\d+)\) \|([10\|]+) +\w+ \[ ?(\d+)\]", l)
if m:
g = m.groups()
spec_comment = g[0]
spec_bitstring = g[2]
bitstring = spec_bitstring.replace('|','').ljust(32,'0')
bitvalue = int(bitstring, 2)
bitcount = g[3]
valueid = g[1]
        if spec_comment:
            comment = ' // %3s %s' % (spec_comment, spec_bitstring)
        else:
            comment = ' // %s' % spec_bitstring
print(' {0x%08xul, %02s, %03s},%s' % (
bitvalue, bitcount, valueid, comment))
assert int(valueid) == count, "Expected data for %s, got %s." % (count,
valueid)
count += 1
print("Total: %s" % count)
assert count == 257, "Expected 257 values, got %d." % count
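# For reference, the row this emits for 'c' (code 99, bits 00100) should be:
#   {0x20000000ul,  5,  99}, // 'c' 00100
# since 00100 left-aligned in 32 bits is 0x20000000 and the code is 5 bits long.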
| bsd-3-clause | 1,459,966,701,684,311,300 | 60.409836 | 80 | 0.430486 | false |
goblinbr/python-flask-rest-api-example | test/test_apiv1p0.py | 1 | 12666 | import unittest
from run import app
from app import database
import json
class TestApiv1p0(unittest.TestCase):
def setUp(self):
self.server = app.test_client()
database.clear()
self.user1 = database.create_user({'name': 'new user 1'})
self.user2 = database.create_user({'name': 'new user 2'})
self.headers_user1 = {'Authorization' : 'token %s' % self.user1['token']}
self.headers_user2 = {'Authorization': 'token %s' % self.user2['token']}
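    # Note: the API under test accepts the auth token either as an
    # 'Authorization: token <value>' header (used by most tests below) or as
    # a '?token=' URL parameter (see test_calls_with_user_token_as_url_param).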
def test_get_todo_list(self):
database.create_todo(self.user1, {'title': 'Buy ice cream'})
database.create_todo(self.user1, {'title': 'Visit grandpa'})
database.create_todo(self.user2, {'title': 'Watch star wars'})
response = self.server.get('/api/v1.0/todo', headers=self.headers_user1)
self.assertEqual(200, response.status_code)
data = json.loads(response.data)
self.assertEqual(2, len(data))
self.assertEqual(1, data[0]['id'])
self.assertEqual('Buy ice cream', data[0]['title'])
self.assertEqual(False, data[0]['done'])
self.assertEqual(2, data[1]['id'])
self.assertEqual('Visit grandpa', data[1]['title'])
self.assertEqual(False, data[1]['done'])
def test_get_todo(self):
todo = database.create_todo(self.user1, {'title': 'Buy ice cream'})
response = self.server.get('/api/v1.0/todo/%s' % todo['id'], headers=self.headers_user1)
self.assertEqual(200, response.status_code)
data = json.loads(response.data)
self.assertEqual(1, data['id'])
self.assertEqual('Buy ice cream', data['title'])
self.assertEqual(False, data['done'])
def test_get_todo_with_invalid_id(self):
response = self.server.get('/api/v1.0/todo/12154', headers=self.headers_user1)
self.assertEqual(404, response.status_code)
data = json.loads(response.data)
self.assertEqual('Todo 12154 not found', data['error'])
def test_get_todo_from_another_user(self):
todo = database.create_todo(self.user2, {'title': 'Buy ice cream'})
response = self.server.get('/api/v1.0/todo/%s' % todo['id'], headers=self.headers_user1)
self.assertEqual(404, response.status_code)
data = json.loads(response.data)
self.assertEqual('Todo %s not found' % todo['id'], data['error'])
def test_create_todo(self):
response = self.server.post('/api/v1.0/todo', data='{"title": "Test todo"}', content_type='application/json', headers=self.headers_user1)
self.assertEqual(201, response.status_code)
data = json.loads(response.data)
self.assertEqual(1, data['id'])
self.assertEqual('Test todo', data['title'])
self.assertEqual(False, data['done'])
self.assertEqual(self.user1['id'], data['user_id'])
response = self.server.post('/api/v1.0/todo', data='{"title": "Test todo 2"}', content_type='application/json', headers=self.headers_user1)
self.assertEqual(201, response.status_code)
data = json.loads(response.data)
self.assertEqual(2, data['id'])
self.assertEqual('Test todo 2', data['title'])
self.assertEqual(False, data['done'])
def test_create_todo_without_title(self):
response = self.server.post('/api/v1.0/todo', data='{"titlex": "Test todo"}', content_type='application/json', headers=self.headers_user1)
self.assertEqual(400, response.status_code)
data = json.loads(response.data)
self.assertEqual('Title is a required field', data['error'])
def test_create_todo_with_other_fields(self):
response = self.server.post('/api/v1.0/todo', data='{"title": "Test todo", "answer": 42, "id": 5}', content_type='application/json', headers=self.headers_user1)
self.assertEqual(201, response.status_code)
data = json.loads(response.data)
self.assertEqual(1, data['id'])
self.assertEqual('Test todo', data['title'])
self.assertEqual(False, 'answer' in data)
def test_create_with_invalid_json(self):
response = self.server.post('/api/v1.0/todo', data='{jovemnerd.com.br}', headers=self.headers_user1)
self.assertEqual(400, response.status_code)
data = json.loads(response.data)
self.assertEqual('No JSON found', data['error'])
def test_update_todo(self):
created_todo = database.create_todo(self.user1, {"title": "Test todo"})
response = self.server.put('/api/v1.0/todo/%s' % created_todo['id'], data='{"title": "Renamed todo", "done": true}', content_type='application/json', headers=self.headers_user1)
self.assertEqual(201, response.status_code)
data = json.loads(response.data)
self.assertEqual(created_todo['id'], data['id'])
self.assertEqual('Renamed todo', data['title'])
self.assertEqual(True, data['done'])
todo = database.get_todo(self.user1, created_todo['id'])
self.assertEqual('Renamed todo', todo['title'])
self.assertEqual(True, todo['done'])
def test_update_todo_with_invalid_id(self):
response = self.server.put('/api/v1.0/todo/213', data='{"title": "Renamed todo"}', content_type='application/json', headers=self.headers_user1)
self.assertEqual(404, response.status_code)
data = json.loads(response.data)
self.assertEqual('Todo 213 not found', data['error'])
def test_update_todo_with_other_fields(self):
created_todo = database.create_todo(self.user1, {"title": "Test todo"})
response = self.server.put('/api/v1.0/todo/%s' % created_todo['id'], data='{"title": "Renamed todo", "answer": 42, "id": 55}', content_type='application/json', headers=self.headers_user1)
self.assertEqual(201, response.status_code)
data = json.loads(response.data)
self.assertEqual(created_todo['id'], data['id'])
self.assertEqual('Renamed todo', data['title'])
self.assertEqual(False, 'answer' in data)
def test_update_with_invalid_json(self):
response = self.server.post('/api/v1.0/todo', data='{jovemnerd.com.br}', headers=self.headers_user1)
self.assertEqual(400, response.status_code)
data = json.loads(response.data)
self.assertEqual('No JSON found', data['error'])
def test_update_todo_from_another_user(self):
created_todo = database.create_todo(self.user2, {"title": "Test todo"})
response = self.server.put('/api/v1.0/todo/%s' % created_todo['id'], data='{"title": "Renamed todo", "done": true}', content_type='application/json', headers=self.headers_user1)
self.assertEqual(404, response.status_code)
data = json.loads(response.data)
self.assertEqual('Todo %s not found' % created_todo['id'], data['error'])
def test_delete_todo(self):
created_todo = database.create_todo(self.user1, {"title": "Test todo"})
response = self.server.delete('/api/v1.0/todo/%s' % created_todo['id'], headers=self.headers_user1)
self.assertEqual(201, response.status_code)
data = json.loads(response.data)
self.assertEqual(created_todo['id'], data['id'])
self.assertEqual('Test todo', data['title'])
self.assertEqual(False, data['done'])
todo = database.get_todo(self.user1, created_todo['id'])
self.assertEqual(True, todo is None)
def test_delete_todo_with_invalid_id(self):
response = self.server.delete('/api/v1.0/todo/555', headers=self.headers_user1)
self.assertEqual(404, response.status_code)
data = json.loads(response.data)
self.assertEqual('Todo 555 not found', data['error'])
def test_delete_todo_from_another_user(self):
created_todo = database.create_todo(self.user2, {"title": "Test todo"})
response = self.server.delete('/api/v1.0/todo/%s' % created_todo['id'], headers=self.headers_user1)
self.assertEqual(404, response.status_code)
data = json.loads(response.data)
self.assertEqual('Todo %s not found' % created_todo['id'], data['error'])
def test_create_user(self):
response = self.server.post('/api/v1.0/user', data='{"name": "new user"}', content_type='application/json')
self.assertEqual(201, response.status_code)
user = json.loads(response.data)
self.assertEqual('new user', user['name'])
response = self.server.post('/api/v1.0/todo?token=%s' % user['token'], data='{"title": "Test todo"}', content_type='application/json')
self.assertEqual(201, response.status_code)
data = json.loads(response.data)
response = self.server.get('/api/v1.0/todo?token=%s' % user['token'])
self.assertEqual(200, response.status_code)
todos = json.loads(response.data)
self.assertEqual(1, len(todos))
def test_calls_to_invalid_routes(self):
response = self.server.get('/api/v1.0/banana', headers=self.headers_user1)
self.assertEqual(404, response.status_code)
data = json.loads(response.data)
self.assertEqual('Not found', data['error'])
response = self.server.post('/api/v1.0/banana', data='{"title": "Test"}', headers=self.headers_user1)
self.assertEqual(404, response.status_code)
data = json.loads(response.data)
self.assertEqual('Not found', data['error'])
response = self.server.put('/api/v1.0/banana/1', data='{"title": "Test"}', headers=self.headers_user1)
self.assertEqual(404, response.status_code)
data = json.loads(response.data)
self.assertEqual('Not found', data['error'])
response = self.server.delete('/api/v1.0/banana/1', headers=self.headers_user1)
self.assertEqual(404, response.status_code)
data = json.loads(response.data)
self.assertEqual('Not found', data['error'])
def test_without_token(self):
created_todo = database.create_todo(self.user1, {"title": "Test todo"})
response = self.server.get('/api/v1.0/todo')
self.assertEqual(401, response.status_code)
response = self.server.get('/api/v1.0/todo/%s' % created_todo['id'])
self.assertEqual(401, response.status_code)
response = self.server.post('/api/v1.0/todo', data='{"title": "Test todo"}', content_type='application/json')
self.assertEqual(401, response.status_code)
response = self.server.put('/api/v1.0/todo/%s' % created_todo['id'], data='{"title": "Renamed todo", "done": true}', content_type='application/json')
self.assertEqual(401, response.status_code)
response = self.server.delete('/api/v1.0/todo/%s' % created_todo['id'])
self.assertEqual(401, response.status_code)
def test_calls_with_invalid_token(self):
headers = {'Authorization' : 'token not-a-valid-token'}
created_todo = database.create_todo(self.user1, {"title": "Test todo"})
response = self.server.get('/api/v1.0/todo', headers=headers)
self.assertEqual(401, response.status_code)
response = self.server.get('/api/v1.0/todo/%s' % created_todo['id'], headers=headers)
self.assertEqual(401, response.status_code)
response = self.server.post('/api/v1.0/todo', data='{"title": "Test todo"}', content_type='application/json', headers=headers)
self.assertEqual(401, response.status_code)
response = self.server.put('/api/v1.0/todo/%s' % created_todo['id'], data='{"title": "Renamed todo", "done": true}', content_type='application/json', headers=headers)
self.assertEqual(401, response.status_code)
response = self.server.delete('/api/v1.0/todo/%s' % created_todo['id'], headers=headers)
self.assertEqual(401, response.status_code)
def test_calls_with_user_token_as_url_param(self):
response = self.server.post('/api/v1.0/todo?token=%s' % self.user1['token'], data='{"title": "Test todo"}', content_type='application/json')
self.assertEqual(201, response.status_code)
        created_todo = json.loads(response.data)
response = self.server.get('/api/v1.0/todo/%s?token=%s' % (created_todo['id'], self.user1['token']))
self.assertEqual(200, response.status_code)
response = self.server.put('/api/v1.0/todo/%s?token=%s' % (created_todo['id'], self.user1['token']), data='{"title": "Renamed test todo"}', content_type='application/json')
self.assertEqual(201, response.status_code)
response = self.server.delete('/api/v1.0/todo/%s?token=%s' % (created_todo['id'], self.user1['token']))
self.assertEqual(201, response.status_code)
if __name__ == '__main__':
unittest.main()
| mit | 1,805,150,519,838,539,500 | 50.697959 | 195 | 0.640928 | false |
gloryofrobots/langs | codegen/node_gen.py | 2 | 2085 | from tpl import render
NODES = [
# internals
"NT_GOTO",
# code nodes
"NT_TRUE",
"NT_FALSE",
"NT_VOID",
"NT_INT",
"NT_FLOAT",
"NT_STR",
"NT_MULTI_STR",
"NT_CHAR",
"NT_WILDCARD",
"NT_NAME",
"NT_TEMPORARY",
"NT_SYMBOL",
"NT_TYPE",
"NT_MAP",
"NT_LIST",
"NT_TUPLE",
"NT_INDEXED",
"NT_UNIT",
"NT_CONS",
"NT_COMMA",
"NT_CASE",
"NT_LENSE",
"NT_LITERAL",
"NT_FUN",
"NT_DEF",
"NT_OVERRIDE",
"NT_USE",
"NT_LAMBDA",
"NT_DISPATCH",
"NT_FARGS",
"NT_CONDITION",
"NT_WHEN",
"NT_MATCH",
"NT_TRY",
"NT_RECEIVE",
"NT_DECORATOR",
"NT_MODULE",
"NT_IMPORT",
"NT_IMPORT_HIDING",
"NT_INCLUDE",
"NT_INCLUDE_HIDING",
"NT_EXPORT",
"NT_LOAD",
"NT_TRAIT",
"NT_EXTEND",
"NT_GENERIC",
"NT_METHOD",
"NT_INTERFACE",
"NT_DESCRIBE",
"NT_BIND",
"NT_THROW",
"NT_REST",
"NT_ASSIGN",
"NT_ASSIGN_FORCE",
"NT_CALL",
"NT_JUXTAPOSITION",
"NT_UNDEFINE",
"NT_LOOKUP",
"NT_IMPORTED_NAME",
"NT_HEAD",
"NT_TAIL",
"NT_DROP",
"NT_RANGE",
"NT_MODIFY",
"NT_OF",
"NT_IS_IMPLEMENTED",
"NT_AS",
"NT_DELAY",
"NT_LET",
"NT_NOT",
"NT_AND",
"NT_OR",
"NT_END_EXPR",
"NT_END",
]
## FOR PYTHON LEXER
print "# ************************ OBIN NODES*****************************"
for number, token in enumerate(NODES):
print "%s = %d" % (token, number)
print "# ************************ OBIN NODES REPR *****************************"
S = "__NT_REPR__ = ["
for name in NODES:
S += "%s, " % str(("\"%s\"" % name))
S += "]"
print S
print
print
print "def node_type_to_s(ntype):"
print " return __NT_REPR__[ntype]"
# print "# ************************ OBIN NODES MAPPING *****************************"
# for name in NODES:
# print "%s: %s," % (name.replace("NT_", "TT_"), name)
# print "# ************************ COMPILE SWITCH*****************************"
# for number, node in enumerate(NODES):
# n_str = node.replace("NT_", "")
# print " elif %s == node_type:" % node
# print " _compile_%s(process, compiler, code, node)" % n_str
| gpl-2.0 | -317,272,711,578,826,000 | 14.676692 | 85 | 0.476259 | false |
schlitzered/pyredis | tests/unit/test_protocol.py | 1 | 10345 | from unittest import TestCase
import pyredis.protocol as hiredis
from pyredis.protocol import writer, to_bytes
import sys
# The class ReaderTest is more or less copied from the hiredis python package.
# The Licence Terms of hiredis (BSD) appeal to the ReaderTest class!
class ReaderTest(TestCase):
def setUp(self):
self.reader = hiredis.Reader()
def reply(self):
return self.reader.gets()
def test_nothing(self):
self.assertEqual(False, self.reply())
def test_error_when_feeding_non_string(self):
self.assertRaises(TypeError, self.reader.feed, 1)
def test_protocol_error(self):
self.reader.feed(b'x')
self.assertRaises(hiredis.ProtocolError, self.reply)
def test_protocol_error_with_custom_class(self):
self.reader = hiredis.Reader(protocolError=RuntimeError)
self.reader.feed(b'x')
self.assertRaises(RuntimeError, self.reply)
def test_protocol_error_with_custom_callable(self):
class CustomException(Exception):
pass
self.reader = hiredis.Reader(protocolError=lambda e: CustomException(e))
self.reader.feed(b'x')
self.assertRaises(CustomException, self.reply)
def test_fail_with_wrong_protocol_error_class(self):
self.assertRaises(TypeError, hiredis.Reader, protocolError='wrong')
def test_error_string(self):
self.reader.feed(b'-error\r\n')
error = self.reply()
self.assertEqual(hiredis.ReplyError, type(error))
self.assertEqual(('error',), error.args)
def test_error_string_partial(self):
self.reader.feed(b'-err')
self.assertFalse(self.reply())
self.reader.feed(b'or\r\n')
error = self.reply()
self.assertEqual(hiredis.ReplyError, type(error))
self.assertEqual(('error',), error.args)
def test_error_string_partial_footer(self):
self.reader.feed(b'-error')
self.assertFalse(self.reply())
self.reader.feed(b'\r')
self.assertFalse(self.reply())
self.reader.feed(b'\n')
error = self.reply()
self.assertEqual(hiredis.ReplyError, type(error))
self.assertEqual(('error',), error.args)
def test_error_string_with_custom_class(self):
self.reader = hiredis.Reader(replyError=RuntimeError)
self.reader.feed(b'-error\r\n')
error = self.reply()
self.assertEqual(RuntimeError, type(error))
self.assertEqual(('error',), error.args)
def test_error_string_with_custom_callable(self):
class CustomException(Exception):
pass
self.reader = hiredis.Reader(replyError=lambda e: CustomException(e))
self.reader.feed(b'-error\r\n')
error = self.reply()
self.assertEqual(CustomException, type(error))
self.assertEqual(('error',), error.args)
def test_fail_with_wrong_reply_error_class(self):
self.assertRaises(TypeError, hiredis.Reader, replyError='wrong')
def test_errors_in_nested_multi_bulk(self):
self.reader.feed(b'*2\r\n-err0\r\n-err1\r\n')
for r, error in zip(('err0', 'err1'), self.reply()):
self.assertEqual(hiredis.ReplyError, type(error))
self.assertEqual((r,), error.args)
def test_integer(self):
value = 2 ** 63 - 1 # Largest 64-bit signed integer
self.reader.feed((':{0}\r\n'.format(value)).encode('ascii'))
self.assertEqual(value, self.reply())
def test_integer_partial_int(self):
value = 2 ** 63 - 1 # Largest 64-bit signed integer
strvalue = str(value).encode('ascii')
part1, part2 = strvalue[:6], strvalue[6:]
self.reader.feed(b':')
self.reader.feed(part1)
self.assertFalse(self.reply())
self.reader.feed(part2)
self.reader.feed(b'\r\n')
self.assertEqual(value, self.reply())
def test_integer_partial_footer(self):
value = 2 ** 63 - 1 # Largest 64-bit signed integer
self.reader.feed((':{0}'.format(value)).encode('ascii'))
self.assertFalse(self.reply())
self.reader.feed(b'\r')
self.assertFalse(self.reply())
self.reader.feed(b'\n')
self.assertEqual(value, self.reply())
def test_status_string(self):
self.reader.feed(b'+ok\r\n')
self.assertEqual(b'ok', self.reply())
def test_status_string_partial(self):
self.reader.feed(b'+ok')
self.assertFalse(self.reply())
self.reader.feed(b'ok\r\n')
self.assertEqual(b'okok', self.reply())
def test_status_string_partial_footer(self):
self.reader.feed(b'+ok')
self.assertFalse(self.reply())
self.reader.feed(b'\r')
self.assertFalse(self.reply())
self.reader.feed(b'\n')
self.assertEqual(b'ok', self.reply())
def test_empty_bulk_string(self):
self.reader.feed(b'$0\r\n\r\n')
self.assertEqual(b'', self.reply())
def test_NULL_bulk_string(self):
self.reader.feed(b'$-1\r\n')
self.assertEqual(None, self.reply())
def test_bulk_string(self):
self.reader.feed(b'$5\r\nhello\r\n')
self.assertEqual(b'hello', self.reply())
def test_bulk_string_partial(self):
self.reader.feed(b'$5\r\nhel')
self.assertFalse(self.reply())
self.assertFalse(self.reply())
self.reader.feed(b'lo\r\n')
self.assertEqual(b'hello', self.reply())
def test_bulk_string_partial_footer(self):
self.reader.feed(b'$5\r\nhello')
self.assertFalse(self.reply())
self.reader.feed(b'\r')
self.assertFalse(self.reply())
self.reader.feed(b'\n')
self.assertEqual(b'hello', self.reply())
def test_bulk_string_without_encoding(self):
snowman = b'\xe2\x98\x83'
self.reader.feed(b'$3\r\n' + snowman + b'\r\n')
self.assertEqual(snowman, self.reply())
def test_bulk_string_with_encoding(self):
snowman = b'\xe2\x98\x83'
self.reader = hiredis.Reader(encoding='utf-8')
self.reader.feed(b'$3\r\n' + snowman + b'\r\n')
self.assertEqual(snowman.decode('utf-8'), self.reply())
def test_bulk_string_with_other_encoding(self):
snowman = b'\xe2\x98\x83'
self.reader = hiredis.Reader(encoding='utf-32')
self.reader.feed(b'$3\r\n' + snowman + b'\r\n')
self.assertEqual(snowman, self.reply())
def test_bulk_string_with_invalid_encoding(self):
self.reader = hiredis.Reader(encoding='unknown')
self.reader.feed(b'$5\r\nhello\r\n')
self.assertRaises(LookupError, self.reply)
def test_null_multi_bulk(self):
self.reader.feed(b'*-1\r\n')
self.assertEqual(None, self.reply())
def test_empty_multi_bulk(self):
self.reader.feed(b'*0\r\n')
self.assertEqual([], self.reply())
def test_multi_bulk(self):
self.reader.feed(b'*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n')
self.assertEqual([b'hello', b'world'], self.reply())
def test_multi_bulk_with_partial_reply(self):
self.reader.feed(b'*2\r\n$5\r\nhello\r\n')
self.assertEqual(False, self.reply())
self.reader.feed(b':1\r\n')
self.assertEqual([b'hello', 1], self.reply())
def test_nested_multi_bulk(self):
self.reader.feed(b'*2\r\n*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n$1\r\n!\r\n')
self.assertEqual([[b'hello', b'world'], b'!'], self.reply())
def test_nested_multi_bulk_partial(self):
self.reader.feed(b'*2\r\n*2\r\n$5\r\nhello\r')
self.assertEqual(False, self.reply())
self.reader.feed(b'\n$5\r\nworld\r\n$1\r\n!\r\n')
self.assertEqual([[b'hello', b'world'], b'!'], self.reply())
def test_nested_multi_bulk_depth(self):
self.reader.feed(b'*1\r\n*1\r\n*1\r\n*1\r\n$1\r\n!\r\n')
self.assertEqual([[[[b'!']]]], self.reply())
def test_subclassable(self):
class TestReader(hiredis.Reader):
def __init__(self, *args, **kwargs):
super(TestReader, self).__init__(*args, **kwargs)
reader = TestReader()
reader.feed(b'+ok\r\n')
self.assertEqual(b'ok', reader.gets())
def test_invalid_offset(self):
data = b'+ok\r\n'
self.assertRaises(ValueError, self.reader.feed, data, 6)
def test_invalid_length(self):
data = b'+ok\r\n'
self.assertRaises(ValueError, self.reader.feed, data, 0, 6)
def test_ok_offset(self):
data = b'blah+ok\r\n'
self.reader.feed(data, 4)
self.assertEqual(b'ok', self.reply())
def test_ok_length(self):
data = b'blah+ok\r\n'
self.reader.feed(data, 4, len(data) - 4)
self.assertEqual(b'ok', self.reply())
def test_feed_bytearray(self):
if sys.hexversion >= 0x02060000:
self.reader.feed(bytearray(b'+ok\r\n'))
self.assertEqual(b'ok', self.reply())
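# The writer tests below exercise the Redis protocol (RESP) request encoding:
# an array header '*<argc>\r\n' followed, for each argument, by a bulk string
# '$<byte length>\r\n<bytes>\r\n'.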
class TestWriter(TestCase):
def test_encode_0_args(self):
expected = b'*0\r\n'
self.assertEqual(
writer(),
expected)
def test_encode_1_args(self):
expected = b'*1\r\n$4\r\nPING\r\n'
self.assertEqual(
writer('PING'),
expected)
def test_encode_2_args(self):
expected = b'*2\r\n$4\r\nECHO\r\n$14\r\nTest!!!!111elf\r\n'
self.assertEqual(
writer('ECHO', 'Test!!!!111elf'),
expected)
def test_encode_3_args(self):
expected = b'*3\r\n$3\r\nSET\r\n$8\r\nKey/Name\r\n$19\r\nSomeValue_?#!\xc3\x84\xc3\x9c\xc3\x96\r\n'
self.assertEqual(
writer('SET', 'Key/Name', 'SomeValue_?#!ÄÜÖ'),
expected)
class TestToBytes(TestCase):
def test_int(self):
expected = b'512'
result = to_bytes(512)
self.assertEqual(result, expected)
def test_float(self):
expected = b'0.815'
result = to_bytes(0.815)
self.assertEqual(result, expected)
def test_str(self):
expected = b'\xc3\xbc\xc3\x9f_blarg'
result = to_bytes('üß_blarg')
self.assertEqual(result, expected)
def test_bytes(self):
expected = b'0815'
result = to_bytes(b'0815')
self.assertEqual(result, expected)
def test_ValueError(self):
self.assertRaises(ValueError, to_bytes, object())
| mit | -5,533,343,915,852,620,000 | 33.013158 | 107 | 0.605029 | false |
murat1985/bagpipe-bgp | bagpipe/exabgp/structure/rtc.py | 1 | 3836 | """
Copyright (c) 2014, Orange
All rights reserved.
File released under the BSD 3-Clause license.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from struct import pack,unpack
from bagpipe.exabgp.structure.asn import ASN
from bagpipe.exabgp.structure.address import AFI,SAFI
from bagpipe.exabgp.message.update.attribute.communities import RouteTarget
class RouteTargetConstraint(object):
# TODO: no support yet for RTC variable length with prefixing
def __init__(self,afi,safi,origin_as,route_target):
self.afi = AFI(afi)
self.safi = SAFI(safi)
self.origin_as = origin_as
self.route_target = route_target
def __len__(self):
if self.route_target is None:
return 1
else:
return (5 + len(self.route_target))
def __str__ (self):
if self.route_target is None:
return "RTC Wildcard"
else:
return "RTC<%s>:%s" % ( self.origin_as, self.route_target )
def __repr__(self):
return self.__str__()
def __cmp__(self,other):
if (isinstance(other,RouteTargetConstraint) and
self.origin_as == other.origin_as and
self.route_target == other.route_target):
return 0
else:
return -1
def __hash__(self):
return hash(self.pack())
@staticmethod
def resetFlags(char):
return chr(ord(char) & ~(0x40))
def pack(self):
        if self.route_target is None:
return pack("!B",0)
else:
packedRT = self.route_target.pack()
# We reset ext com flag bits from the first byte in the packed RT
# because in an RTC route these flags never appear.
return pack("!BL", len(self)*8, self.origin_as) + RouteTargetConstraint.resetFlags(packedRT[0]) + packedRT[1:]
@staticmethod
def unpack(afi,safi,data):
len_in_bits = ord(data[0])
data=data[1:]
if (len_in_bits==0):
return RouteTargetConstraint(afi,safi,ASN(0),None)
if (len_in_bits<4):
raise Exception("RTC route too short to be decoded (len %d bits)" % len_in_bits)
asn = ASN( unpack('!L', data[0:4] )[0] )
data = data[4:]
rt = RouteTarget.unpackFrom(data)
return RouteTargetConstraint(afi,safi,asn,rt)
| apache-2.0 | 5,667,648,205,401,776,000 | 35.188679 | 122 | 0.660323 | false |
mdlavin/billiards-analyzer | analyzer/analyze.py | 1 | 10916 | import pymc as pm
import numpy as np
import markov
import markov_symbolic
import math
import itertools
import functools
import collections
class Match(object):
def __init__(self, players, winning_team, order="unordered",
foul_end=False):
self.order = order
self.players = players
self.winning_team = winning_team
self.foul_end = foul_end
# If there are only two players then they are at least partially
# ordered
if order == "unordered" and len(players) == 2:
self.order = "partial"
def reorder(players, winning_team, order_variation):
# For odd orderings, the winning team's position changes
if order_variation % 2 != 0:
winning_team = (winning_team + 1) % 2
if len(players) % 2 != 0:
raise ValueError("There must be an even number of players")
if len(players) > 4:
raise ValueError("More than 4 players is not currently supported")
if order_variation == 0:
return (players, winning_team)
if order_variation < len(players):
deque = collections.deque(players)
deque.rotate(order_variation)
return (list(deque), winning_team)
if len(players) <= 2:
raise ValueError("There are only two orderings for two player matches")
temp_players = list(players)
temp_players[0] = players[2]
temp_players[2] = players[0]
if order_variation >= len(players) * 2:
raise ValueError("There are only eight possible orderings with " +
"four players")
deque = collections.deque(temp_players)
deque.rotate(order_variation-len(players))
return (list(deque), winning_team)
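# Worked example: reorder([p0, p1, p2, p3], winning_team=0, order_variation=1)
# flips the winning team to 1 (odd variation) and rotates the seating to
# [p3, p0, p1, p2].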
def value(v):
if isinstance(v, pm.Variable):
return v.value
else:
return v
class MatchEvaluator(object):
def eval_unordered(self, players, winning_team, foul_end):
orderings = range(len(players) * 2)
return self._eval_with_orderings(players, winning_team,
orderings, foul_end)
def eval_partial_ordered(self, players, winning_team, foul_end):
orderings = range(len(players))
return self._eval_with_orderings(players, winning_team,
orderings, foul_end)
def _eval_with_orderings(self, players, winning_team, orderings, foul_end):
count=0
total=0
for ordering in orderings:
total += self.eval_with_order(players, winning_team,
ordering, foul_end)
count += 1
return total/count
def eval_with_order(self, players, winning_team, order, foul_end):
(players, winning_team) = reorder(players, winning_team, order)
return self.eval(players, winning_team, foul_end)
def eval(self, players, winning_team, foul_end):
raise Exception("The eval method must be implemented")
class MarkovMatchEvaluator(MatchEvaluator):
def _build_uninitialized_chain(self, num_players,
markov=markov, ballsPerTeam=8):
chain = self._create_new_chain()
for team_a_balls in range(ballsPerTeam):
chain.new_state( (0, 'win', 0, team_a_balls) )
chain.new_state( (1, 'win', team_a_balls, 0) )
for team_b_balls in range(ballsPerTeam):
chain.new_state( (0, 'foul-win', team_a_balls, team_b_balls) )
chain.new_state( (1, 'foul-win', team_a_balls, team_b_balls) )
for player_num in range(num_players):
chain.new_state( (player_num, team_a_balls, team_b_balls) )
return chain
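    # State labels used throughout: (player_index, a_balls, b_balls) for a
    # player about to shoot, plus absorbing outcomes (team, 'win', a, b) and
    # (team, 'foul-win', a, b).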
def _win_states(self, winning_team, foul_end):
def _create_state(winning_balls, losing_balls):
state_label = 'foul-win' if foul_end else 'win'
if winning_team == 0:
team_a_balls = winning_balls
team_b_balls = losing_balls
else:
team_a_balls = losing_balls
team_b_balls = winning_balls
return (winning_team, state_label, team_a_balls, team_b_balls)
for losing_team_balls in range(8):
if foul_end:
for winning_team_balls in range(8):
yield _create_state(winning_team_balls, losing_team_balls)
else:
yield _create_state(0, losing_team_balls)
def _set_state_transitions(self, players, chain, ballsPerTeam=8):
for i in range(len(players)):
next_player_index = (i+1) % len(players)
chance_of_sink = players[i]['sink']
chance_of_foul_end = players[i]['foul_end']
chance_of_miss = 1 - (chance_of_sink + chance_of_foul_end)
for team_a_balls in range(ballsPerTeam):
for team_b_balls in range(ballsPerTeam):
next_player_state = chain.get_state(
(next_player_index, team_a_balls, team_b_balls)
)
player_state = chain.get_state(
(i, team_a_balls, team_b_balls)
)
# Create the player miss transition
chain.set_transition(player_state,
next_player_state,
chance_of_miss)
# Create the foul win transition
foul_end_state = chain.get_state(
((i+1) % 2, 'foul-win', team_a_balls, team_b_balls)
)
chain.set_transition(player_state, foul_end_state,
chance_of_foul_end)
# Create the sink transition
if i % 2 == 0:
if team_a_balls == 0:
sink_label = (0, 'win', 0, team_b_balls)
else:
sink_label = (i, team_a_balls-1, team_b_balls)
else:
if team_b_balls == 0:
sink_label = (1, 'win', team_a_balls, 0)
else:
sink_label = (i, team_a_balls, team_b_balls-1)
sink_state = chain.get_state(sink_label)
chain.set_transition(player_state, sink_state,
chance_of_sink)
def _create_new_chain(self):
raise Exception("The _create_new_chain method must be implemented")
def build_chain(self, players, ballsPerTeam=8):
if len(players) % 2 != 0:
raise ValueError("The number of players must be even")
# Create states before the transition probabilities so they'll be ready
# to reference
chain = self._build_uninitialized_chain(len(players),
ballsPerTeam=ballsPerTeam)
self._set_state_transitions(players,
chain,
ballsPerTeam=ballsPerTeam)
return chain
class NumericMarkovMatchEvaluator(MarkovMatchEvaluator):
def _create_new_chain(self):
return markov.Chain()
def eval(self, players, winning_team, foul_end):
if winning_team != 0 and winning_team != 1:
raise ValueError("The winning_team must be either 0 or 1 " +
"but was " + str(winning_team))
# Make sure that all of the player values are numeric
new_players = []
for player in players:
new_player = {}
new_player['sink'] = value(player['sink'])
new_player['foul_end'] = value(player['foul_end'])
new_players.append(new_player)
chain = self.build_chain(new_players)
player_0_start = chain.get_state( (0, 7, 7) )
result = chain.steady_state(player_0_start)
total = 0
for end_state in self._win_states(winning_team, foul_end):
total += result[chain.get_state(end_state)]
return total
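    # eval() therefore returns the steady-state probability mass on the win
    # (or foul-win) states for winning_team, starting from state (0, 7, 7),
    # i.e. player 0 shooting with both teams at ball count 7.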
class SymbolicMarkovMatchEvaluator(MarkovMatchEvaluator):
def _create_new_chain(self):
return markov_symbolic.Chain()
def sum_less_than_one(vars):
if sum(vars) <= 1:
return 0.0
else:
return -pm.inf
def new_player(name, sink=0.5, foul=1e-10):
player = {}
player['sink'] = pm.Beta(name + "_sink", alpha=3, beta=3, value=sink)
player['foul_end'] = pm.Beta(name + "_foul_end", alpha=3,
beta=3, value=foul)
vars = player.values()
player['balance'] = pm.Potential(logp = sum_less_than_one,
name = name + "_balance",
parents = {'vars': vars},
doc = name + "_balance")
return player
def all_matches(matches, match_evaluator):
match_vars = []
for i in range(0,len(matches)):
match=matches[i]
match_name = 'match_%i' % i
if match.order == "unordered":
order = pm.DiscreteUniform('match_%i_order' % i,
lower=0,
upper=len(match.players)*2 - 1)
else:
observed = match.order == "total"
order = pm.DiscreteUniform('match_%i_order' % i,
value=0,
lower=0,
observed=observed,
upper=len(match.players) - 1)
eval_func = match_evaluator.eval_with_order
parents = {'players': match.players,
'winning_team': match.winning_team,
'order': order,
'foul_end': match.foul_end}
match_var = pm.Deterministic(eval = eval_func,
doc = match_name,
name = match_name,
parents = parents,
plot=False,
dtype=float);
match_vars.append(match_var)
return match_vars
def outcomes(match_vars):
outcome_vars = []
for i in range(0,len(match_vars)):
outcome_vars.append(pm.Bernoulli('outcome_%i' % i,
match_vars[i],
value=[True],
observed=True,
plot=False))
return outcome_vars
| mit | 2,051,371,986,648,766,200 | 36.641379 | 79 | 0.504031 | false |
hendrasaputra/hendrasaputra.com | analytics/misc/log-analytics/import_logs.py | 3 | 56979 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Piwik - Open source web analytics
#
# @link http://piwik.org
# @license http://www.gnu.org/licenses/gpl-3.0.html GPL v3 or later
# @version $Id$
#
# For more info see: http://piwik.org/log-analytics/
import base64
import bz2
import ConfigParser
import datetime
import fnmatch
import gzip
import hashlib
import httplib
import inspect
import itertools
import logging
import optparse
import os
import os.path
import Queue
import re
import sys
import threading
import time
import urllib
import urllib2
import urlparse
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
if sys.version_info < (2, 6):
print >> sys.stderr, 'simplejson (http://pypi.python.org/pypi/simplejson/) is required.'
sys.exit(1)
##
## Constants.
##
STATIC_EXTENSIONS = (
'gif jpg jpeg png bmp ico svg ttf eot woff class swf css js xml robots.txt'
).split()
DOWNLOAD_EXTENSIONS = (
'7z aac arc arj asf asx avi bin csv deb dmg doc exe flv gz gzip hqx '
'jar mpg mp2 mp3 mp4 mpeg mov movie msi msp odb odf odg odp '
'ods odt ogg ogv pdf phps ppt qt qtm ra ram rar rpm sea sit tar tbz '
'bz2 tbz tgz torrent txt wav wma wmv wpd xls xml z zip'
).split()
# A good source is: http://phpbb-bots.blogspot.com/
EXCLUDED_USER_AGENTS = (
'adsbot-google',
'ask jeeves',
'bot-',
'bot/',
'ccooter/',
'crawl',
'curl',
'echoping',
'exabot',
'feed',
'googlebot',
'ia_archiver',
'java/',
'libwww',
'mediapartners-google',
'msnbot',
'netcraftsurvey',
'panopta',
'robot',
'spider',
'surveybot',
'twiceler',
'voilabot',
'yahoo',
'yandex',
)
PIWIK_MAX_ATTEMPTS = 3
PIWIK_DELAY_AFTER_FAILURE = 2
PIWIK_EXPECTED_IMAGE = base64.b64decode(
'R0lGODlhAQABAIAAAAAAAAAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw=='
)
##
## Formats.
##
class RegexFormat(object):
def __init__(self, name, regex, date_format='%d/%b/%Y:%H:%M:%S'):
self.name = name
if regex is not None:
self.regex = re.compile(regex)
self.date_format = date_format
def check_format(self, file):
line = file.readline()
file.seek(0)
return self.check_format_line(line)
def check_format_line(self, line):
return re.match(self.regex, line)
class IisFormat(RegexFormat):
def __init__(self):
super(IisFormat, self).__init__('iis', None, '%Y-%m-%d %H:%M:%S')
def check_format(self, file):
line = file.readline()
if not line.startswith('#Software: Microsoft Internet Information Services '):
file.seek(0)
return
# Skip the next 2 lines.
for i in xrange(2):
file.readline()
# Parse the 4th line (regex)
full_regex = []
line = file.readline()
fields = {
'date': '(?P<date>^\d+[-\d+]+',
'time': '[\d+:]+)',
'cs-uri-stem': '(?P<path>/\S*)',
'cs-uri-query': '(?P<query_string>\S*)',
'c-ip': '(?P<ip>[\d*.]*)',
'cs(User-Agent)': '(?P<user_agent>\S+)',
'cs(Referer)': '(?P<referrer>\S+)',
'sc-status': '(?P<status>\d+)',
'sc-bytes': '(?P<length>\S+)',
'cs-host': '(?P<host>\S+)',
}
        # Skip the '#Fields: ' prefix (9 characters).
line = line[9:]
for field in line.split():
try:
regex = fields[field]
except KeyError:
regex = '\S+'
full_regex.append(regex)
self.regex = re.compile(' '.join(full_regex))
start_pos = file.tell()
nextline = file.readline()
file.seek(start_pos)
return self.check_format_line(nextline)
_HOST_PREFIX = '(?P<host>[\w\-\.]*)(?::\d+)? '
_COMMON_LOG_FORMAT = (
'(?P<ip>\S+) \S+ \S+ \[(?P<date>.*?) (?P<timezone>.*?)\] '
'"\S+ (?P<path>.*?) \S+" (?P<status>\S+) (?P<length>\S+)'
)
_NCSA_EXTENDED_LOG_FORMAT = (_COMMON_LOG_FORMAT +
' "(?P<referrer>.*?)" "(?P<user_agent>.*?)"'
)
_S3_LOG_FORMAT = (
'\S+ (?P<host>\S+) \[(?P<date>.*?) (?P<timezone>.*?)\] (?P<ip>\S+) '
'\S+ \S+ \S+ \S+ "\S+ (?P<path>.*?) \S+" (?P<status>\S+) \S+ (?P<length>\S+) '
'\S+ \S+ \S+ "(?P<referrer>.*?)" "(?P<user_agent>.*?)"'
)
_ICECAST2_LOG_FORMAT = ( _NCSA_EXTENDED_LOG_FORMAT +
' (?P<session_time>\S+)'
)
FORMATS = {
'common': RegexFormat('common', _COMMON_LOG_FORMAT),
'common_vhost': RegexFormat('common_vhost', _HOST_PREFIX + _COMMON_LOG_FORMAT),
'ncsa_extended': RegexFormat('ncsa_extended', _NCSA_EXTENDED_LOG_FORMAT),
'common_complete': RegexFormat('common_complete', _HOST_PREFIX + _NCSA_EXTENDED_LOG_FORMAT),
'iis': IisFormat(),
's3': RegexFormat('s3', _S3_LOG_FORMAT),
'icecast2': RegexFormat('icecast2', _ICECAST2_LOG_FORMAT),
}
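# A line matched by the 'common' format looks like, for example:
#   1.2.3.4 - - [10/Feb/2012:16:42:07 +0200] "GET /index.php HTTP/1.1" 200 1234
# The other formats extend this with host, referrer/user-agent fields, etc.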
##
## Code.
##
class Configuration(object):
"""
Stores all the configuration options by reading sys.argv and parsing,
if needed, the config.inc.php.
It has 2 attributes: options and filenames.
"""
class Error(Exception):
pass
def _create_parser(self):
"""
Initialize and return the OptionParser instance.
"""
option_parser = optparse.OptionParser(
usage='Usage: %prog [options] log_file [ log_file [...] ]',
description="Import HTTP access logs to Piwik. "
"log_file is the path to a server access log file (uncompressed, .gz, .bz2, or specify - to read from stdin). "
" By default, the script will try to produce clean reports and will exclude bots, static files, discard http error and redirects, etc. This is customizable, see below.",
epilog="About Piwik Server Log Analytics: http://piwik.org/log-analytics/ "
" Found a bug? Please create a ticket in http://dev.piwik.org/ "
" Please send your suggestions or successful user story to [email protected] "
)
option_parser.add_option(
'--debug', '-d', dest='debug', action='count', default=0,
help="Enable debug output (specify multiple times for more verbose)",
)
option_parser.add_option(
'--url', dest='piwik_url',
help="REQUIRED Piwik base URL, eg. http://example.com/piwik/ or http://analytics.example.net",
)
option_parser.add_option(
'--dry-run', dest='dry_run',
action='store_true', default=False,
help="Perform a trial run with no tracking data being inserted into Piwik",
)
option_parser.add_option(
'--show-progress', dest='show_progress',
action='store_true', default=os.isatty(sys.stdout.fileno()),
help="Print a progress report X seconds (default: 1, use --show-progress-delay to override)"
)
option_parser.add_option(
'--show-progress-delay', dest='show_progress_delay',
type='int', default=1,
help="Change the default progress delay"
)
option_parser.add_option(
'--add-sites-new-hosts', dest='add_sites_new_hosts',
action='store_true', default=False,
help="When a hostname is found in the log file, but not matched to any website "
"in Piwik, automatically create a new website in Piwik with this hostname to "
"import the logs"
)
option_parser.add_option(
'--idsite', dest='site_id',
help= ("When specified, "
"data in the specified log files will be tracked for this Piwik site ID."
" The script will not auto-detect the website based on the log line hostname (new websites will not be automatically created).")
)
option_parser.add_option(
'--idsite-fallback', dest='site_id_fallback',
help="Default Piwik site ID to use if the hostname doesn't match any "
"known Website's URL. New websites will not be automatically created. "
" Used only if --add-sites-new-hosts or --idsite are not set",
)
default_config = os.path.abspath(
os.path.join(os.path.dirname(__file__),
'../../config/config.ini.php'),
)
option_parser.add_option(
'--config', dest='config_file', default=default_config,
help=(
"This is only used when --login and --password is not used. "
"Piwik will read the configuration file (default: %default) to "
"fetch the Super User token_auth from the config file. "
)
)
option_parser.add_option(
'--login', dest='login',
help="You can manually specify the Piwik Super User login"
)
option_parser.add_option(
'--password', dest='password',
help="You can manually specify the Piwik Super User password"
)
option_parser.add_option(
'--token-auth', dest='piwik_token_auth',
help="Piwik Super User token_auth, 32 characters hexadecimal string, found in Piwik > API",
)
option_parser.add_option(
'--hostname', dest='hostnames', action='append', default=[],
help="Accepted hostname (requests with other hostnames will be excluded). "
"Can be specified multiple times"
)
option_parser.add_option(
'--exclude-path', dest='excluded_paths', action='append', default=[],
help="Paths to exclude. Can be specified multiple times"
)
option_parser.add_option(
'--exclude-path-from', dest='exclude_path_from',
help="Each line from this file is a path to exclude"
)
option_parser.add_option(
'--useragent-exclude', dest='excluded_useragents',
action='append', default=[],
help="User agents to exclude (in addition to the standard excluded "
"user agents). Can be specified multiple times",
)
option_parser.add_option(
'--enable-static', dest='enable_static',
action='store_true', default=False,
help="Track static files (images, css, js, etc.)"
)
option_parser.add_option(
'--enable-bots', dest='enable_bots',
action='store_true', default=False,
help="Track bots. All bot visits will have a Custom Variable set with name='Bot' and value='$Bot_user_agent_here$'"
)
option_parser.add_option(
'--enable-http-errors', dest='enable_http_errors',
action='store_true', default=False,
help="Track HTTP errors (status code 4xx or 5xx)"
)
option_parser.add_option(
'--enable-http-redirects', dest='enable_http_redirects',
action='store_true', default=False,
help="Track HTTP redirects (status code 3xx except 304)"
)
option_parser.add_option(
'--enable-reverse-dns', dest='reverse_dns',
action='store_true', default=False,
help="Enable reverse DNS, used to generate the 'Providers' report in Piwik. "
"Disabled by default, as it impacts performance"
)
option_parser.add_option(
'--strip-query-string', dest='strip_query_string',
action='store_true', default=False,
help="Strip the query string from the URL"
)
option_parser.add_option(
'--query-string-delimiter', dest='query_string_delimiter', default='?',
help="The query string delimiter (default: %default)"
)
option_parser.add_option(
'--log-format-name', dest='log_format_name', default=None,
help=("Access log format to detect (supported are: %s). "
"When not specified, the log format will be autodetected by trying all supported log formats."
% ', '.join(sorted(FORMATS.iterkeys())))
)
option_parser.add_option(
'--log-format-regex', dest='log_format_regex', default=None,
help="Access log regular expression. For an example of a supported Regex, see the source code of this file. "
"Overrides --log-format-name"
)
option_parser.add_option(
'--log-hostname', dest='log_hostname', default=None,
help="Force this hostname for a log format that doesn't incldude it. All hits "
"will seem to came to this host"
)
option_parser.add_option(
'--skip', dest='skip', default=0, type='int',
help="Skip the n first lines to start parsing/importing data at a given line for the specified log file",
)
option_parser.add_option(
'--recorders', dest='recorders', default=1, type='int',
help="Number of simultaneous recorders (default: %default). "
"It should be set to the number of CPU cores in your server. "
"You can also experiment with higher values which may increase performance until a certain point",
)
option_parser.add_option(
'--recorder-max-payload-size', dest='recorder_max_payload_size', default=200, type='int',
help="Maximum number of log entries to record in one tracking request (default: %default). "
)
option_parser.add_option(
'--replay-tracking', dest='replay_tracking',
action='store_true', default=False,
help="Replay piwik.php requests found in custom logs (only piwik.php requests expected)"
)
option_parser.add_option(
'--output', dest='output',
help="Redirect output (stdout and stderr) to the specified file"
)
option_parser.add_option(
'--encoding', dest='encoding', default='utf8',
help="Log files encoding (default: %default)"
)
option_parser.add_option(
'--disable-bulk-tracking', dest='use_bulk_tracking',
default=True, action='store_false',
help="Disables use of bulk tracking so recorders record one hit at a time."
)
option_parser.add_option(
'--debug-force-one-hit-every-Ns', dest='force_one_action_interval', default=False, type='float',
help="Debug option that will force each recorder to record one hit every N secs."
)
option_parser.add_option(
'--invalidate-dates', dest='invalidate_dates', default=None,
help="Invalidate reports for the specified dates (format: YYYY-MM-DD,YYYY-MM-DD,...). "
"By default, all dates found in the logs will be invalidated.",
)
option_parser.add_option(
'--force-lowercase-path', dest='force_lowercase_path', default=False, action='store_true',
help="Make URL path lowercase so paths with the same letters but different cases are "
"treated the same."
)
return option_parser
def _parse_args(self, option_parser):
"""
Parse the command line args and create self.options and self.filenames.
"""
self.options, self.filenames = option_parser.parse_args(sys.argv[1:])
if self.options.output:
sys.stdout = sys.stderr = open(self.options.output, 'a+', 0)
if not self.filenames:
print(option_parser.format_help())
sys.exit(1)
# Configure logging before calling logging.{debug,info}.
logging.basicConfig(
format='%(asctime)s: [%(levelname)s] %(message)s',
level=logging.DEBUG if self.options.debug >= 1 else logging.INFO,
)
self.options.excluded_useragents = [s.lower() for s in self.options.excluded_useragents]
if self.options.exclude_path_from:
paths = [path.strip() for path in open(self.options.exclude_path_from).readlines()]
self.options.excluded_paths.extend(path for path in paths if len(path) > 0)
if self.options.excluded_paths:
logging.debug('Excluded paths: %s', ' '.join(self.options.excluded_paths))
if self.options.hostnames:
logging.debug('Accepted hostnames: %s', ', '.join(self.options.hostnames))
else:
logging.debug('Accepted hostnames: all')
if self.options.log_format_regex:
self.format = RegexFormat('custom', self.options.log_format_regex)
elif self.options.log_format_name:
try:
self.format = FORMATS[self.options.log_format_name]
except KeyError:
fatal_error('invalid log format: %s' % self.options.log_format_name)
else:
self.format = None
if not self.options.piwik_url:
fatal_error('no URL given for Piwik')
if not (self.options.piwik_url.startswith('http://') or self.options.piwik_url.startswith('https://')):
self.options.piwik_url = 'http://' + self.options.piwik_url
logging.debug('Piwik URL is: %s', self.options.piwik_url)
if not self.options.piwik_token_auth:
try:
self.options.piwik_token_auth = self._get_token_auth()
except Piwik.Error, e:
fatal_error(e)
logging.debug('Authentication token token_auth is: %s', self.options.piwik_token_auth)
if self.options.recorders < 1:
self.options.recorders = 1
def __init__(self):
self._parse_args(self._create_parser())
def _get_token_auth(self):
"""
If the token auth is not specified in the options, get it from Piwik.
"""
# Get superuser login/password from the options.
logging.debug('No token-auth specified')
if self.options.login and self.options.password:
piwik_login = self.options.login
piwik_password = hashlib.md5(self.options.password).hexdigest()
else:
# Fallback to the given (or default) configuration file, then
# get the token from the API.
logging.debug(
'No credentials specified, reading them from "%s"',
self.options.config_file,
)
config_file = ConfigParser.RawConfigParser()
success = len(config_file.read(self.options.config_file)) > 0
if not success:
fatal_error(
"couldn't open the configuration file, "
"required to get the authentication token"
)
piwik_login = config_file.get('superuser', 'login').strip('"')
piwik_password = config_file.get('superuser', 'password').strip('"')
logging.debug('Using credentials: (login = %s, password = %s)', piwik_login, piwik_password)
try:
api_result = piwik.call_api('UsersManager.getTokenAuth',
userLogin=piwik_login,
md5Password=piwik_password,
_token_auth='',
_url=self.options.piwik_url,
)
except urllib2.URLError, e:
fatal_error('error when fetching token_auth from the API: %s' % e)
try:
return api_result['value']
except KeyError:
# Happens when the credentials are invalid.
message = api_result.get('message')
fatal_error(
'error fetching authentication token token_auth%s' % (
': %s' % message if message else '')
)
def get_resolver(self):
if self.options.site_id:
logging.debug('Resolver: static')
return StaticResolver(self.options.site_id)
else:
logging.debug('Resolver: dynamic')
return DynamicResolver()
class Statistics(object):
"""
Store statistics about parsed logs and recorded entries.
Can optionally print statistics on standard output every second.
"""
class Counter(object):
"""
        Plain integer increments are not atomic, so a bare int cannot be shared safely between threads. See:
http://stackoverflow.com/questions/6320107/are-python-ints-thread-safe
"""
def __init__(self):
# itertools.count's implementation in C does not release the GIL and
# therefore is thread-safe.
self.counter = itertools.count(1)
self.value = 0
def increment(self):
self.value = self.counter.next()
def advance(self, n):
for i in range(n):
self.increment()
def __str__(self):
return str(int(self.value))
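    # Example: c = Counter(); c.increment(); c.advance(2) leaves c.value == 3,
    # with no lock needed around the increments.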
def __init__(self):
self.time_start = None
self.time_stop = None
self.piwik_sites = set() # sites ID
self.piwik_sites_created = [] # (hostname, site ID)
self.piwik_sites_ignored = set() # hostname
self.count_lines_parsed = self.Counter()
self.count_lines_recorded = self.Counter()
# Do not match the regexp.
self.count_lines_invalid = self.Counter()
# No site ID found by the resolver.
self.count_lines_no_site = self.Counter()
# Hostname filtered by config.options.hostnames
self.count_lines_hostname_skipped = self.Counter()
# Static files.
self.count_lines_static = self.Counter()
# Ignored user-agents.
self.count_lines_skipped_user_agent = self.Counter()
        # Ignored HTTP errors.
self.count_lines_skipped_http_errors = self.Counter()
# Ignored HTTP redirects.
self.count_lines_skipped_http_redirects = self.Counter()
# Downloads
self.count_lines_downloads = self.Counter()
# Misc
self.dates_recorded = set()
self.monitor_stop = False
def set_time_start(self):
self.time_start = time.time()
def set_time_stop(self):
self.time_stop = time.time()
def _compute_speed(self, value, start, end):
delta_time = end - start
if value == 0:
return 0
if delta_time == 0:
return 'very high!'
else:
return value / delta_time
def _round_value(self, value, base=100):
return round(value * base) / base
def _indent_text(self, lines, level=1):
"""
Return an indented text. 'lines' can be a list of lines or a single
line (as a string). One level of indentation is 4 spaces.
"""
prefix = ' ' * (4 * level)
if isinstance(lines, basestring):
return prefix + lines
else:
return '\n'.join(
prefix + line
for line in lines
)
def print_summary(self):
print '''
Logs import summary
-------------------
%(count_lines_recorded)d requests imported successfully
%(count_lines_downloads)d requests were downloads
%(total_lines_ignored)d requests ignored:
%(count_lines_invalid)d invalid log lines
%(count_lines_skipped_user_agent)d requests done by bots, search engines, ...
%(count_lines_skipped_http_errors)d HTTP errors
%(count_lines_skipped_http_redirects)d HTTP redirects
%(count_lines_static)d requests to static resources (css, js, ...)
%(count_lines_no_site)d requests did not match any known site
%(count_lines_hostname_skipped)d requests did not match any requested hostname
Website import summary
----------------------
%(count_lines_recorded)d requests imported to %(total_sites)d sites
%(total_sites_existing)d sites already existed
%(total_sites_created)d sites were created:
%(sites_created)s
%(total_sites_ignored)d distinct hostnames did not match any existing site:
%(sites_ignored)s
%(sites_ignored_tips)s
Performance summary
-------------------
Total time: %(total_time)d seconds
Requests imported per second: %(speed_recording)s requests per second
''' % {
'count_lines_recorded': self.count_lines_recorded.value,
'count_lines_downloads': self.count_lines_downloads.value,
'total_lines_ignored': sum([
self.count_lines_invalid.value,
self.count_lines_skipped_user_agent.value,
self.count_lines_skipped_http_errors.value,
self.count_lines_skipped_http_redirects.value,
self.count_lines_static.value,
self.count_lines_no_site.value,
self.count_lines_hostname_skipped.value,
]),
'count_lines_invalid': self.count_lines_invalid.value,
'count_lines_skipped_user_agent': self.count_lines_skipped_user_agent.value,
'count_lines_skipped_http_errors': self.count_lines_skipped_http_errors.value,
'count_lines_skipped_http_redirects': self.count_lines_skipped_http_redirects.value,
'count_lines_static': self.count_lines_static.value,
'count_lines_no_site': self.count_lines_no_site.value,
'count_lines_hostname_skipped': self.count_lines_hostname_skipped.value,
'total_sites': len(self.piwik_sites),
'total_sites_existing': len(self.piwik_sites - set(site_id for hostname, site_id in self.piwik_sites_created)),
'total_sites_created': len(self.piwik_sites_created),
'sites_created': self._indent_text(
['%s (ID: %d)' % (hostname, site_id) for hostname, site_id in self.piwik_sites_created],
level=3,
),
'total_sites_ignored': len(self.piwik_sites_ignored),
'sites_ignored': self._indent_text(
self.piwik_sites_ignored, level=3,
),
'sites_ignored_tips': '''
TIPs:
- if one of these hosts is an alias host for one of the websites
in Piwik, you can add this host as an "Alias URL" in Settings > Websites.
- use --add-sites-new-hosts if you wish to automatically create
one website for each of these hosts in Piwik rather than discarding
these requests.
- use --idsite-fallback to force all these log lines with a new hostname
to be recorded in a specific idsite (for example for troubleshooting/visualizing the data)
- use --idsite to force all lines in the specified log files
 to be recorded in the specified idsite
- or you can also manually create a new Website in Piwik with the URL set to this hostname
''' if self.piwik_sites_ignored else '',
'total_time': self.time_stop - self.time_start,
'speed_recording': self._round_value(self._compute_speed(
self.count_lines_recorded.value,
self.time_start, self.time_stop,
)),
}
##
## The monitor is a thread that prints a short summary each second.
##
def _monitor(self):
latest_total_recorded = 0
while not self.monitor_stop:
current_total = stats.count_lines_recorded.value
time_elapsed = time.time() - self.time_start
print '%d lines parsed, %d lines recorded, %d records/sec (avg), %d records/sec (current)' % (
stats.count_lines_parsed.value,
current_total,
current_total / time_elapsed if time_elapsed != 0 else 0,
(current_total - latest_total_recorded) / config.options.show_progress_delay,
)
latest_total_recorded = current_total
time.sleep(config.options.show_progress_delay)
def start_monitor(self):
t = threading.Thread(target=self._monitor)
t.daemon = True
t.start()
def stop_monitor(self):
self.monitor_stop = True
class Piwik(object):
"""
Make requests to Piwik.
"""
class Error(Exception):
pass
@staticmethod
def _call(path, args, headers=None, url=None, data=None):
"""
Make a request to the Piwik site. It is up to the caller to format
arguments, to embed authentication, etc.
"""
if url is None:
url = config.options.piwik_url
headers = headers or {}
if data is None:
            # If Content-Type isn't defined, PHP does not parse the request's body.
headers['Content-type'] = 'application/x-www-form-urlencoded'
data = urllib.urlencode(args)
        elif not isinstance(data, basestring) and headers.get('Content-type') == 'application/json':
data = json.dumps(data)
request = urllib2.Request(url + path, data, headers)
response = urllib2.urlopen(request)
result = response.read()
response.close()
return result
@staticmethod
def _call_api(method, **kwargs):
"""
Make a request to the Piwik API taking care of authentication, body
formatting, etc.
"""
args = {
'module' : 'API',
'format' : 'json',
'method' : method,
}
# token_auth, by default, is taken from config.
token_auth = kwargs.pop('_token_auth', None)
if token_auth is None:
token_auth = config.options.piwik_token_auth
if token_auth:
args['token_auth'] = token_auth
url = kwargs.pop('_url', None)
if kwargs:
args.update(kwargs)
# Convert lists into appropriate format.
# See: http://dev.piwik.org/trac/wiki/API/Reference#PassinganArrayParameter
# Warning: we have to pass the parameters in order: foo[0], foo[1], foo[2]
# and not foo[1], foo[0], foo[2] (it will break Piwik otherwise.)
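        # Illustrative: urls=['http://a', 'http://b'] is encoded as the ordered
        # pairs ('urls[0]', 'http://a'), ('urls[1]', 'http://b').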
final_args = []
for key, value in args.iteritems():
if isinstance(value, (list, tuple)):
for index, obj in enumerate(value):
final_args.append(('%s[%d]' % (key, index), obj))
else:
final_args.append((key, value))
res = Piwik._call('/', final_args, url=url)
try:
return json.loads(res)
except ValueError:
raise urllib2.URLError('Piwik returned an invalid response: ' + res[:300])
@staticmethod
def _call_wrapper(func, expected_response, on_failure, *args, **kwargs):
"""
Try to make requests to Piwik at most PIWIK_FAILURE_MAX_RETRY times.
"""
errors = 0
while True:
try:
response = func(*args, **kwargs)
if expected_response is not None and response != expected_response:
if on_failure is not None:
error_message = on_failure(response, kwargs.get('data'))
else:
truncate_after = 200
truncated_response = (response[:truncate_after] + '..') if len(response) > truncate_after else response
error_message = "didn't receive the expected response. Response was %s " % truncated_response
raise urllib2.URLError(error_message)
return response
except (urllib2.URLError, httplib.HTTPException, ValueError), e:
logging.debug('Error when connecting to Piwik: %s', e)
errors += 1
if errors == PIWIK_MAX_ATTEMPTS:
if isinstance(e, urllib2.HTTPError):
# See Python issue 13211.
message = e.msg
elif isinstance(e, urllib2.URLError):
message = e.reason
else:
message = str(e)
raise Piwik.Error(message)
else:
time.sleep(PIWIK_DELAY_AFTER_FAILURE)
@classmethod
def call(cls, path, args, expected_content=None, headers=None, data=None, on_failure=None):
return cls._call_wrapper(cls._call, expected_content, on_failure, path, args, headers,
data=data)
@classmethod
def call_api(cls, method, **kwargs):
return cls._call_wrapper(cls._call_api, None, None, method, **kwargs)
##
## Resolvers.
##
## A resolver is a class that turns a hostname into a Piwik site ID.
##
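## For example, resolve(hit) returns a (site_id, main_url) pair such as
## (3, 'http://example.com'); site_id is None when no site matches the hit.
##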
class StaticResolver(object):
"""
Always return the same site ID, specified in the configuration.
"""
def __init__(self, site_id):
self.site_id = site_id
# Go get the main URL
sites = piwik.call_api(
'SitesManager.getSiteFromId', idSite=self.site_id
)
try:
site = sites[0]
except (IndexError, KeyError):
logging.debug('response for SitesManager.getSiteFromId: %s', str(sites))
fatal_error(
"cannot get the main URL of this site: invalid site ID: %s" % site_id
)
if site.get('result') == 'error':
fatal_error(
"cannot get the main URL of this site: %s" % site.get('message')
)
self._main_url = site['main_url']
stats.piwik_sites.add(self.site_id)
def resolve(self, hit):
return (self.site_id, self._main_url)
def check_format(self, format):
pass
class DynamicResolver(object):
"""
Use Piwik API to determine the site ID.
"""
_add_site_lock = threading.Lock()
def __init__(self):
self._cache = {}
if config.options.replay_tracking:
# get existing sites
self._cache['sites'] = piwik.call_api('SitesManager.getAllSites')
def _get_site_id_from_hit_host(self, hit):
main_url = 'http://' + hit.host
return piwik.call_api(
'SitesManager.getSitesIdFromSiteUrl',
url=main_url,
)
def _add_site(self, hit):
main_url = 'http://' + hit.host
DynamicResolver._add_site_lock.acquire()
try:
# After we obtain the lock, make sure the site hasn't already been created.
res = self._get_site_id_from_hit_host(hit)
if res:
return res[0]['idsite']
# The site doesn't exist.
logging.debug('No Piwik site found for the hostname: %s', hit.host)
if config.options.site_id_fallback is not None:
logging.debug('Using default site for hostname: %s', hit.host)
return config.options.site_id_fallback
elif config.options.add_sites_new_hosts:
if config.options.dry_run:
# Let's just return a fake ID.
return 0
logging.debug('Creating a Piwik site for hostname %s', hit.host)
result = piwik.call_api(
'SitesManager.addSite',
siteName=hit.host,
urls=[main_url],
)
if result.get('result') == 'error':
logging.error("Couldn't create a Piwik site for host %s: %s",
hit.host, result.get('message'),
)
return None
else:
site_id = result['value']
stats.piwik_sites_created.append((hit.host, site_id))
return site_id
else:
# The site doesn't exist, we don't want to create new sites and
# there's no default site ID. We thus have to ignore this hit.
return None
finally:
DynamicResolver._add_site_lock.release()
def _resolve(self, hit):
res = self._get_site_id_from_hit_host(hit)
if res:
# The site already exists.
site_id = res[0]['idsite']
else:
site_id = self._add_site(hit)
if site_id is not None:
stats.piwik_sites.add(site_id)
return site_id
def _resolve_when_replay_tracking(self, hit):
"""
If parsed site ID found in the _cache['sites'] return site ID and main_url,
otherwise return (None, None) tuple.
"""
site_id = hit.args['idsite']
if site_id in self._cache['sites']:
stats.piwik_sites.add(site_id)
return (site_id, self._cache['sites'][site_id]['main_url'])
else:
return (None, None)
def _resolve_by_host(self, hit):
"""
Returns the site ID and site URL for a hit based on the hostname.
"""
try:
site_id = self._cache[hit.host]
except KeyError:
logging.debug(
'Site ID for hostname %s not in cache', hit.host
)
site_id = self._resolve(hit)
logging.debug('Site ID for hostname %s: %s', hit.host, site_id)
self._cache[hit.host] = site_id
return (site_id, 'http://' + hit.host)
def resolve(self, hit):
"""
Return the site ID from the cache if found, otherwise call _resolve.
If replay_tracking option is enabled, call _resolve_when_replay_tracking.
"""
if config.options.replay_tracking:
            # Requests to piwik.php carry the site ID themselves, so no hostname is needed.
return self._resolve_when_replay_tracking(hit)
else:
return self._resolve_by_host(hit)
def check_format(self, format):
if config.options.replay_tracking:
pass
elif 'host' not in format.regex.groupindex and not config.options.log_hostname:
fatal_error(
"the selected log format doesn't include the hostname: you must "
"specify the Piwik site ID with the --idsite argument"
)
class Recorder(object):
"""
A Recorder fetches hits from the Queue and inserts them into Piwik using
the API.
"""
recorders = []
def __init__(self):
self.queue = Queue.Queue(maxsize=2)
# if bulk tracking disabled, make sure we can store hits outside of the Queue
if not config.options.use_bulk_tracking:
self.unrecorded_hits = []
@classmethod
def launch(cls, recorder_count):
"""
Launch a bunch of Recorder objects in a separate thread.
"""
for i in xrange(recorder_count):
recorder = Recorder()
cls.recorders.append(recorder)
run = recorder._run_bulk if config.options.use_bulk_tracking else recorder._run_single
t = threading.Thread(target=run)
t.daemon = True
t.start()
logging.debug('Launched recorder')
@classmethod
def add_hits(cls, all_hits):
"""
Add a set of hits to the recorders queue.
"""
# Organize hits so that one client IP will always use the same queue.
# We have to do this so visits from the same IP will be added in the right order.
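        # E.g. with two recorders, an IP whose hash is even always lands in
        # queue 0 and an odd one in queue 1, preserving per-IP ordering.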
hits_by_client = [[] for r in cls.recorders]
for hit in all_hits:
hits_by_client[abs(hash(hit.ip)) % len(cls.recorders)].append(hit)
for i, recorder in enumerate(cls.recorders):
recorder.queue.put(hits_by_client[i])
@classmethod
def wait_empty(cls):
"""
Wait until all recorders have an empty queue.
"""
for recorder in cls.recorders:
recorder._wait_empty()
def _run_bulk(self):
while True:
hits = self.queue.get()
if len(hits) > 0:
try:
self._record_hits(hits)
except Piwik.Error, e:
fatal_error(e, hits[0].filename, hits[0].lineno) # approximate location of error
self.queue.task_done()
def _run_single(self):
while True:
if config.options.force_one_action_interval != False:
time.sleep(config.options.force_one_action_interval)
if len(self.unrecorded_hits) > 0:
hit = self.unrecorded_hits.pop(0)
try:
self._record_hits([hit])
except Piwik.Error, e:
fatal_error(e, hit.filename, hit.lineno)
else:
self.unrecorded_hits = self.queue.get()
self.queue.task_done()
def _wait_empty(self):
"""
Wait until the queue is empty.
"""
while True:
if self.queue.empty():
# We still have to wait for the last queue item being processed
# (queue.empty() returns True before queue.task_done() is
# called).
self.queue.join()
return
time.sleep(1)
def date_to_piwik(self, date):
date, time = date.isoformat(sep=' ').split()
return '%s %s' % (date, time.replace('-', ':'))
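    # e.g. datetime(2014, 1, 2, 3, 4, 5) -> '2014-01-02 03:04:05' (the '-' to ':'
    # replacement matters for formats whose time component uses dashes).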
def _get_hit_args(self, hit):
"""
Returns the args used in tracking a hit, without the token_auth.
"""
site_id, main_url = resolver.resolve(hit)
if site_id is None:
# This hit doesn't match any known Piwik site.
if config.options.replay_tracking:
stats.piwik_sites_ignored.add('unrecognized site ID %s' % hit.args.get('idsite'))
else:
stats.piwik_sites_ignored.add(hit.host)
stats.count_lines_no_site.increment()
return
stats.dates_recorded.add(hit.date.date())
path = hit.path
if hit.query_string and not config.options.strip_query_string:
path += config.options.query_string_delimiter + hit.query_string
args = {
'rec': '1',
'apiv': '1',
'url': (main_url + path[:1024]).encode('utf8'),
'urlref': hit.referrer[:1024].encode('utf8'),
'cip': hit.ip,
'cdt': self.date_to_piwik(hit.date),
'idsite': site_id,
'dp': '0' if config.options.reverse_dns else '1',
'ua': hit.user_agent.encode('utf8'),
}
if config.options.replay_tracking:
            # Prevent the request from being force-recorded when the replay-tracking option is used.
args['rec'] = '0'
args.update(hit.args)
if hit.is_download:
args['download'] = args['url']
if hit.is_robot:
args['_cvar'] = '{"1":["Bot","%s"]}' % hit.user_agent
elif config.options.enable_bots:
args['_cvar'] = '{"1":["Not-Bot","%s"]}' % hit.user_agent
args['bots'] = '1'
if hit.is_error or hit.is_redirect:
args['cvar'] = '{"1":["HTTP-code","%s"]}' % hit.status
args['action_name'] = '%s/URL = %s%s' % (
hit.status,
urllib.quote(args['url'], ''),
("/From = %s" % urllib.quote(args['urlref'], '') if args['urlref'] != '' else '')
)
if hit.generation_time_milli > 0:
args['gt_ms'] = hit.generation_time_milli
return args
def _record_hits(self, hits):
"""
Inserts several hits into Piwik.
"""
data = {
'token_auth': config.options.piwik_token_auth,
'requests': [self._get_hit_args(hit) for hit in hits]
}
if not config.options.dry_run:
piwik.call(
'/piwik.php', args={},
expected_content=PIWIK_EXPECTED_IMAGE,
headers={'Content-type': 'application/json'},
data=data,
on_failure=self._on_tracking_failure
)
stats.count_lines_recorded.advance(len(hits))
def _on_tracking_failure(self, response, data):
"""
Removes the successfully tracked hits from the request payload so
they are not logged twice.
"""
try:
response = json.loads(response)
        except ValueError:
            # The response should be JSON; if it cannot be parsed, return it
            # unchanged so the caller treats the attempt as failed and retries.
            logging.debug("cannot parse tracker response, should be valid JSON")
return response
# remove the successfully tracked hits from payload
succeeded = response['succeeded']
data['requests'] = data['requests'][succeeded:]
return response['error']
@staticmethod
def invalidate_reports():
if config.options.dry_run or not stats.dates_recorded:
return
if config.options.invalidate_dates is not None:
dates = [date for date in config.options.invalidate_dates.split(',') if date]
else:
dates = [date.strftime('%Y-%m-%d') for date in stats.dates_recorded]
if dates:
print 'Purging Piwik archives for dates: ' + ' '.join(dates)
result = piwik.call_api(
'CoreAdminHome.invalidateArchivedReports',
dates=','.join(dates),
idSites=','.join(str(site_id) for site_id in stats.piwik_sites),
)
        print('To re-process these reports with your newly imported data, execute the '
'piwik/misc/cron/archive.php script, or see: http://piwik.org/setup-auto-archiving/ '
'for more info.')
class Hit(object):
"""
    A simple container for the attributes of one parsed log line.
"""
def __init__(self, **kwargs):
for key, value in kwargs.iteritems():
setattr(self, key, value)
super(Hit, self).__init__()
if config.options.force_lowercase_path:
self.full_path = self.full_path.lower()
class Parser(object):
"""
The Parser parses the lines in a specified file and inserts them into
a Queue.
"""
def __init__(self):
self.check_methods = [method for name, method
in inspect.getmembers(self, predicate=inspect.ismethod)
if name.startswith('check_')]
## All check_* methods are called for each hit and must return True if the
## hit can be imported, False otherwise.
def check_hostname(self, hit):
# Check against config.hostnames.
if not hasattr(hit, 'host') or not config.options.hostnames:
return True
# Accept the hostname only if it matches one pattern in the list.
result = any(
fnmatch.fnmatch(hit.host, pattern)
for pattern in config.options.hostnames
)
if not result:
stats.count_lines_hostname_skipped.increment()
return result
def check_static(self, hit):
extension = hit.path.rsplit('.')[-1].lower()
if extension in STATIC_EXTENSIONS:
if config.options.enable_static:
hit.is_download = True
return True
else:
stats.count_lines_static.increment()
return False
return True
def check_download(self, hit):
extension = hit.path.rsplit('.')[-1].lower()
if extension in DOWNLOAD_EXTENSIONS:
stats.count_lines_downloads.increment()
hit.is_download = True
return True
def check_user_agent(self, hit):
user_agent = hit.user_agent.lower()
for s in itertools.chain(EXCLUDED_USER_AGENTS, config.options.excluded_useragents):
if s in user_agent:
if config.options.enable_bots:
hit.is_robot = True
return True
else:
stats.count_lines_skipped_user_agent.increment()
return False
return True
def check_http_error(self, hit):
if hit.status[0] in ('4', '5'):
if config.options.enable_http_errors:
hit.is_error = True
return True
else:
stats.count_lines_skipped_http_errors.increment()
return False
return True
def check_http_redirect(self, hit):
if hit.status[0] == '3' and hit.status != '304':
if config.options.enable_http_redirects:
hit.is_redirect = True
return True
else:
stats.count_lines_skipped_http_redirects.increment()
return False
return True
def check_path(self, hit):
for excluded_path in config.options.excluded_paths:
if fnmatch.fnmatch(hit.path, excluded_path):
return False
return True
@staticmethod
def detect_format(file):
"""
Return the best matching format for this file, or None if none was found.
"""
logging.debug('Detecting the log format')
format = None
format_groups = 0
for name, candidate_format in FORMATS.iteritems():
match = candidate_format.check_format(file)
if match:
logging.debug('Format %s matches', name)
# if there's more info in this match, use this format
match_groups = len(match.groups())
if format_groups < match_groups:
format = candidate_format
format_groups = match_groups
else:
logging.debug('Format %s does not match', name)
        if format is not None:
            logging.debug('Format %s is the best match', format.name)
        return format
def parse(self, filename):
"""
Parse the specified filename and insert hits in the queue.
"""
def invalid_line(line, reason):
stats.count_lines_invalid.increment()
if config.options.debug >= 2:
logging.debug('Invalid line detected (%s): %s' % (reason, line))
if filename == '-':
filename = '(stdin)'
file = sys.stdin
else:
if not os.path.exists(filename):
print >> sys.stderr, 'File %s does not exist' % filename
return
else:
if filename.endswith('.bz2'):
open_func = bz2.BZ2File
elif filename.endswith('.gz'):
open_func = gzip.open
else:
open_func = open
file = open_func(filename, 'r')
if config.options.show_progress:
print 'Parsing log %s...' % filename
if config.format:
            # The format was explicitly specified.
format = config.format
else:
# If the file is empty, don't bother.
data = file.read(100)
if len(data.strip()) == 0:
return
file.seek(0)
format = self.detect_format(file)
if format is None:
return fatal_error(
'Cannot guess the logs format. Please give one using '
'either the --log-format-name or --log-format-regex option'
)
# Make sure the format is compatible with the resolver.
resolver.check_format(format)
hits = []
for lineno, line in enumerate(file):
try:
line = line.decode(config.options.encoding)
except UnicodeDecodeError:
invalid_line(line, 'invalid encoding')
continue
stats.count_lines_parsed.increment()
if stats.count_lines_parsed.value <= config.options.skip:
continue
match = format.regex.match(line)
if not match:
invalid_line(line, 'line did not match')
continue
hit = Hit(
filename=filename,
lineno=lineno,
status=match.group('status'),
full_path=match.group('path'),
is_download=False,
is_robot=False,
is_error=False,
is_redirect=False,
args={},
)
try:
hit.query_string = match.group('query_string')
hit.path = hit.full_path
except IndexError:
hit.path, _, hit.query_string = hit.full_path.partition(config.options.query_string_delimiter)
try:
hit.referrer = match.group('referrer')
except IndexError:
hit.referrer = ''
if hit.referrer == '-':
hit.referrer = ''
try:
hit.user_agent = match.group('user_agent')
except IndexError:
hit.user_agent = ''
hit.ip = match.group('ip')
try:
hit.length = int(match.group('length'))
except (ValueError, IndexError):
# Some lines or formats don't have a length (e.g. 304 redirects, IIS logs)
hit.length = 0
try:
hit.generation_time_milli = int(match.group('generation_time_milli'))
except IndexError:
try:
hit.generation_time_milli = int(match.group('generation_time_micro')) / 1000
except IndexError:
hit.generation_time_milli = 0
if config.options.log_hostname:
hit.host = config.options.log_hostname
else:
try:
hit.host = match.group('host').lower().strip('.')
except IndexError:
# Some formats have no host.
pass
# Check if the hit must be excluded.
if not all((method(hit) for method in self.check_methods)):
continue
# Parse date.
# We parse it after calling check_methods as it's quite CPU hungry, and
# we want to avoid that cost for excluded hits.
date_string = match.group('date')
try:
hit.date = datetime.datetime.strptime(date_string, format.date_format)
except ValueError:
invalid_line(line, 'invalid date')
continue
            # Parse the timezone and subtract its value from the date.
try:
timezone = float(match.group('timezone'))
except IndexError:
timezone = 0
except ValueError:
invalid_line(line, 'invalid timezone')
continue
if timezone:
hit.date -= datetime.timedelta(hours=timezone/100)
if config.options.replay_tracking:
# we need a query string and we only consider requests with piwik.php
if not hit.query_string or not hit.path.lower().endswith('piwik.php'):
continue
query_arguments = urlparse.parse_qs(hit.query_string)
if not "idsite" in query_arguments:
invalid_line(line, 'missing idsite')
continue
try:
hit.args.update((k, v.pop().encode('raw_unicode_escape').decode(config.options.encoding)) for k, v in query_arguments.iteritems())
except UnicodeDecodeError:
invalid_line(line, 'invalid encoding')
continue
# Check if the hit must be excluded.
if all((method(hit) for method in self.check_methods)):
hits.append(hit)
if len(hits) >= config.options.recorder_max_payload_size * len(Recorder.recorders):
Recorder.add_hits(hits)
hits = []
# add last chunk of hits
if len(hits) > 0:
Recorder.add_hits(hits)
def main():
"""
Start the importing process.
"""
stats.set_time_start()
if config.options.show_progress:
stats.start_monitor()
recorders = Recorder.launch(config.options.recorders)
try:
for filename in config.filenames:
parser.parse(filename)
Recorder.wait_empty()
except KeyboardInterrupt:
pass
stats.set_time_stop()
if config.options.show_progress:
stats.stop_monitor()
try:
Recorder.invalidate_reports()
except Piwik.Error, e:
pass
stats.print_summary()
def fatal_error(error, filename=None, lineno=None):
print >> sys.stderr, 'Fatal error: %s' % error
if filename and lineno is not None:
print >> sys.stderr, (
'You can restart the import of "%s" from the point it failed by '
'specifying --skip=%d on the command line.\n' % (filename, lineno)
)
os._exit(1)
if __name__ == '__main__':
try:
piwik = Piwik()
config = Configuration()
stats = Statistics()
resolver = config.get_resolver()
parser = Parser()
main()
sys.exit(0)
except KeyboardInterrupt:
pass
| gpl-2.0 | -3,288,608,415,037,848,600 | 35.108365 | 194 | 0.557135 | false |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/openmdao.main-0.13.0-py2.7.egg/openmdao/main/test/test_pkg_res_factory.py | 1 | 3476 | """
pkg_res_factory test
"""
import logging
import os
import unittest
# pylint: disable-msg=F0401
from pkg_resources import DistributionNotFound, VersionConflict
from pkg_resources import Requirement, Environment, working_set
from openmdao.main.pkg_res_factory import PkgResourcesFactory
from openmdao.main.api import Component, get_available_types
# pylint: disable-msg=C0103
class PkgResFactoryTestCase(unittest.TestCase):
"""tester for pkg_res_factory"""
def test_load(self):
# make sure we're looking in the right spot for the plugins whether
# we're in a develop egg or in the released version
dist = working_set.find(Requirement.parse('openmdao.test'))
fact = PkgResourcesFactory(['openmdao.component'], None)
comp = fact.create('openmdao.test.execcomp.ExecComp',
exprs=['x = a+1','y=b-2','z=x*2'])
comp.a = 4
comp.b = 2
comp.run()
self.assertEqual(comp.x, 5)
self.assertEqual(comp.y, 0)
self.assertEqual(comp.z, 10)
def test_get_available_types(self):
tups = get_available_types()
types = set([x[0] for x in tups])
iface_dict = dict((key, value['ifaces']) for (key, value) in tups)
expected = set(['openmdao.lib.components.external_code.ExternalCode',
'openmdao.lib.components.mux.DeMux',
'openmdao.lib.drivers.doedriver.DOEdriver',
'openmdao.lib.drivers.genetic.Genetic',
'openmdao.lib.drivers.caseiterdriver.CaseIteratorDriver',
'openmdao.lib.components.metamodel.MetaModel',
'openmdao.lib.components.expected_improvement_multiobj.MultiObjExpectedImprovement',
'openmdao.lib.drivers.conmindriver.CONMINdriver',
'openmdao.lib.drivers.simplecid.SimpleCaseIterDriver',
'openmdao.lib.components.pareto_filter.ParetoFilter',
'openmdao.lib.drivers.newsumtdriver.NEWSUMTdriver',
'openmdao.lib.components.mux.Mux',
'openmdao.lib.components.expected_improvement.ExpectedImprovement',
'openmdao.test.execcomp.ExecComp',
'openmdao.main.assembly.Assembly',
'openmdao.lib.drivers.iterate.FixedPointIterator',
'openmdao.lib.optproblems.sellar.SellarProblem',
'openmdao.lib.optproblems.branin.BraninProblem',
'openmdao.lib.optproblems.polyscale.PolyScalableProblem'])
missing = expected - types
if missing:
self.fail("the following expected types were missing: %s" % missing)
for typ,meta in tups:
if not isinstance(meta, dict):
self.fail("%s did not return a metadata dict from get_available_types" % typ)
if 'version' not in meta:
self.fail("the metadata for %s did not contain 'version'" % typ)
if 'ifaces' not in meta:
self.fail("the metadata for %s did not contain 'ifaces'" % typ)
self.assertEqual(iface_dict['openmdao.lib.drivers.conmindriver.CONMINdriver'],
['IHasObjective', 'IComponent', 'IHasParameters', 'IHasIneqConstraints', 'IContainer', 'IDriver', 'IOptimizer'])
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | -3,696,755,660,376,390,700 | 44.736842 | 137 | 0.60702 | false |
ewenmaclean/coinvent_demo | settings_pre.py | 1 | 2248 | ###### The number of models to be generated (0 for all models)
numModels = 10
###### The minimal number of iterations for generalization
minIterationsGeneralize = 1
###### The maximal number of iterations for generalization
maxIterationsGeneralize = 20
###### The minimal number of iterations for blending
# minIterationsBlend = 1
###### The maximal number of iterations for blending
# maxIterationsBlend = 20
###### Percentage below the highest blend value found so far that a blend may fall and still be kept in the results. 0 keeps only the blends with the highest value found so far, 100 also considers blends with half the value, etc. Setting this to -sys.maxint-1 keeps all blends.
blendValuePercentageBelowHighestValueToKeep = 25
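# Illustrative example: with a value of 25 and a best blend value of 80, blends
# valued 60 or above (i.e. within 25% below 80) are kept.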
# blendCostPercentageAboveMinToKeep = sys.maxint
###### Time limit for eprover and darwin consistency check in seconds CPU time
eproverTimeLimit = 5
darwinTimeLimit = 2
###### Path to the HETS executable ######
hetsExe = 'hets'
# hetsExe = '/media/psf/Home/svn/coinvent/Amalgamation/hetsPrio' # This is the experimental version with priorities by Mihai.
###### Switch to enable the explicit generation of blend files (see function writeBlends.py) ######
genExplicitBlendFiles = True
############################
###### CASL-specific #######
############################
###### For generalization, determine whether removals of axioms, sorts, predicates and operators are allowed as atomic generalization actions. For certain domains it may be useful to use only a subset of generalization operations.
# TODO: This is currently not working... For now, (un)comment the respective LP rules in the generalize.lp file to select generalization operations.
# rmAxAllowed = True
# rmPredAllowed = True
# rmOpAllowed = True
# rmSortAllowed = True
###################################################################
## Here is space to quickly overwrite the above settings for debugging purposes.
# inputFile = "examples/cadenceByAx.casl"
# inputSpaceNames = ["Perfect7Cadence","PhrygianCadence"]
# inputFile = "examples/houseBoat.casl"
# inputSpaceNames = ["Boat","House"]
# inputFile = "examples/LPNMR/cadenceBlendFusionTest.casl"
# inputSpaceNames = ["PhrygianCadence","PerfectCadence"]
| gpl-2.0 | 9,096,900,775,222,598,000 | 41.415094 | 306 | 0.718416 | false |
losywee/rethinkdb | test/interface/stat.py | 13 | 16258 | #!/usr/bin/env python
# Copyright 2010-2012 RethinkDB, all rights reserved.
# This file tests the `rethinkdb.stats` admin table.
# The scenario works by starting a cluster of two servers and two tables. The tables are
# then sharded across the two servers (no replicas), and populated with 100 rows.
# A small read/write workload runs in the background during the entire test to ensure
# that we have stats to read. In addition, we run with a cache-size of zero to force
# disk reads and writes.
#
# 1. Cluster is started, table populated
# 2. Gather and verify stats
# 3. Shut down the second server
# 4. Gather and verify stats - observe timeouts for the missing server
# 5. Restart the second server
# 6. Gather and verify stats
#
# Stats verification is rather weak because we can't expect specific values for many
# fields. For most of them, we simple assert that they are greater than zero. In
# addition, the full scan of the `stats` table in verified for internal consistency.
# That is, we make sure the tables' and servers' stats add up to the cluster stats,
# and so on. This is not valid when getting rows from the stats table individually,
# as there will be race conditions then.
from __future__ import print_function
import sys, os, time, re, multiprocessing, random, pprint
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse, workload_runner
r = utils.import_python_driver()
db = 'test'
server_names = ['nate', 'grey']
table_names = ['foo', 'bar']
def read_write_workload(port, table, stop_event):
conn = r.connect("localhost", port)
ids = list(r.range(100).map(lambda x: r.uuid()).run(conn))
r.db(db).table(table).insert([{'id': i, 'value': 0} for i in ids]).run(conn)
# Increment this every loop so the update actually results in a write
counter = 0
while not stop_event.is_set():
counter += 1
try:
r.db(db).table(table).get(random.choice(ids)).run(conn)
r.db(db).table(table).insert({'id':random.choice(ids), 'value': counter},
conflict='replace').run(conn)
time.sleep(0.05)
except r.ReqlRuntimeError:
# Ignore runtime errors and keep going until stopped
pass
# Per-second values are floats, so do a fuzzy comparison to allow for accumulated error
def fuzzy_compare(left, right):
    return abs(left - right) < 1e-03
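# e.g. fuzzy_compare(0.1 + 0.2, 0.3) is True despite floating-point rounding error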
def find_rows(global_stats, pred):
res = [ ]
for row in global_stats:
if pred(row['id']):
res.append(row)
assert len(res) != 0, "Missing stats row"
return res
def check_sum_stat(path, iterable, expected):
def walk_object(path, o):
for p in path:
o = o[p]
return o
total = 0.0
for item in iterable:
# Don't count the row if it errored - the stats are missing anyway
if 'error' not in item:
total += walk_object(path, item)
if 'error' not in expected:
assert fuzzy_compare(total, walk_object(path, expected)), \
"Stats (%s) did not add up, expected %f, got %f" % (repr(path), total, walk_object(expected))
# Verifies that the table_server stats add up to the table stats
def check_table_stats(table_id, global_stats):
table_row = find_rows(global_stats, lambda row_id: row_id == ['table', table_id])
assert len(table_row) == 1
table_row = table_row[0]
table_server_rows = find_rows(global_stats,
lambda row_id: len(row_id) == 3 and \
row_id[0] == 'table_server' and \
row_id[1] == table_id)
check_sum_stat(['query_engine', 'read_docs_per_sec'], table_server_rows, table_row)
check_sum_stat(['query_engine', 'written_docs_per_sec'], table_server_rows, table_row)
# Verifies that the table_server stats add up to the server stats
def check_server_stats(server_id, global_stats):
server_row = find_rows(global_stats, lambda row_id: row_id == ['server', server_id])
assert len(server_row) == 1
server_row = server_row[0]
table_server_rows = find_rows(global_stats,
lambda row_id: len(row_id) == 3 and \
row_id[0] == 'table_server' and \
row_id[2] == server_id)
check_sum_stat(['query_engine', 'read_docs_per_sec'], table_server_rows, server_row)
check_sum_stat(['query_engine', 'written_docs_per_sec'], table_server_rows, server_row)
check_sum_stat(['query_engine', 'read_docs_total'], table_server_rows, server_row)
check_sum_stat(['query_engine', 'written_docs_total'], table_server_rows, server_row)
# Verifies that table and server stats add up to the cluster stats
def check_cluster_stats(global_stats):
cluster_row = find_rows(global_stats, lambda row_id: row_id == ['cluster'])
assert len(cluster_row) == 1
cluster_row = cluster_row[0]
table_rows = find_rows(global_stats,
lambda row_id: len(row_id) == 2 and \
row_id[0] == 'table')
check_sum_stat(['query_engine', 'read_docs_per_sec'], table_rows, cluster_row)
check_sum_stat(['query_engine', 'written_docs_per_sec'], table_rows, cluster_row)
server_rows = find_rows(global_stats,
lambda row_id: len(row_id) == 2 and \
row_id[0] == 'server')
check_sum_stat(['query_engine', 'read_docs_per_sec'], server_rows, cluster_row)
check_sum_stat(['query_engine', 'written_docs_per_sec'], server_rows, cluster_row)
check_sum_stat(['query_engine', 'client_connections'], server_rows, cluster_row)
check_sum_stat(['query_engine', 'clients_active'], server_rows, cluster_row)
def get_and_check_global_stats(tables, servers, conn):
global_stats = list(r.db('rethinkdb').table('stats').run(conn))
check_cluster_stats(global_stats)
for table in tables:
check_table_stats(table['id'], global_stats)
for server in servers:
check_server_stats(server['id'], global_stats)
assert len(global_stats) == 1 + len(tables) + len(servers) + (len(tables) * len(servers))
return global_stats
def get_individual_stats(global_stats, conn):
res = [ ]
for row in global_stats:
rerow = r.db('rethinkdb').table('stats').get(row['id']).run(conn)
assert isinstance(rerow, dict)
assert rerow['id'] == row['id']
res.append(rerow)
return res
# Global and individual stats should be in the same order
# This also assumes that the individual stats were collected after the global stats
# The only thing we know about `per_sec` stats is that they are non-zero
# For `total` stats, we can check that they only increase with time
def compare_global_and_individual_stats(global_stats, individual_stats):
assert len(global_stats) == len(individual_stats)
for i in xrange(len(global_stats)):
a = global_stats[i]
b = individual_stats[i]
assert a['id'] == b['id']
if a['id'][0] == 'cluster':
assert a['query_engine']['queries_per_sec'] > 0
assert b['query_engine']['queries_per_sec'] > 0
assert a['query_engine']['read_docs_per_sec'] > 0
assert b['query_engine']['read_docs_per_sec'] > 0
assert a['query_engine']['written_docs_per_sec'] > 0
assert b['query_engine']['written_docs_per_sec'] > 0
assert a['query_engine']['client_connections'] == b['query_engine']['client_connections'] == len(table_names) + 1
elif a['id'][0] == 'server':
assert a['server'] == b['server']
assert a['query_engine']['queries_per_sec'] >= 0
assert b['query_engine']['queries_per_sec'] >= 0
assert a['query_engine']['read_docs_per_sec'] > 0
assert b['query_engine']['read_docs_per_sec'] > 0
assert a['query_engine']['written_docs_per_sec'] > 0
assert b['query_engine']['written_docs_per_sec'] > 0
assert a['query_engine']['queries_total'] <= b['query_engine']['queries_total']
assert a['query_engine']['read_docs_total'] <= b['query_engine']['read_docs_total']
assert a['query_engine']['written_docs_total'] <= b['query_engine']['written_docs_total']
elif a['id'][0] == 'table':
assert a['db'] == b['db']
assert a['table'] == b['table']
assert a['query_engine']['read_docs_per_sec'] > 0
assert b['query_engine']['read_docs_per_sec'] > 0
assert a['query_engine']['written_docs_per_sec'] > 0
assert b['query_engine']['written_docs_per_sec'] > 0
elif a['id'][0] == 'table_server':
assert a['db'] == b['db']
assert a['table'] == b['table']
assert a['server'] == b['server']
assert a['query_engine']['read_docs_per_sec'] > 0
assert b['query_engine']['read_docs_per_sec'] > 0
assert a['query_engine']['written_docs_per_sec'] > 0
assert b['query_engine']['written_docs_per_sec'] > 0
assert a['query_engine']['read_docs_total'] <= b['query_engine']['read_docs_total']
assert a['query_engine']['written_docs_total'] <= b['query_engine']['written_docs_total']
assert a['storage_engine']['disk']['read_bytes_per_sec'] > 0
assert a['storage_engine']['disk']['written_bytes_per_sec'] > 0
assert b['storage_engine']['disk']['read_bytes_per_sec'] > 0
assert b['storage_engine']['disk']['written_bytes_per_sec'] > 0
assert a['storage_engine']['disk']['read_bytes_total'] <= b['storage_engine']['disk']['read_bytes_total']
assert a['storage_engine']['disk']['written_bytes_total'] <= b['storage_engine']['disk']['written_bytes_total']
# even though cache size is 0, the server may use more while processing a query
assert a['storage_engine']['cache']['in_use_bytes'] >= 0
assert b['storage_engine']['cache']['in_use_bytes'] >= 0
# unfortunately we can't make many assumptions about the disk space
assert a['storage_engine']['disk']['space_usage']['data_bytes'] >= 0
assert a['storage_engine']['disk']['space_usage']['metadata_bytes'] >= 0
assert b['storage_engine']['disk']['space_usage']['data_bytes'] >= 0
assert b['storage_engine']['disk']['space_usage']['metadata_bytes'] >= 0
else:
assert False, "Unrecognized stats row id: %s" % repr(a['id'])
op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
opts = op.parse(sys.argv)
with driver.Metacluster() as metacluster:
cluster = driver.Cluster(metacluster)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)
# We use a cache size of 0 to force disk reads
serve_options += ['--cache-size', '0']
print('Spinning up %d processes...' % len(server_names))
servers = [ ]
for i in xrange(len(server_names)):
info = { 'name': server_names[i] }
info['files'] = driver.Files(metacluster, db_path='db-%d' % i,
console_output='create-output-%d' % i,
server_name=info['name'], command_prefix=command_prefix)
info['process'] = driver.Process(cluster, info['files'],
console_output='serve-output-%d' % i,
command_prefix=command_prefix, extra_options=serve_options)
servers.append(info)
for server in servers:
server['process'].wait_until_started_up()
conn = r.connect(servers[0]['process'].host, servers[0]['process'].driver_port)
print('Creating %d tables...' % len(table_names))
stop_event = multiprocessing.Event()
# Store uuids for each table and server for verification purposes
r.db_create(db).run(conn)
tables = [ ]
for name in table_names:
info = { 'name': name }
r.db(db).table_create(name, shards=2, replicas=1).run(conn)
info['db_id'] = r.db(db).config()['id'].run(conn)
info['id'] = r.db(db).table(info['name']).config()['id'].run(conn)
info['workload'] = multiprocessing.Process(target=read_write_workload, args=(servers[0]['process'].driver_port, name, stop_event))
info['workload'].start()
tables.append(info)
for server in servers:
server['id'] = r.db('rethinkdb').table('server_config') \
.filter(r.row['name'].eq(server['name']))[0]['id'].run(conn)
# Allow some time for the workload to get the stats going
time.sleep(1)
try:
# Perform table scan, get each row individually, and check the integrity of the results
all_stats = get_and_check_global_stats(tables, servers, conn)
also_stats = get_individual_stats(all_stats, conn)
compare_global_and_individual_stats(all_stats, also_stats)
# Shut down one server
print("Killing second server...")
servers[1]['process'].close()
time.sleep(5)
# Perform table scan, observe that server 1 is now gone
all_stats = get_and_check_global_stats(tables, [servers[0]], conn)
also_stats = get_individual_stats(all_stats, conn)
compare_global_and_individual_stats(all_stats, also_stats)
# Basic test of the `_debug_stats` table
debug_stats_0 = r.db('rethinkdb').table('_debug_stats') \
.get(servers[0]["id"]).run(conn)
debug_stats_1 = r.db('rethinkdb').table('_debug_stats') \
.get(servers[1]["id"]).run(conn)
assert debug_stats_0["stats"]["eventloop"]["total"] > 0
assert debug_stats_1 is None
# Restart server
print("Restarting second server...")
servers[1]['process'] = driver.Process(cluster, servers[1]['files'],
console_output='serve-output-1',
command_prefix=command_prefix, extra_options=serve_options)
servers[1]['process'].wait_until_started_up()
time.sleep(5)
# Perform table scan
all_stats = get_and_check_global_stats(tables, servers, conn)
also_stats = get_individual_stats(all_stats, conn)
compare_global_and_individual_stats(all_stats, also_stats)
# Verify that 'total' stats are non-zero
def check_non_zero_totals(stats):
for row in stats:
if row['id'][0] == 'server':
if row['id'][1] == servers[1]['id']:
assert row['query_engine']['queries_total'] == 0
else:
assert row['query_engine']['queries_total'] > 0
assert row['query_engine']['read_docs_total'] > 0
assert row['query_engine']['written_docs_total'] > 0
if row['id'][0] == 'table_server':
assert row['query_engine']['read_docs_total'] > 0
assert row['query_engine']['written_docs_total'] > 0
assert row['storage_engine']['disk']['read_bytes_total'] > 0
assert row['storage_engine']['disk']['written_bytes_total'] > 0
check_non_zero_totals(all_stats)
check_non_zero_totals(also_stats)
finally:
stop_event.set()
for table in tables:
table['workload'].join()
print("Checking that stats table is not writable...")
length = r.db("rethinkdb").table("stats").count().run(conn)
res = r.db("rethinkdb").table("stats").delete().run(conn)
assert res["errors"] == length, res
res = r.db("rethinkdb").table("stats").update({"foo": "bar"}).run(conn)
assert res["errors"] == length, res
res = r.db("rethinkdb").table("stats").insert({}).run(conn)
assert res["errors"] == 1, res
cluster.check_and_stop()
print('Done.')
| agpl-3.0 | 7,437,367,249,533,312,000 | 47.531343 | 138 | 0.58974 | false |
SmithsonianEnterprises/django-cms | cms/admin/settingsadmin.py | 47 | 3428 | # -*- coding: utf-8 -*-
from functools import update_wrapper
import json
from django.conf.urls import url
from django.contrib import admin
from django.contrib.admin import ModelAdmin
from django.contrib.auth.admin import csrf_protect_m
from django.db import transaction
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.translation import override
from cms.models import UserSettings
from cms.utils.urlutils import admin_reverse
class SettingsAdmin(ModelAdmin):
def get_urls(self):
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.model_name
return [
url(r'^session_store/$',
self.session_store,
name='%s_%s_session_store' % info),
url(r'^$',
wrap(self.change_view),
name='%s_%s_change' % info),
url(r'^(.+)/$',
wrap(self.change_view),
name='%s_%s_change' % info),
]
@csrf_protect_m
@transaction.atomic
def change_view(self, request, id=None):
model = self.model
try:
obj = model.objects.get(user=request.user)
except model.DoesNotExist:
return self.add_view(request)
return super(SettingsAdmin, self).change_view(request, str(obj.pk))
def session_store(self, request):
"""
either POST or GET
POST should have a settings parameter
"""
if not request.user.is_staff:
return HttpResponse(json.dumps(""),
content_type="application/json")
if request.method == "POST":
request.session['cms_settings'] = request.POST['settings']
request.session.save()
return HttpResponse(
json.dumps(request.session.get('cms_settings', '')),
content_type="application/json"
)
def save_model(self, request, obj, form, change):
obj.user = request.user
obj.save()
def response_post_save_change(self, request, obj):
#
# When the user changes his language setting, we need to do two things:
# 1. Change the language-prefix for the sideframed admin view
# 2. Reload the whole window so that the new language affects the
# toolbar, etc.
#
# To do this, we first redirect the sideframe to the correct new, URL,
# but we pass a GET param 'reload_window', which instructs JS on that
# page to strip (to avoid infinite redirection loops) that param then
# reload the whole window again.
#
with override(obj.language):
post_url = admin_reverse(
'cms_usersettings_change',
args=[obj.id, ],
current_app=self.admin_site.name
)
return HttpResponseRedirect("{0}?reload_window".format(post_url))
def has_change_permission(self, request, obj=None):
if obj and obj.user == request.user:
return True
return False
def get_model_perms(self, request):
"""
Return empty perms dict thus hiding the model from admin index.
"""
return {}
admin.site.register(UserSettings, SettingsAdmin)
| bsd-3-clause | -4,519,536,094,190,621,000 | 32.940594 | 79 | 0.595683 | false |
n4hy/gnuradio | gr-uhd/apps/uhd_fft.py | 1 | 10069 | #!/usr/bin/env python
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import uhd
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
import numpy
try:
from gnuradio.wxgui import stdgui2, form, slider
from gnuradio.wxgui import fftsink2, waterfallsink2, scopesink2
import wx
except ImportError:
sys.stderr.write("Error importing GNU Radio's wxgui. Please make sure gr-wxgui is installed.\n")
sys.exit(1)
class app_top_block(stdgui2.std_top_block):
def __init__(self, frame, panel, vbox, argv):
stdgui2.std_top_block.__init__(self, frame, panel, vbox, argv)
self.frame = frame
self.panel = panel
parser = OptionParser(option_class=eng_option)
parser.add_option("-a", "--args", type="string", default="",
help="UHD device address args , [default=%default]")
parser.add_option("", "--spec", type="string", default=None,
help="Subdevice of UHD device where appropriate")
parser.add_option("-A", "--antenna", type="string", default=None,
help="select Rx Antenna where appropriate")
parser.add_option("-s", "--samp-rate", type="eng_float", default=1e6,
help="set sample rate (bandwidth) [default=%default]")
parser.add_option("-f", "--freq", type="eng_float", default=None,
help="set frequency to FREQ", metavar="FREQ")
parser.add_option("-g", "--gain", type="eng_float", default=None,
help="set gain in dB (default is midpoint)")
parser.add_option("-W", "--waterfall", action="store_true", default=False,
help="Enable waterfall display")
parser.add_option("-S", "--oscilloscope", action="store_true", default=False,
help="Enable oscilloscope display")
parser.add_option("", "--avg-alpha", type="eng_float", default=1e-1,
help="Set fftsink averaging factor, default=[%default]")
parser.add_option("", "--ref-scale", type="eng_float", default=1.0,
help="Set dBFS=0dB input value, default=[%default]")
parser.add_option("--fft-size", type="int", default=1024,
help="Set number of FFT bins [default=%default]")
(options, args) = parser.parse_args()
if len(args) != 0:
parser.print_help()
sys.exit(1)
self.options = options
self.show_debug_info = True
self.u = uhd.usrp_source(device_addr=options.args, stream_args=uhd.stream_args('fc32'))
# Set the subdevice spec
if(options.spec):
self.u.set_subdev_spec(options.spec, 0)
self.u.set_samp_rate(options.samp_rate)
input_rate = self.u.get_samp_rate()
if options.waterfall:
self.scope = \
waterfallsink2.waterfall_sink_c (panel, fft_size=1024,
sample_rate=input_rate)
self.frame.SetMinSize((800, 420))
elif options.oscilloscope:
self.scope = scopesink2.scope_sink_c(panel, sample_rate=input_rate)
self.frame.SetMinSize((800, 600))
else:
self.scope = fftsink2.fft_sink_c (panel,
fft_size=options.fft_size,
sample_rate=input_rate,
ref_scale=options.ref_scale,
ref_level=20.0,
y_divs = 12,
avg_alpha=options.avg_alpha)
self.frame.SetMinSize((800, 420))
self.connect(self.u, self.scope)
self._build_gui(vbox)
self._setup_events()
# set initial values
if options.gain is None:
# if no gain was specified, use the mid-point in dB
g = self.u.get_gain_range()
options.gain = float(g.start()+g.stop())/2
if options.freq is None:
# if no freq was specified, use the mid-point
r = self.u.get_freq_range()
options.freq = float(r.start()+r.stop())/2
self.set_gain(options.gain)
# Set the antenna
if(options.antenna):
self.u.set_antenna(options.antenna, 0)
if self.show_debug_info:
self.myform['samprate'].set_value(self.u.get_samp_rate())
self.myform['rffreq'].set_value(0)
self.myform['dspfreq'].set_value(0)
if not(self.set_freq(options.freq)):
self._set_status_msg("Failed to set initial frequency")
def _set_status_msg(self, msg):
self.frame.GetStatusBar().SetStatusText(msg, 0)
def _build_gui(self, vbox):
def _form_set_freq(kv):
return self.set_freq(kv['freq'])
vbox.Add(self.scope.win, 10, wx.EXPAND)
# add control area at the bottom
self.myform = myform = form.form()
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((5,0), 0, 0)
myform['freq'] = form.float_field(
parent=self.panel, sizer=hbox, label="Center freq", weight=1,
callback=myform.check_input_and_call(_form_set_freq,
self._set_status_msg))
hbox.Add((5,0), 0, 0)
g = self.u.get_gain_range()
# some configurations don't have gain control
if g.stop() > g.start():
myform['gain'] = form.slider_field(parent=self.panel,
sizer=hbox, label="Gain",
weight=3,
min=int(g.start()), max=int(g.stop()),
callback=self.set_gain)
hbox.Add((5,0), 0, 0)
vbox.Add(hbox, 0, wx.EXPAND)
self._build_subpanel(vbox)
def _build_subpanel(self, vbox_arg):
# build a secondary information panel (sometimes hidden)
# FIXME figure out how to have this be a subpanel that is always
# created, but has its visibility controlled by foo.Show(True/False)
def _form_set_samp_rate(kv):
return self.set_samp_rate(kv['samprate'])
if not(self.show_debug_info):
return
panel = self.panel
vbox = vbox_arg
myform = self.myform
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((5,0), 0)
myform['samprate'] = form.float_field(
parent=panel, sizer=hbox, label="Sample Rate",
callback=myform.check_input_and_call(_form_set_samp_rate,
self._set_status_msg))
hbox.Add((5,0), 1)
myform['rffreq'] = form.static_float_field(
parent=panel, sizer=hbox, label="RF Freq.")
hbox.Add((5,0), 1)
myform['dspfreq'] = form.static_float_field(
parent=panel, sizer=hbox, label="DSP Freq.")
hbox.Add((5,0), 0)
vbox.Add(hbox, 0, wx.EXPAND)
def set_freq(self, target_freq):
"""
Set the center frequency we're interested in.
@param target_freq: frequency in Hz
        @rtype: bool
"""
r = self.u.set_center_freq(target_freq, 0)
if r:
self.myform['freq'].set_value(self.u.get_center_freq())
self.myform['rffreq'].set_value(r.actual_rf_freq)
self.myform['dspfreq'].set_value(r.actual_dsp_freq)
if not self.options.oscilloscope:
self.scope.set_baseband_freq(target_freq)
return True
return False
def set_gain(self, gain):
if self.myform.has_key('gain'):
self.myform['gain'].set_value(gain) # update displayed value
self.u.set_gain(gain, 0)
def set_samp_rate(self, samp_rate):
ok = self.u.set_samp_rate(samp_rate)
input_rate = self.u.get_samp_rate()
self.scope.set_sample_rate(input_rate)
if self.show_debug_info: # update displayed values
self.myform['samprate'].set_value(self.u.get_samp_rate())
# uhd set_samp_rate never fails; always falls back to closest requested.
return True
def _setup_events(self):
if not self.options.waterfall and not self.options.oscilloscope:
self.scope.win.Bind(wx.EVT_LEFT_DCLICK, self.evt_left_dclick)
def evt_left_dclick(self, event):
(ux, uy) = self.scope.win.GetXY(event)
if event.CmdDown():
# Re-center on maximum power
points = self.scope.win._points
if self.scope.win.peak_hold:
if self.scope.win.peak_vals is not None:
ind = numpy.argmax(self.scope.win.peak_vals)
else:
                    ind = int(points.shape[0]/2)
else:
ind = numpy.argmax(points[:,1])
(freq, pwr) = points[ind]
target_freq = freq/self.scope.win._scale_factor
print ind, freq, pwr
self.set_freq(target_freq)
else:
# Re-center on clicked frequency
target_freq = ux/self.scope.win._scale_factor
self.set_freq(target_freq)
def main ():
app = stdgui2.stdapp(app_top_block, "UHD FFT", nstatus=1)
app.MainLoop()
if __name__ == '__main__':
main ()
| gpl-3.0 | -4,927,043,735,244,295,000 | 36.154982 | 100 | 0.571755 | false |
googleapis/python-documentai | samples/snippets/batch_process_documents_sample.py | 1 | 4812 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START documentai_batch_process_document]
import re
from google.cloud import documentai_v1 as documentai
from google.cloud import storage
# TODO(developer): Uncomment these variables before running the sample.
# project_id= 'YOUR_PROJECT_ID'
# location = 'YOUR_PROJECT_LOCATION' # Format is 'us' or 'eu'
# processor_id = 'YOUR_PROCESSOR_ID' # Create processor in Cloud Console
# gcs_input_uri = "YOUR_INPUT_URI"
# gcs_output_uri = "YOUR_OUTPUT_BUCKET_URI"
# gcs_output_uri_prefix = "YOUR_OUTPUT_URI_PREFIX"
def batch_process_documents(
project_id,
location,
processor_id,
gcs_input_uri,
gcs_output_uri,
gcs_output_uri_prefix,
timeout: int = 300,
):
# You must set the api_endpoint if you use a location other than 'us', e.g.:
opts = {}
if location == "eu":
opts = {"api_endpoint": "eu-documentai.googleapis.com"}
client = documentai.DocumentProcessorServiceClient(client_options=opts)
destination_uri = f"{gcs_output_uri}/{gcs_output_uri_prefix}/"
gcs_documents = documentai.GcsDocuments(
documents=[{"gcs_uri": gcs_input_uri, "mime_type": "application/pdf"}]
)
# 'mime_type' can be 'application/pdf', 'image/tiff',
# and 'image/gif', or 'application/json'
input_config = documentai.BatchDocumentsInputConfig(gcs_documents=gcs_documents)
# Where to write results
output_config = documentai.DocumentOutputConfig(
gcs_output_config={"gcs_uri": destination_uri}
)
# Location can be 'us' or 'eu'
name = f"projects/{project_id}/locations/{location}/processors/{processor_id}"
request = documentai.types.document_processor_service.BatchProcessRequest(
name=name,
input_documents=input_config,
document_output_config=output_config,
)
operation = client.batch_process_documents(request)
# Wait for the operation to finish
operation.result(timeout=timeout)
# Results are written to GCS. Use a regex to find
# output files
match = re.match(r"gs://([^/]+)/(.+)", destination_uri)
output_bucket = match.group(1)
prefix = match.group(2)
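    # Illustrative: destination_uri "gs://my-bucket/out/run-1/" (hypothetical
    # names) yields output_bucket "my-bucket" and prefix "out/run-1/".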
storage_client = storage.Client()
bucket = storage_client.get_bucket(output_bucket)
blob_list = list(bucket.list_blobs(prefix=prefix))
print("Output files:")
for i, blob in enumerate(blob_list):
# If JSON file, download the contents of this blob as a bytes object.
if ".json" in blob.name:
blob_as_bytes = blob.download_as_bytes()
document = documentai.types.Document.from_json(blob_as_bytes)
print(f"Fetched file {i + 1}")
# For a full list of Document object attributes, please reference this page:
# https://cloud.google.com/document-ai/docs/reference/rpc/google.cloud.documentai.v1beta3#document
# Read the text recognition output from the processor
for page in document.pages:
for form_field in page.form_fields:
field_name = get_text(form_field.field_name, document)
field_value = get_text(form_field.field_value, document)
print("Extracted key value pair:")
print(f"\t{field_name}, {field_value}")
for paragraph in page.paragraphs:
paragraph_text = get_text(paragraph.layout, document)
print(f"Paragraph text:\n{paragraph_text}")
else:
print(f"Skipping non-supported file type {blob.name}")
# Extract shards from the text field
def get_text(doc_element: dict, document: dict):
"""
Document AI identifies form fields by their offsets
in document text. This function converts offsets
to text snippets.
"""
response = ""
# If a text segment spans several lines, it will
# be stored in different text segments.
for segment in doc_element.text_anchor.text_segments:
        # proto3 returns 0 for an unset start_index (common on the
        # first segment), so the int() conversion alone is sufficient
        start_index = int(segment.start_index)
end_index = int(segment.end_index)
response += document.text[start_index:end_index]
return response
# [END documentai_batch_process_document]
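# Example invocation (a sketch with hypothetical values, following the
# TODO(developer) placeholders above; assumes the processor and the Cloud
# Storage buckets referenced below already exist):
#
# batch_process_documents(
#     project_id="my-project",
#     location="us",
#     processor_id="abcdef0123456789",
#     gcs_input_uri="gs://my-bucket/input/invoice.pdf",
#     gcs_output_uri="gs://my-bucket",
#     gcs_output_uri_prefix="output",
# )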
| apache-2.0 | -8,379,463,069,707,998,000 | 35.180451 | 110 | 0.663965 | false |
tombstone/models | research/efficient-hrl/environments/ant.py | 4 | 4728 | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for creating the ant environment in gym_mujoco."""
import math
import numpy as np
import mujoco_py
from gym import utils
from gym.envs.mujoco import mujoco_env
def q_inv(a):
return [a[0], -a[1], -a[2], -a[3]]
def q_mult(a, b):  # multiply two quaternions
w = a[0] * b[0] - a[1] * b[1] - a[2] * b[2] - a[3] * b[3]
i = a[0] * b[1] + a[1] * b[0] + a[2] * b[3] - a[3] * b[2]
j = a[0] * b[2] - a[1] * b[3] + a[2] * b[0] + a[3] * b[1]
k = a[0] * b[3] + a[1] * b[2] - a[2] * b[1] + a[3] * b[0]
return [w, i, j, k]
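# Usage sketch (illustrative, not part of the original file): rotating a
# vector with q * v * q^-1, the same conjugation get_ori() performs below.
# Quaternions are [w, x, y, z]; vectors are embedded as pure quaternions.
#
# rot = [0.7071, 0.0, 0.0, 0.7071]  # 90 degrees about the z-axis
# vec = [0, 1, 0, 0]                # the x-axis as a pure quaternion
# q_mult(q_mult(rot, vec), q_inv(rot))[1:]  # ~[0, 1, 0], i.e. the y-axis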
class AntEnv(mujoco_env.MujocoEnv, utils.EzPickle):
FILE = "ant.xml"
ORI_IND = 3
def __init__(self, file_path=None, expose_all_qpos=True,
expose_body_coms=None, expose_body_comvels=None):
self._expose_all_qpos = expose_all_qpos
self._expose_body_coms = expose_body_coms
self._expose_body_comvels = expose_body_comvels
self._body_com_indices = {}
self._body_comvel_indices = {}
mujoco_env.MujocoEnv.__init__(self, file_path, 5)
utils.EzPickle.__init__(self)
@property
def physics(self):
# check mujoco version is greater than version 1.50 to call correct physics
# model containing PyMjData object for getting and setting position/velocity
# check https://github.com/openai/mujoco-py/issues/80 for updates to api
if mujoco_py.get_version() >= '1.50':
return self.sim
else:
return self.model
def _step(self, a):
return self.step(a)
def step(self, a):
xposbefore = self.get_body_com("torso")[0]
self.do_simulation(a, self.frame_skip)
xposafter = self.get_body_com("torso")[0]
forward_reward = (xposafter - xposbefore) / self.dt
ctrl_cost = .5 * np.square(a).sum()
survive_reward = 1.0
reward = forward_reward - ctrl_cost + survive_reward
state = self.state_vector()
done = False
ob = self._get_obs()
return ob, reward, done, dict(
reward_forward=forward_reward,
reward_ctrl=-ctrl_cost,
reward_survive=survive_reward)
def _get_obs(self):
# No cfrc observation
if self._expose_all_qpos:
obs = np.concatenate([
self.physics.data.qpos.flat[:15], # Ensures only ant obs.
self.physics.data.qvel.flat[:14],
])
else:
obs = np.concatenate([
self.physics.data.qpos.flat[2:15],
self.physics.data.qvel.flat[:14],
])
if self._expose_body_coms is not None:
for name in self._expose_body_coms:
com = self.get_body_com(name)
if name not in self._body_com_indices:
indices = range(len(obs), len(obs) + len(com))
self._body_com_indices[name] = indices
obs = np.concatenate([obs, com])
if self._expose_body_comvels is not None:
for name in self._expose_body_comvels:
comvel = self.get_body_comvel(name)
if name not in self._body_comvel_indices:
indices = range(len(obs), len(obs) + len(comvel))
self._body_comvel_indices[name] = indices
obs = np.concatenate([obs, comvel])
return obs
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(
size=self.model.nq, low=-.1, high=.1)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
# Set everything other than ant to original position and 0 velocity.
qpos[15:] = self.init_qpos[15:]
qvel[14:] = 0.
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 0.5
def get_ori(self):
ori = [0, 1, 0, 0]
rot = self.physics.data.qpos[self.__class__.ORI_IND:self.__class__.ORI_IND + 4] # take the quaternion
ori = q_mult(q_mult(rot, ori), q_inv(rot))[1:3] # project onto x-y plane
ori = math.atan2(ori[1], ori[0])
return ori
def set_xy(self, xy):
qpos = np.copy(self.physics.data.qpos)
qpos[0] = xy[0]
qpos[1] = xy[1]
qvel = self.physics.data.qvel
self.set_state(qpos, qvel)
def get_xy(self):
return self.physics.data.qpos[:2]
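# Rollout sketch (illustrative only, not part of the original file; assumes a
# valid MuJoCo ant XML at the given path and a working mujoco_py install):
#
# env = AntEnv(file_path="/path/to/ant.xml")
# obs = env.reset()
# for _ in range(10):
#     obs, reward, done, info = env.step(env.action_space.sample())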
| apache-2.0 | -369,024,179,318,738,500 | 32.531915 | 106 | 0.616117 | false |
Chibin/gpdb | gpMgmt/bin/gppylib/mainUtils.py | 14 | 22032 | # Line too long - pylint: disable=C0301
# Invalid name - pylint: disable=C0103
"""
mainUtils.py
------------
This file provides a rudimentary framework to support top-level option
parsing, initialization and cleanup logic common to multiple programs.
The primary interface function is 'simple_main'. For an example of
how it is expected to be used, see gprecoverseg.
It is anticipated that the functionality of this file will grow as we
extend common functions of our gp utilities. Please keep this in mind
and try to avoid placing logic for a specific utility here.
"""
import os, sys, signal, errno, yaml
gProgramName = os.path.split(sys.argv[0])[-1]
if sys.version_info < (2, 5, 0):
sys.exit(
'''Error: %s is supported on Python versions 2.5 or greater
Please upgrade the Python installation on this machine.''' % gProgramName)
from gppylib import gplog
from gppylib.commands import gp, unix
from gppylib.commands.base import ExecutionError
from gppylib.system import configurationInterface, configurationImplGpdb, fileSystemInterface, \
fileSystemImplOs, osInterface, osImplNative, faultProberInterface, faultProberImplGpdb
from optparse import OptionGroup, OptionParser, SUPPRESS_HELP
from lockfile.pidlockfile import PIDLockFile, LockTimeout
def getProgramName():
"""
Return the name of the current top-level program from sys.argv[0]
or the programNameOverride option passed to simple_main via mainOptions.
"""
global gProgramName
return gProgramName
class SimpleMainLock:
"""
Tools like gprecoverseg prohibit running multiple instances at the same time
via a simple lock file created in the MASTER_DATA_DIRECTORY. This class takes
care of the work to manage this lock as appropriate based on the mainOptions
specified.
Note that in some cases, the utility may want to recursively invoke
itself (e.g. gprecoverseg -r). To handle this, the caller may specify
the name of an environment variable holding the pid already acquired by
the parent process.
"""
def __init__(self, mainOptions):
self.pidfilename = mainOptions.get('pidfilename', None) # the file we're using for locking
self.parentpidvar = mainOptions.get('parentpidvar', None) # environment variable holding parent pid
self.parentpid = None # parent pid which already has the lock
self.ppath = None # complete path to the lock file
self.pidlockfile = None # PIDLockFile object
self.pidfilepid = None # pid of the process which has the lock
self.locktorelease = None # PIDLockFile object we should release when done
if self.parentpidvar is not None and self.parentpidvar in os.environ:
self.parentpid = int(os.environ[self.parentpidvar])
if self.pidfilename is not None:
self.ppath = os.path.join(gp.get_masterdatadir(), self.pidfilename)
self.pidlockfile = PIDLockFile(self.ppath)
def acquire(self):
"""
Attempts to acquire the lock this process needs to proceed.
Returns None on successful acquisition of the lock or
the pid of the other process which already has the lock.
"""
        # nothing to do if the utility requires no locking
if self.pidlockfile is None:
return None
# look for a lock file
self.pidfilepid = self.pidlockfile.read_pid()
if self.pidfilepid is not None:
# we found a lock file
# allow the process to proceed if the locker was our parent
if self.pidfilepid == self.parentpid:
return None
# cleanup stale locks
try:
os.kill(self.pidfilepid, signal.SIG_DFL)
except OSError, exc:
if exc.errno == errno.ESRCH:
self.pidlockfile.break_lock()
self.pidfilepid = None
# try and acquire the lock
try:
self.pidlockfile.acquire(1)
except LockTimeout:
self.pidfilepid = self.pidlockfile.read_pid()
return self.pidfilepid
# we have the lock
# prepare for a later call to release() and take good
# care of the process environment for the sake of our children
self.locktorelease = self.pidlockfile
self.pidfilepid = self.pidlockfile.read_pid()
if self.parentpidvar is not None:
os.environ[self.parentpidvar] = str(self.pidfilepid)
return None
def release(self):
"""
Releases the lock this process acquired.
"""
if self.locktorelease is not None:
self.locktorelease.release()
self.locktorelease = None
#
# exceptions we handle specially by the simple_main framework.
#
class ProgramArgumentValidationException(Exception):
"""
Throw this out to main to have the message possibly
printed with a help suggestion.
"""
def __init__(self, msg, shouldPrintHelp=False):
"init"
Exception.__init__(self, msg)
self.__shouldPrintHelp = shouldPrintHelp
self.__msg = msg
def shouldPrintHelp(self):
"shouldPrintHelp"
return self.__shouldPrintHelp
def getMessage(self):
"getMessage"
return self.__msg
class ExceptionNoStackTraceNeeded(Exception):
"""
Our code throws this exception when we encounter a condition
we know can arise which demands immediate termination.
"""
pass
class UserAbortedException(Exception):
"""
UserAbortedException should be thrown when a user decides to stop the
program (at a y/n prompt, for example).
"""
pass
def simple_main(createOptionParserFn, createCommandFn, mainOptions=None):
"""
    createOptionParserFn : a function that takes no arguments and returns an OptionParser
    createCommandFn : a function that takes two arguments (the options, and the args that were not
                       processed into options) and returns an object that has "run" and "cleanup"
                       functions.  Its "run" function must run and return an exit code.  "cleanup" will
                       be called to clean up before the program exits; this can be used, for example,
                       to clean up a worker pool
mainOptions can include: forceQuietOutput (map to bool),
programNameOverride (map to string)
suppressStartupLogMessage (map to bool)
useHelperToolLogging (map to bool)
setNonuserOnToolLogger (map to bool, defaults to false)
pidfilename (string)
parentpidvar (string)
"""
simple_main_internal(createOptionParserFn, createCommandFn, mainOptions)
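# Minimal usage sketch (illustrative only; the names below are hypothetical).
# A utility supplies the two factory functions, and may pass 'pidfilename' in
# mainOptions so only one instance runs per master data directory:
#
# def create_parser():
#     parser = OptionParser()
#     addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)
#     return parser
#
# class ExampleCommand:
#     def __init__(self, options, args):
#         self.options = options
#     def run(self):
#         return 0  # exit code
#     def cleanup(self):
#         pass
#
# simple_main(create_parser,
#             lambda options, args: ExampleCommand(options, args),
#             {'pidfilename': 'example.pid'})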
def simple_main_internal(createOptionParserFn, createCommandFn, mainOptions):
"""
If caller specifies 'pidfilename' in mainOptions then we manage the
specified pid file within the MASTER_DATA_DIRECTORY before proceeding
to execute the specified program and we clean up the pid file when
we're done.
"""
sml = None
if mainOptions is not None and 'pidfilename' in mainOptions:
sml = SimpleMainLock(mainOptions)
otherpid = sml.acquire()
if otherpid is not None:
logger = gplog.get_default_logger()
logger.error("An instance of %s is already running (pid %s)" % (getProgramName(), otherpid))
return
# at this point we have whatever lock we require
try:
simple_main_locked(createOptionParserFn, createCommandFn, mainOptions)
finally:
if sml is not None:
sml.release()
def simple_main_locked(createOptionParserFn, createCommandFn, mainOptions):
"""
Not to be called externally -- use simple_main instead
"""
logger = gplog.get_default_logger()
configurationInterface.registerConfigurationProvider(
configurationImplGpdb.GpConfigurationProviderUsingGpdbCatalog())
fileSystemInterface.registerFileSystemProvider(fileSystemImplOs.GpFileSystemProviderUsingOs())
osInterface.registerOsProvider(osImplNative.GpOsProviderUsingNative())
faultProberInterface.registerFaultProber(faultProberImplGpdb.GpFaultProberImplGpdb())
commandObject = None
parser = None
forceQuiet = mainOptions is not None and mainOptions.get("forceQuietOutput")
options = None
if mainOptions is not None and mainOptions.get("programNameOverride"):
global gProgramName
gProgramName = mainOptions.get("programNameOverride")
suppressStartupLogMessage = mainOptions is not None and mainOptions.get("suppressStartupLogMessage")
useHelperToolLogging = mainOptions is not None and mainOptions.get("useHelperToolLogging")
nonuser = True if mainOptions is not None and mainOptions.get("setNonuserOnToolLogger") else False
exit_status = 1
# NOTE: if this logic is changed then also change test_main in testUtils.py
try:
execname = getProgramName()
hostname = unix.getLocalHostname()
username = unix.getUserName()
parser = createOptionParserFn()
(options, args) = parser.parse_args()
if useHelperToolLogging:
gplog.setup_helper_tool_logging(execname, hostname, username)
else:
gplog.setup_tool_logging(execname, hostname, username,
logdir=options.ensure_value("logfileDirectory", None), nonuser=nonuser)
if forceQuiet:
gplog.quiet_stdout_logging()
else:
if options.ensure_value("verbose", False):
gplog.enable_verbose_logging()
if options.ensure_value("quiet", False):
gplog.quiet_stdout_logging()
if options.ensure_value("masterDataDirectory", None) is not None:
options.master_data_directory = os.path.abspath(options.masterDataDirectory)
if not suppressStartupLogMessage:
logger.info("Starting %s with args: %s" % (gProgramName, ' '.join(sys.argv[1:])))
commandObject = createCommandFn(options, args)
exitCode = commandObject.run()
exit_status = exitCode
except ProgramArgumentValidationException, e:
if e.shouldPrintHelp():
parser.print_help()
logger.error("%s: error: %s" % (gProgramName, e.getMessage()))
exit_status = 2
except ExceptionNoStackTraceNeeded, e:
logger.error("%s error: %s" % (gProgramName, e))
exit_status = 2
except UserAbortedException, e:
logger.info("User abort requested, Exiting...")
exit_status = 4
except ExecutionError, e:
logger.fatal("Error occurred: %s\n Command was: '%s'\n"
"rc=%d, stdout='%s', stderr='%s'" % \
(e.summary, e.cmd.cmdStr, e.cmd.results.rc, e.cmd.results.stdout,
e.cmd.results.stderr))
exit_status = 2
except Exception, e:
if options is None:
logger.exception("%s failed. exiting...", gProgramName)
else:
if options.ensure_value("verbose", False):
logger.exception("%s failed. exiting...", gProgramName)
else:
logger.fatal("%s failed. (Reason='%s') exiting..." % (gProgramName, e))
exit_status = 2
except KeyboardInterrupt:
exit_status = 2
finally:
if commandObject:
commandObject.cleanup()
sys.exit(exit_status)
def addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption, includeUsageOption=False):
"""
Add the standard options for help and logging
to the specified parser object.
"""
parser.set_usage('%prog [--help] [options] ')
parser.remove_option('-h')
addTo = parser
addTo.add_option('-h', '-?', '--help', action='help',
help='show this help message and exit')
if includeUsageOption:
parser.add_option('--usage', action="briefhelp")
addTo = OptionGroup(parser, "Logging Options")
parser.add_option_group(addTo)
addTo.add_option('-v', '--verbose', action='store_true',
help='debug output.')
addTo.add_option('-q', '--quiet', action='store_true',
help='suppress status messages')
addTo.add_option("-l", None, dest="logfileDirectory", metavar="<directory>", type="string",
help="Logfile directory")
if includeNonInteractiveOption:
addTo.add_option('-a', dest="interactive", action='store_false', default=True,
help="quiet mode, do not require user input for confirmations")
def addMasterDirectoryOptionForSingleClusterProgram(addTo):
"""
Add the -d master directory option to the specified parser object
which is intended to provide the value of the master data directory.
For programs that operate on multiple clusters at once, this function/option
is not appropriate.
"""
addTo.add_option('-d', '--master_data_directory', type='string',
dest="masterDataDirectory",
metavar="<master data directory>",
help="Optional. The master host data directory. If not specified, the value set" \
"for $MASTER_DATA_DIRECTORY will be used.")
#
# YamlMain
#
def get_yaml(targetclass):
"get_yaml"
# doc - class's doc string
# pos - where YAML starts in doc
# ystr - YAML string extracted from doc
if not hasattr(targetclass, '_yaml') or targetclass._yaml is None:
doc = targetclass.__doc__
pos = doc.find('%YAML')
assert pos >= 0, "targetclass doc string is missing %YAML plan"
ystr = doc[pos:].replace('\n ', '\n')
targetclass._yaml = yaml.load(ystr)
return targetclass._yaml
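# Illustrative sketch (hypothetical class, not part of this module): get_yaml
# pulls the %YAML document embedded in a class doc string, which is the shape
# YamlOptions and Plan below expect. The doc string would look roughly like:
#
# class ExampleTool(YamlMain):
#     """
#     %YAML 1.1
#     ---
#     Description: example tool
#     Usage: exampletool [options]
#     Options:
#       Groups: [Help Options]
#       Help Options:
#         -h,--help: print this help message
#     Default Scenario: run
#     Scenarios:
#       run:
#         - 1 Main:
#           - 1.1 Do the work     # dispatches to a do_the_work() method
#     Errors:
#       oops: "something failed: %(detail)s"
#     """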
class YamlMain:
"YamlMain"
def __init__(self):
"Parse arguments based on yaml docstring"
self.current = None
self.plan = None
self.scenario_name = None
self.logger = None
self.logfilename = None
self.errmsg = None
self.parser = YamlOptions(self).parser
self.options, self.args = self.parser.parse_args()
self.options.quiet = self.options.q
self.options.verbose = self.options.v
#
# simple_main interface
#
def __call__(self, *args):
"Allows us to use self as the create_parser and create_program functions in call to simple_main"
return self
def parse_args(self):
"Called by simple_main to obtain results from parser returned by create_parser"
return self.options, self.args
def run(self):
"Called by simple_main to execute the program returned by create_program"
self.plan = Plan(self)
self.scenario_name = self.plan.name
self.logger = self.plan.logger
self.logfilename = self.plan.logfilename
self.errmsg = self.plan.errmsg
self.current = []
self.plan.run()
def cleanup(self):
"Called by simple_main to cleanup after program returned by create_program finishes"
pass
def simple(self):
"Delegates setup and control to mainUtils.simple_main"
simple_main(self, self)
#
# option parsing
#
class YamlOptions:
"YamlOptions"
def __init__(self, target):
"""
Scan the class doc string of the given object, looking for the %YAML
containing the option specification. Parse the YAML and setup the
corresponding OptionParser object.
"""
# target - options object (input)
# gname - option group name
self.y = get_yaml(target.__class__)
self.parser = OptionParser(description=self.y['Description'], version='%prog version $Revision$')
self.parser.remove_option('-h')
self.parser.set_usage(self.y['Usage'])
self.opty = self.y['Options']
for gname in self.opty.get('Groups', []):
self._register_group(gname)
def _register_group(self, gname):
"""
Register options for the specified option group name to the OptionParser
using an OptionGroup unless the group name starts with 'Help' in which
case we just register the options with the top level OptionParser object.
"""
# gname - option group name (input)
# gy - option group YAML object
# grp - option group object
# tgt - where to add options (parser or option group)
# optkey - comma separated list of option flags
# optval - help string or dict with detailed option settings
# listargs - list of option flags (e.g. ['-h', '--help'])
# dictargs - key/value arguments to add_option
gy = self.opty.get(gname, None)
if gname.startswith('Help'):
grp = None
tgt = self.parser
else:
grp = OptionGroup(self.parser, gname)
tgt = grp
for optkey, optval in gy.items():
listargs = optkey.split(',')
if type(optval) == type(''):
# short form: optval is just a help string
dictargs = {
'action': 'store_true',
'help': optval
}
else:
# optval is the complete option specification
dictargs = optval
# hide hidden options
if dictargs.get('help', '').startswith('hidden'):
dictargs['help'] = SUPPRESS_HELP
# print 'adding', listargs, dictargs
tgt.add_option(*listargs, **dictargs)
if grp is not None:
self.parser.add_option_group(grp)
#
# plan execution
#
class Task:
"Task"
def __init__(self, key, name, subtasks=None):
self.Key = key # task key
self.Name = name # task name
self.SubTasks = subtasks # subtasks, if any
self.Func = None # task function, set by _task
def _print(self, main, prefix):
print '%s %s %s:' % (prefix, self.Key, self.Name)
def _debug(self, main, prefix):
main.logger.debug('Execution Plan:%s %s %s%s' % (prefix, self.Key, self.Name, ':' if self.SubTasks else ''))
def _run(self, main, prefix):
main.logger.debug(' Now Executing:%s %s %s' % (prefix, self.Key, self.Name))
if self.Func:
self.Func()
class Exit(Exception):
def __init__(self, rc, code=None, call_support=False):
Exception.__init__(self)
self.code = code
self.prm = sys._getframe(1).f_locals
self.rc = rc
self.call_support = call_support
class Plan:
"Plan"
def __init__(self, main):
"""
Create cached yaml from class doc string of the given object,
looking for the %YAML indicating the beginning of the object's YAML plan and parse it.
Build the plan stages and tasks for the specified scenario.
"""
# main - object with yaml scenarios (input)
# sy - Stage yaml
self.logger = gplog.get_default_logger()
self.logfilename = gplog.get_logfile()
self.main = main
self.y = get_yaml(main.__class__)
self.name = main.options.scenario
if not self.name:
self.name = self.y['Default Scenario']
self.scenario = self.y['Scenarios'][self.name]
self.errors = self.y['Errors']
self.Tasks = [self._task(ty) for ty in self.scenario]
def _task(self, ty):
"Invoked by __init__ to build a top-level task from the YAML"
# ty - Task yaml (input)
# tyk - Task yaml key
# tyv - Task yaml value
# sty - Sub Task yaml
# t - Task (returned)
for tyk, tyv in ty.items():
key, workers = tyk.split(None, 1)
subtasks = [self._subtask(sty) for sty in tyv]
t = Task(key, workers, subtasks)
return t
def _subtask(self, sty):
"Invoked by _stage to build a task from the YAML"
# sty - Sub Task yaml (input)
# st - Sub Task (returned)
key, rest = sty.split(None, 1)
st = Task(key, rest)
fn = st.Name.lower().replace(' ', '_')
try:
st.Func = getattr(self.main, fn)
except AttributeError, e:
raise Exception("Failed to lookup '%s' for sub task '%s': %s" % (fn, st.Name, str(e)))
return st
def _dotasks(self, subtasks, prefix, action):
"Apply an action to each subtask recursively"
# st - Sub Task
for st in subtasks or []:
self.main.current.append(st)
action(st, self.main, prefix)
self._dotasks(st.SubTasks, ' ' + prefix, action)
self.main.current.pop()
def _print(self):
"Print in YAML form."
print '%s:' % self.name
self._dotasks(self.Tasks, ' -', lambda t, m, p: t._print(m, p))
def run(self):
"Run the stages and tasks."
self.logger.debug('Execution Plan: %s' % self.name)
self._dotasks(self.Tasks, ' -', lambda t, m, p: t._debug(m, p))
self.logger.debug(' Now Executing: %s' % self.name)
try:
self._dotasks(self.Tasks, ' -', lambda t, m, p: t._run(m, p))
except Exit, e:
self.exit(e.code, e.prm, e.rc, e.call_support)
def errmsg(self, code, prm={}):
"Return a formatted error message"
return self.errors[code] % prm
def exit(self, code=None, prm={}, rc=1, call_support=False):
"Terminate the application"
if code:
msg = self.errmsg(code, prm)
self.logger.error(msg)
if call_support:
self.logger.error('Please send %s to Greenplum support.' % self.logfilename)
self.logger.debug('exiting with status %(rc)s' % locals())
sys.exit(rc)
| apache-2.0 | 7,866,382,653,266,481,000 | 34.708266 | 119 | 0.61969 | false |
DolphinDream/sverchok | nodes/viz/viewer_gp.py | 2 | 10182 | # This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
import itertools
import bpy
# import mathutils
# from mathutils import Vector
# from bpy.props import FloatProperty, BoolProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, fullList
def msg_box(message="", title="Message Box", icon='INFO'):
def msg_draw(self, context):
self.layout.label(text=message)
bpy.context.window_manager.popup_menu(msg_draw, title=title, icon=icon)
def set_correct_stroke_count(strokes, coords):
""" ensure that the number of strokes match the sets of coordinates """
diff = len(strokes) - len(coords)
if diff < 0:
# add new strokes
for _ in range(abs(diff)):
            strokes.new()
elif diff > 0:
# remove excess strokes
for _ in range(diff):
strokes.remove(strokes[-1])
def pass_data_to_stroke(stroke, coord_set):
""" adjust the number of points per stroke, to match the incoming coord_set """
sdiff = len(stroke.points) - len(coord_set)
if sdiff < 0:
stroke.points.add(count=abs(sdiff))
elif sdiff > 0:
for _ in range(sdiff):
stroke.points.pop()
flat_coords = list(itertools.chain.from_iterable(coord_set))
stroke.points.foreach_set('co', flat_coords)
def pass_pressures_to_stroke(stroke, flat_pressures):
stroke.points.foreach_set('pressure', flat_pressures)
def match_points_and_pressures(pressure_set, num_points):
num_pressures = len(pressure_set)
if num_pressures < num_points:
fullList(pressure_set, num_points)
elif num_pressures > num_points:
pressure_set = pressure_set[:num_points]
return pressure_set
def get_palette(grease_pencil, palette_name=None):
palettes = bpy.data.palettes
    if palette_name not in palettes:
palette = palettes.new(palette_name)
else:
palette = palettes.get(palette_name)
return palette
def remove_unused_colors(palette, strokes):
"""
optional cleanup step, probably best to not have this switched on by default
"""
# named_colors = [stroke.colorname for stroke in strokes] + [str([0,0,0])]
# unused_named_colors = {color.name for color in palette.colors} - set(named_colors)
# for unused_color in unused_named_colors:
# palette.colors.remove(palette.colors[unused_color])
pass
def ensure_gp_object(gp_object_name):
objects = bpy.data.objects
collections = bpy.data.collections
collection = collections.get(gp_object_name)
gp_object = collection.objects.get(gp_object_name)
if not gp_object:
gp_data = bpy.data.grease_pencils.new(gp_object_name)
gp_object = objects.new(gp_object_name, gp_data)
collection.objects.link(gp_object)
return gp_object
def ensure_layer_availability(gp_object):
# ensure a layer to draw to, at the moment only layer one.
if not gp_object.data.layers:
gp_object.data.layers.new("layer 1")
return gp_object.data.layers[0]
def ensure_frame_availability(layer, frame_number):
if not layer.frames:
# object has no frames
frame = layer.frames.new(frame_number)
else:
# object has frames, we look for frame number or create one if not present
frame = [f for f in layer.frames if f.frame_number == frame_number]
if len(frame) == 1:
frame = frame[0]
if not frame:
frame = layer.frames.new(frame_number)
return frame
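# Usage sketch (hypothetical name; ensure_gp_object assumes a collection with
# the same name already exists -- the node's ensure_collection creates it):
#
# gp_object = ensure_gp_object("GP demo")
# layer = ensure_layer_availability(gp_object)
# frame = ensure_frame_availability(layer, frame_number=1)
# stroke = frame.strokes.new()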
class SvGreasePencilStrokes(bpy.types.Node, SverchCustomTreeNode):
''' Make GreasePencil Strokes '''
bl_idname = 'SvGreasePencilStrokes'
bl_label = 'Grease Pencil (BETA)'
bl_icon = 'GREASEPENCIL'
# SCREEN / 3DSPACE / 2DSPACE / 2DIMAGE
mode_options = [(k, k, '', i) for i, k in enumerate(['3DSPACE', '2DSPACE'])]
draw_mode: bpy.props.EnumProperty(
items=mode_options, description="Draw Mode",
default="2DSPACE", update=updateNode
)
stroke_color: bpy.props.FloatVectorProperty(
update=updateNode, name='Stroke', default=(0.958, 1.0, 0.897, 1.0),
size=4, min=0.0, max=1.0, subtype='COLOR'
)
fill_color: bpy.props.FloatVectorProperty(
update=updateNode, name='Fill', default=(0.2, 0.6, 0.9, 1.0),
size=4, min=0.0, max=1.0, subtype='COLOR'
)
auto_cleanup_colors: bpy.props.BoolProperty(default=True, update=updateNode)
use_hq_fill: bpy.props.BoolProperty(default=False, update=updateNode)
draw_cyclic: bpy.props.BoolProperty(default=True, update=updateNode)
pressure: bpy.props.FloatProperty(default=2.0, min=0.1, max=8.0, update=updateNode)
num_strokes: bpy.props.IntProperty()
active_sv_node: bpy.props.BoolProperty(name="Active", default=True, update=updateNode)
def local_updateNode(self, context):
print('changed name')
msg_box(message="hey.. don't use this for serious stuff, and don't do bugreports for this node", title="BETA NODE : Sverchok Info", icon='INFO')
updateNode(self, context)
gp_object_name: bpy.props.StringProperty(
default="", name="GP name",
description="This textfield is used to generate (or pickup) a Collection name and an associated GreasePencil object",
update=local_updateNode)
def sv_init(self, context):
inew = self.inputs.new
onew = self.outputs.new
inew('SvStringsSocket', 'frame').quick_link_to_node = "SvFrameInfoNodeMK2"
inew('SvVerticesSocket', 'coordinates') # per stroke
inew('SvStringsSocket', 'draw cyclic').prop_name = 'draw_cyclic' # per stroke
inew('SvStringsSocket', 'pressure').prop_name = 'pressure' # per point
inew('SvColorSocket', 'stroke color').prop_name = 'stroke_color'
inew('SvColorSocket', 'fill color').prop_name = 'fill_color'
onew('SvObjectSocket', 'object')
def draw_buttons(self, context, layout):
layout.prop(self, "active_sv_node")
layout.prop(self, "gp_object_name", text="", icon="GROUP")
def draw_buttons_ext(self, context, layout):
layout.prop(self, 'use_hq_fill', toggle=True)
layout.prop(self, 'auto_cleanup_colors', text='auto remove unused colors')
def get_pressures(self):
pressures = self.inputs["pressure"].sv_get()
num_strokes = self.num_strokes
        # the default state will always be a single pressure list;
        # spread or trim it so each stroke gets a pressure entry
if len(pressures) == 1:
if len(pressures[0]) < num_strokes:
fullList(pressures[0], num_strokes)
elif len(pressures[0]) > num_strokes:
pressures[0] = pressures[0][:num_strokes]
pressures = [[n] for n in pressures[0]]
else:
fullList(pressures, num_strokes)
return pressures
def process(self):
# we have things to consider before doing any work.
if not self.active_sv_node:
return
if not self.gp_object_name:
return
frame_socket = self.inputs[0]
coordinates_socket = self.inputs[1]
if not (frame_socket.is_linked and coordinates_socket.is_linked):
return
try:
frame_number = frame_socket.sv_get()[0][0]
except:
frame_number = 1
colors = self.inputs["stroke color"]
fills = self.inputs["fill color"]
with self.sv_throttle_tree_update():
self.ensure_collection() # the collection name will be that of self.gp_object_name
gp_object = ensure_gp_object(self.gp_object_name)
layer = ensure_layer_availability(gp_object)
frame = ensure_frame_availability(layer, frame_number)
gp_materials = gp_object.data.materials
strokes = frame.strokes
GP_DATA = strokes.id_data
coords = coordinates_socket.sv_get()
self.num_strokes = len(coords)
set_correct_stroke_count(strokes, coords)
cols = colors.sv_get()[0]
fill_cols = fills.sv_get()[0]
cyclic_socket_value = self.inputs["draw cyclic"].sv_get()[0]
fullList(cyclic_socket_value, self.num_strokes)
fullList(cols, self.num_strokes)
fullList(fill_cols, self.num_strokes)
pressures = self.get_pressures()
for idx, (stroke, coord_set, color, fill) in enumerate(zip(strokes, coords, cols, fill_cols)):
color_name = f"{idx}_color_{self.gp_object_name}"
if color_name not in gp_materials:
mat = bpy.data.materials.new(color_name)
bpy.data.materials.create_gpencil_data(mat)
gp_materials.append(mat)
material = gp_materials.get(color_name)
material.grease_pencil.color = color
material.grease_pencil.fill_color = fill
material.grease_pencil.show_fill = True
material.grease_pencil.show_stroke = True
stroke.material_index = idx
stroke.draw_cyclic = cyclic_socket_value[idx]
num_points = len(coord_set)
pass_data_to_stroke(stroke, coord_set)
flat_pressures = match_points_and_pressures(pressures[idx], num_points)
# print(flat_pressures)
pass_pressures_to_stroke(stroke, flat_pressures)
stroke.line_width = 4
# remove_unused_colors(PALETTE, strokes)
self.outputs[0].sv_set([gp_object])
def ensure_collection(self):
collections = bpy.data.collections
if not collections.get(self.gp_object_name):
collection = collections.new(self.gp_object_name)
bpy.context.scene.collection.children.link(collection)
classes = [SvGreasePencilStrokes]
register, unregister = bpy.utils.register_classes_factory(classes)
| gpl-3.0 | 2,320,242,830,080,912,000 | 35.758123 | 152 | 0.634748 | false |
toddeye/home-assistant | homeassistant/components/switch/tellduslive.py | 2 | 1832 | """
homeassistant.components.switch.tellduslive
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for Tellstick switches using Tellstick Net and
the Telldus Live online service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.tellduslive/
"""
import logging
from homeassistant.components import tellduslive
from homeassistant.helpers.entity import ToggleEntity
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Find and return Tellstick switches. """
if discovery_info is None:
return
add_devices(TelldusLiveSwitch(switch) for switch in discovery_info)
class TelldusLiveSwitch(ToggleEntity):
""" Represents a Tellstick switch. """
def __init__(self, switch_id):
self._id = switch_id
self.update()
_LOGGER.debug("created switch %s", self)
def update(self):
tellduslive.NETWORK.update_switches()
self._switch = tellduslive.NETWORK.get_switch(self._id)
@property
def should_poll(self):
""" Tells Home Assistant to poll this entity. """
return True
@property
def name(self):
""" Returns the name of the switch if any. """
return self._switch["name"]
@property
    def available(self):
        """ True if the switch is reachable (not reported offline). """
        return not self._switch.get("offline", False)
@property
def is_on(self):
""" True if switch is on. """
from tellive.live import const
return self._switch["state"] == const.TELLSTICK_TURNON
def turn_on(self, **kwargs):
""" Turns the switch on. """
tellduslive.NETWORK.turn_switch_on(self._id)
def turn_off(self, **kwargs):
""" Turns the switch off. """
tellduslive.NETWORK.turn_switch_off(self._id)
| mit | -4,169,689,991,394,150,400 | 27.625 | 74 | 0.646288 | false |
AntonChankin/thefuck | tests/rules/test_mvn_no_command.py | 13 | 3648 | import pytest
from thefuck.rules.mvn_no_command import match, get_new_command
from tests.utils import Command
@pytest.mark.parametrize('command', [
Command(script='mvn', stdout='[ERROR] No goals have been specified for this build. You must specify a valid lifecycle phase or a goal in the format <plugin-prefix>:<goal> or <plugin-group-id>:<plugin-artifact-id>[:<plugin-version>]:<goal>. Available lifecycle phases are: validate, initialize, generate-sources, process-sources, generate-resources, process-resources, compile, process-classes, generate-test-sources, process-test-sources, generate-test-resources, process-test-resources, test-compile, process-test-classes, test, prepare-package, package, pre-integration-test, integration-test, post-integration-test, verify, install, deploy, pre-clean, clean, post-clean, pre-site, site, post-site, site-deploy. -> [Help 1]')])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command', [
Command(script='mvn clean', stdout="""
[INFO] Scanning for projects...[INFO]
[INFO] ------------------------------------------------------------------------
[INFO] Building test 0.2
[INFO] ------------------------------------------------------------------------
[INFO]
[INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ test ---
[INFO] Deleting /home/mlk/code/test/target
[INFO] ------------------------------------------------------------------------
[INFO] BUILD SUCCESS
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 0.477s
[INFO] Finished at: Wed Aug 26 13:05:47 BST 2015
[INFO] Final Memory: 6M/240M
[INFO] ------------------------------------------------------------------------
"""),
Command(script='mvn --help'),
Command(script='mvn -v')
])
def test_not_match(command):
assert not match(command)
@pytest.mark.parametrize('command, new_command', [
(Command(script='mvn', stdout='[ERROR] No goals have been specified for this build. You must specify a valid lifecycle phase or a goal in the format <plugin-prefix>:<goal> or <plugin-group-id>:<plugin-artifact-id>[:<plugin-version>]:<goal>. Available lifecycle phases are: validate, initialize, generate-sources, process-sources, generate-resources, process-resources, compile, process-classes, generate-test-sources, process-test-sources, generate-test-resources, process-test-resources, test-compile, process-test-classes, test, prepare-package, package, pre-integration-test, integration-test, post-integration-test, verify, install, deploy, pre-clean, clean, post-clean, pre-site, site, post-site, site-deploy. -> [Help 1]'), ['mvn clean package', 'mvn clean install']),
(Command(script='mvn -N', stdout='[ERROR] No goals have been specified for this build. You must specify a valid lifecycle phase or a goal in the format <plugin-prefix>:<goal> or <plugin-group-id>:<plugin-artifact-id>[:<plugin-version>]:<goal>. Available lifecycle phases are: validate, initialize, generate-sources, process-sources, generate-resources, process-resources, compile, process-classes, generate-test-sources, process-test-sources, generate-test-resources, process-test-resources, test-compile, process-test-classes, test, prepare-package, package, pre-integration-test, integration-test, post-integration-test, verify, install, deploy, pre-clean, clean, post-clean, pre-site, site, post-site, site-deploy. -> [Help 1]'), ['mvn -N clean package', 'mvn -N clean install'])])
def test_get_new_command(command, new_command):
assert get_new_command(command) == new_command
| mit | -7,063,368,682,495,941,000 | 90.2 | 788 | 0.657346 | false |
twiest/openshift-tools | ansible/roles/lib_git/library/git_commit.py | 12 | 15635 | #!/usr/bin/env python
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
"""Run an ssh agent and set SSH_AUTH_SOCK so that clients will use it
Example:
with ssh_agent.SshAgent() as agent:
agent.add_key(private_key_string)
# do ssh stuff
# as agent loses scope, the ssh agent is killed
"""
from __future__ import with_statement
import atexit
import tempfile
import os
import sys
import shutil
import subprocess
import random
import time
import datetime
class SshAgentException(Exception):
"""An exception thrown for problems in SshAgent
"""
def __init__(self, message):
# Call the base class constructor with the parameters it needs
super(SshAgentException, self).__init__(message)
class SshAgent(object):
"""Run an ssh agent and set SSH_AUTH_SOCK so that clients will use it.
The running agent can have one or more keys added (via the SshAgent.add_key()
method or via any other method that can find and talk to the running agent.
"""
class Cleanup(object):
"""A helper functor class for SshAgent
An object of this class can be passed
directly to atexit, which will call __call__() when the
program exits
"""
def __init__(self, ssh_agent, ssh_auth_sock_dir):
self.ssh_agent = ssh_agent
self.ssh_auth_sock_dir = ssh_auth_sock_dir
self.cleaned_up = False
self.original_env_var = os.environ.get('SSH_AUTH_SOCK')
def __call__(self):
if self.cleaned_up:
return
self.cleaned_up = True
try:
shutil.rmtree(self.ssh_auth_sock_dir, ignore_errors=True)
except OSError:
pass
try:
self.ssh_agent.kill()
except OSError:
pass
if self.original_env_var:
os.environ['SSH_AUTH_SOCK'] = self.original_env_var
else:
del os.environ['SSH_AUTH_SOCK']
def pass_(self):
"""A function to appease pylint"""
pass
def pass__(self):
"""Another function to appease pylint"""
self.pass_()
def __init__(self):
devnull = open(os.devnull, 'w')
# Start an ssh-agent process and register it to be killed atexit
self.ssh_auth_sock_dir = tempfile.mkdtemp(prefix=os.path.basename(sys.argv[0]) + '.')
self.ssh_auth_sock = os.path.join(self.ssh_auth_sock_dir, "ssh_agent")
self.ssh_agent = subprocess.Popen(["ssh-agent", "-d", "-a", self.ssh_auth_sock], stdout=devnull, stderr=devnull)
self.cleanup = self.Cleanup(self.ssh_agent, self.ssh_auth_sock_dir)
# this is here so that when python exits, we make sure that the agent is killed
# (in case python exits before our __del__() is called
atexit.register(self.cleanup)
os.environ["SSH_AUTH_SOCK"] = self.ssh_auth_sock
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tback):
self.cleanup()
def __del__(self):
self.cleanup()
def kill(self):
'''Explicitly kill the running ssh-agent
It's not necessary to call this function as the agent
will be cleaned up automatically.
'''
self.cleanup()
def add_key(self, key):
"""Add a key to the running agent.
Note:
This function can be called any number of times to add multiple keys.
Args:
key (str): A string containing the ssh private key to be added (the
actual key data, not the filename of a key)
Raises:
SshAgentException: when ssh-add does not immediately return (as in the
case of a private key with a passphrase)
"""
#if self.ssh_agent.poll() is None:
# raise SshAgentException("Unable to add ssh key. Did agent die?")
named_pipe_path = os.path.join(self.ssh_auth_sock_dir, "keypipe." + str(random.getrandbits(64)))
try:
os.mkfifo(named_pipe_path, 0600)
except OSError, exception:
print "Failed to create FIFO: %s" % exception
devnull = open(os.devnull, 'w')
ssh_add = subprocess.Popen(["ssh-add", named_pipe_path], stdout=devnull, stderr=devnull)
fifo = open(named_pipe_path, 'w')
print >> fifo, key
fifo.close()
#Popen.wait() doesn't have a timeout, so we'll implement one using poll() :(
start_time = datetime.datetime.now()
while ssh_add.poll() is None:
if (datetime.datetime.now() - start_time).total_seconds() > 5:
try:
ssh_add.kill()
except OSError:
pass
raise SshAgentException("Unable to add ssh key. Timed out. Does key have a passphrase?")
time.sleep(0.1)
os.remove(named_pipe_path)
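# Usage sketch (hypothetical key path; the key must not have a passphrase,
# since add_key() raises SshAgentException when ssh-add blocks on a prompt):
#
# with SshAgent() as agent:
#     agent.add_key(open('/path/to/id_rsa').read())
#     subprocess.call(['ssh', 'user@host', 'true'])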
# pylint: disable=too-many-lines
# these are already imported inside of the ssh library
#import os
#import subprocess
class GitCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class GitCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
path,
verbose=False,
ssh_key=None,
author=None):
''' Constructor for GitCLI '''
self.path = path
self.verbose = verbose
self.ssh_key = ssh_key
self.author = author
self.environment_vars = os.environ.copy()
if self.author:
author_dict = {}
author_list = author.split('<')
author_dict['GIT_COMMITTER_NAME'] = author_list[0].strip()
author_dict['GIT_COMMITTER_EMAIL'] = author_list[0].strip()
self.environment_vars.update(author_dict)
def _add(self, files_to_add=None):
''' git add '''
cmd = ["add", "--no-ignore-removal"]
if files_to_add:
cmd.extend(files_to_add)
else:
cmd.append('.')
results = self.git_cmd(cmd)
return results
def _commit(self, msg, author=None):
''' git commit with message '''
cmd = ["commit", "-m", msg]
if author:
cmd += ["--author", author]
results = self.git_cmd(cmd)
return results
def _clone(self, repo, dest, bare=False):
''' git clone '''
cmd = ["clone"]
if bare:
cmd += ["--bare"]
cmd += [repo, dest]
results = self.git_cmd(cmd)
return results
def _fetch(self, remote):
''' git fetch '''
cmd = ["fetch"]
cmd += [remote]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _status(self, porcelain=False, show_untracked=True):
''' Do a git status '''
cmd = ["status"]
if porcelain:
cmd.append('--porcelain')
if show_untracked:
cmd.append('--untracked-files=normal')
else:
cmd.append('--untracked-files=no')
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _checkout(self, branch):
''' Do a git checkout to <branch> '''
cmd = ["checkout", branch]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _get_current_branch(self):
        ''' Get the name of the current branch '''
cmd = ["describe", "--contains", "--all", "HEAD"]
results = self.git_cmd(cmd, output=True, output_type='raw')
results['results'] = results['results'].rstrip()
return results
def _merge(self, merge_id):
        ''' Do a git merge of <merge_id> into the current branch '''
cmd = ["merge", merge_id]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _push(self, remote, src_branch, dest_branch):
        ''' Do a git push of <src_branch> to <dest_branch> on <remote> '''
push_branches = src_branch + ":" + dest_branch
cmd = ["push", remote, push_branches]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _remote_update(self):
''' Do a git remote update '''
cmd = ["remote", "update"]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _diff(self, diff_branch):
''' Do a git diff diff_branch'''
cmd = ["diff", diff_branch]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _rebase(self, rebase_branch):
''' Do a git rebase rebase_branch'''
cmd = ["rebase", rebase_branch]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _config(self, get_args):
''' Do a git config --get <get_args> '''
cmd = ["config", '--get', get_args]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def git_cmd(self, cmd, output=False, output_type='json'):
'''Base command for git '''
cmds = ['/usr/bin/git']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
if self.ssh_key:
with SshAgent() as agent:
self.environment_vars['SSH_AUTH_SOCK'] = os.environ['SSH_AUTH_SOCK']
agent.add_key(self.ssh_key)
proc = subprocess.Popen(cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.environment_vars)
stdout, stderr = proc.communicate()
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
else:
proc = subprocess.Popen(cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.environment_vars)
stdout, stderr = proc.communicate()
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
if err:
rval.update({"err": err,
"cmd": cmds
})
else:
rval.update({"results": {}})
# Always include stdout/stderr:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
class GitCommit(GitCLI):
''' Class to wrap the git command line tools
'''
# pylint: disable=too-many-arguments
def __init__(self,
msg,
path,
commit_files,
author=None):
''' Constructor for GitCommit '''
super(GitCommit, self).__init__(path, author=author)
self.path = path
self.msg = msg
self.commit_files = commit_files
self.author = author
self.debug = []
os.chdir(path)
self.status_results = self._status(porcelain=True)
self.debug.append(self.status_results)
def get_files_to_commit(self):
''' do we have files to commit?'''
files_found_to_be_committed = []
# get the list of files that changed according to git status
git_status_out = self.status_results['results'].split('\n')
git_status_files = []
#clean up the data
for line in git_status_out:
file_name = line[3:]
if "->" in line:
file_name = file_name.split("->")[-1].strip()
git_status_files.append(file_name)
# Check if the files to be commited are in the git_status_files
for file_name in self.commit_files:
file_name = str(file_name)
for status_file in git_status_files:
if status_file.startswith(file_name):
files_found_to_be_committed.append(status_file)
return files_found_to_be_committed
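    # Porcelain parsing sketch (hypothetical status lines): ' M configs/app.yml'
    # yields 'configs/app.yml' via line[3:], while a rename entry such as
    # 'R  old.yml -> new.yml' resolves to 'new.yml' through the "->" split.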
def have_commits(self):
''' do we have files to commit?'''
# test the results
if self.status_results['results']:
return True
return False
def commit(self):
'''perform a git commit '''
if self.have_commits():
add_results = None
if self.commit_files:
files_to_add = self.get_files_to_commit()
if files_to_add:
add_results = self._add(files_to_add)
else:
add_results = self._add()
if add_results:
self.debug.append(add_results)
commit_results = self._commit(self.msg, self.author)
commit_results['debug'] = self.debug
return commit_results
return {'returncode': 0,
'results': {},
'no_commits': True,
'debug': self.debug
}
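# Illustrative playbook usage (a sketch inferred from main()'s argument_spec
# below; the task values are hypothetical):
#
# - git_commit:
#     msg: "update generated configs"
#     path: /path/to/repo
#     author: "Jane Doe <jane@example.com>"
#     commit_files:
#       - configs/app.yml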
def main():
'''
ansible git module for committing
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str', choices=['present']),
msg=dict(default=None, required=True, type='str'),
path=dict(default=None, required=True, type='str'),
author=dict(default=None, required=False, type='str'),
commit_files=dict(default=None, required=False, type='list'),
),
supports_check_mode=False,
)
git = GitCommit(module.params['msg'],
module.params['path'],
module.params['commit_files'],
module.params['author'],
)
state = module.params['state']
if state == 'present':
results = git.commit()
if results['returncode'] != 0:
module.fail_json(msg=results)
if results.has_key('no_commits'):
module.exit_json(changed=False, results=results, state="present")
module.exit_json(changed=True, results=results, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
if __name__ == '__main__':
from ansible.module_utils.basic import *
main()
| apache-2.0 | 5,626,749,291,229,485,000 | 30.27 | 120 | 0.52165 | false |
HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Rendering/Annotation/Testing/Python/xyPlot4.py | 20 | 7507 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# This version of the script
# test plotting arbitrary array components (Momentum)
# without using the dataset to data object filter
# and its cryptic indexing of arrays.
# create pipeline
#
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName("" + str(VTK_DATA_ROOT) + "/Data/combxyz.bin")
pl3d.SetQFileName("" + str(VTK_DATA_ROOT) + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
output = pl3d.GetOutput().GetBlock(0)
# create three line probes
line = vtk.vtkLineSource()
line.SetResolution(30)
transL1 = vtk.vtkTransform()
transL1.Translate(3.7,0.0,28.37)
transL1.Scale(5,5,5)
transL1.RotateY(90)
tf = vtk.vtkTransformPolyDataFilter()
tf.SetInputConnection(line.GetOutputPort())
tf.SetTransform(transL1)
probe = vtk.vtkProbeFilter()
probe.SetInputConnection(tf.GetOutputPort())
probe.SetSourceData(output)
transL2 = vtk.vtkTransform()
transL2.Translate(9.2,0.0,31.20)
transL2.Scale(5,5,5)
transL2.RotateY(90)
tf2 = vtk.vtkTransformPolyDataFilter()
tf2.SetInputConnection(line.GetOutputPort())
tf2.SetTransform(transL2)
probe2 = vtk.vtkProbeFilter()
probe2.SetInputConnection(tf2.GetOutputPort())
probe2.SetSourceData(output)
transL3 = vtk.vtkTransform()
transL3.Translate(13.27,0.0,33.40)
transL3.Scale(4.5,4.5,4.5)
transL3.RotateY(90)
tf3 = vtk.vtkTransformPolyDataFilter()
tf3.SetInputConnection(line.GetOutputPort())
tf3.SetTransform(transL3)
probe3 = vtk.vtkProbeFilter()
probe3.SetInputConnection(tf3.GetOutputPort())
probe3.SetSourceData(output)
appendF = vtk.vtkAppendPolyData()
appendF.AddInputData(probe.GetPolyDataOutput())
appendF.AddInputData(probe2.GetPolyDataOutput())
appendF.AddInputData(probe3.GetPolyDataOutput())
tuber = vtk.vtkTubeFilter()
tuber.SetInputConnection(appendF.GetOutputPort())
tuber.SetRadius(0.1)
lineMapper = vtk.vtkPolyDataMapper()
lineMapper.SetInputConnection(tuber.GetOutputPort())
lineActor = vtk.vtkActor()
lineActor.SetMapper(lineMapper)
# probe the line and plot it
triangle = vtk.vtkGlyphSource2D()
triangle.SetGlyphTypeToTriangle()
triangle.Update()
cross = vtk.vtkGlyphSource2D()
cross.SetGlyphTypeToCross()
cross.Update()
xyplot = vtk.vtkXYPlotActor()
xyplot.AddDataSetInputConnection(probe.GetOutputPort())
xyplot.AddDataSetInputConnection(probe2.GetOutputPort())
xyplot.AddDataSetInputConnection(probe3.GetOutputPort())
xyplot.GetPositionCoordinate().SetValue(0.0,0.67,0)
xyplot.GetPosition2Coordinate().SetValue(1.0,0.33,0)
#relative to Position
xyplot.SetXValuesToArcLength()
xyplot.SetNumberOfXLabels(6)
xyplot.SetTitle("Pressure vs. Arc Length (Zoomed View)")
xyplot.SetXTitle("")
xyplot.SetYTitle("P")
xyplot.SetXRange(.1,.35)
xyplot.SetYRange(.2,.4)
xyplot.GetProperty().SetColor(0,0,0)
xyplot.PlotLinesOn()
xyplot.GetProperty().SetLineWidth(1)
xyplot.PlotPointsOn()
xyplot.GetProperty().SetPointSize(3)
xyplot.LegendOn()
xyplot.SetPlotSymbol(2,triangle.GetOutput())
xyplot.SetPlotColor(2,0,0,1)
xyplot.SetPlotColor(1,0,0,0)
xyplot.SetPlotColor(0,0,0,0)
xyplot.SetGlyphSize(0.025)
# Set text prop color (same color for backward compat with test)
# Assign same object to all text props
tprop = xyplot.GetTitleTextProperty()
tprop.SetColor(xyplot.GetProperty().GetColor())
xyplot.SetAxisTitleTextProperty(tprop)
xyplot.SetAxisLabelTextProperty(tprop)
xyplot.SetLabelFormat("%-#6.2f")
vertexGlyph = vtk.vtkSphereSource()
vertexGlyph.Update()
xyplot2 = vtk.vtkXYPlotActor()
xyplot2.AddDataSetInputConnection(probe.GetOutputPort())
xyplot2.AddDataSetInputConnection(probe2.GetOutputPort())
xyplot2.AddDataSetInputConnection(probe3.GetOutputPort())
xyplot2.GetPositionCoordinate().SetValue(0.00,0.33,0)
xyplot2.GetPosition2Coordinate().SetValue(1.0,0.33,0)
#relative to Position
xyplot2.SetXValuesToNormalizedArcLength()
xyplot2.SetNumberOfXLabels(6)
xyplot2.SetXRange(0.2,1.0)
xyplot2.SetTitle("VTK Quality vs. Alcohol Consumption")
xyplot2.SetXTitle("")
xyplot2.SetYTitle("A")
xyplot2.PlotPointsOn()
xyplot2.PlotLinesOff()
xyplot2.LegendOn()
xyplot2.SetLegendPosition(0.4,0.6)
xyplot2.SetLegendPosition2(0.40,0.25)
xyplot2.GetProperty().SetColor(1,0,0)
xyplot2.GetProperty().SetPointSize(2)
xyplot2.SetPlotSymbol(0,vertexGlyph.GetOutput())
xyplot2.SetPlotLabel(0,"Ken's Mudslide Consumption")
xyplot2.SetPlotColor(0,1,0,0)
xyplot2.SetPlotSymbol(1,cross.GetOutput())
xyplot2.SetPlotColor(1,1,0,1)
xyplot2.SetPlotLabel(1,"Bill's Beer Consumption")
xyplot2.SetPlotSymbol(2,triangle.GetOutput())
xyplot2.SetPlotColor(2,0,0,1)
xyplot2.SetPlotLabel(2,"VTK Quality")
# Set text prop color (same color for backward compat with test)
# Assign same object to all text props
tprop = xyplot2.GetTitleTextProperty()
tprop.SetColor(xyplot2.GetProperty().GetColor())
xyplot2.SetAxisTitleTextProperty(tprop)
xyplot2.SetAxisLabelTextProperty(tprop)
xyplot2.SetLabelFormat(xyplot.GetLabelFormat())
xyplot3 = vtk.vtkXYPlotActor()
xyplot3.AddDataSetInputConnection(probe.GetOutputPort(),"Momentum",0)
xyplot3.AddDataSetInputConnection(probe.GetOutputPort(),"Density",0)
xyplot3.AddDataSetInputConnection(probe.GetOutputPort(),"Momentum",1)
xyplot3.AddDataSetInputConnection(probe.GetOutputPort(),"Momentum",2)
xyplot3.RemoveDataSetInputConnection(probe.GetOutputPort(),"Density",0)
xyplot3.SetPlotLabel(0,"Mx")
xyplot3.SetPlotColor(0,1,0,0)
xyplot3.SetPlotLabel(1,"My")
xyplot3.SetPlotColor(1,0,1,0)
xyplot3.SetPlotColor(2,0,0,1)
xyplot3.SetPlotLabel(2,"Mz")
#xyplot3 SetPlotColor 3 1 0 1
#xyplot3 SetPlotLabel 3 "D"
xyplot3.GetPositionCoordinate().SetValue(0.0,0.0,0)
xyplot3.GetPosition2Coordinate().SetValue(1.0,0.33,0)
#relative to Position
xyplot3.SetXValuesToIndex()
xyplot3.SetNumberOfXLabels(6)
xyplot3.SetTitle("Momentum Component vs. Point Id")
xyplot3.SetXTitle("Point Id")
xyplot3.SetYTitle("M")
xyplot3.PlotPointsOn()
xyplot3.GetProperty().SetColor(0,0,1)
xyplot3.GetProperty().SetPointSize(3)
xyplot3.LegendOn()
xyplot3.SetLegendPosition(0.8,0.28)
xyplot3.SetLegendPosition2(0.20,0.20)
# Set text prop color (same color for backward compat with test)
# Assign same object to all text props
tprop = xyplot3.GetTitleTextProperty()
tprop.SetColor(xyplot3.GetProperty().GetColor())
xyplot3.SetAxisTitleTextProperty(tprop)
xyplot3.SetAxisLabelTextProperty(tprop)
xyplot3.SetLabelFormat("%4.f")
# draw an outline
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0,0,0)
# Create graphics stuff
#
ren1 = vtk.vtkRenderer()
ren2 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
renWin.AddRenderer(ren2)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.SetBackground(0.6784,0.8471,0.9020)
ren1.SetViewport(0,0,.5,1)
ren1.AddActor(outlineActor)
ren1.AddActor(lineActor)
ren2.SetBackground(1,1,1)
ren2.SetViewport(0.5,0.0,1.0,1.0)
ren2.AddActor2D(xyplot)
ren2.AddActor2D(xyplot2)
ren2.AddActor2D(xyplot3)
renWin.SetSize(790,400)
cam1 = ren1.GetActiveCamera()
cam1.SetClippingRange(3.95297,100)
cam1.SetFocalPoint(8.88908,0.595038,29.3342)
cam1.SetPosition(-12.3332,31.7479,41.2387)
cam1.SetViewUp(0.060772,-0.319905,0.945498)
iren.Initialize()
renWin.Render()
# render the image
#
# prevent the tk window from showing up then start the event loop
# --- end of script --
| gpl-3.0 | -2,813,464,151,114,149,400 | 34.079439 | 71 | 0.803384 | false |
GoogleCloudPlatform/analytics-componentized-patterns | retail/recommendation-system/bqml-scann/ann_grpc/match_pb2_grpc.py | 1 | 4364 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import match_pb2 as match__pb2
class MatchServiceStub(object):
"""MatchService is a Google managed service for efficient vector similarity
search at scale.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Match = channel.unary_unary(
'/google.cloud.aiplatform.container.v1alpha1.MatchService/Match',
request_serializer=match__pb2.MatchRequest.SerializeToString,
response_deserializer=match__pb2.MatchResponse.FromString,
)
self.BatchMatch = channel.unary_unary(
'/google.cloud.aiplatform.container.v1alpha1.MatchService/BatchMatch',
request_serializer=match__pb2.BatchMatchRequest.SerializeToString,
response_deserializer=match__pb2.BatchMatchResponse.FromString,
)
class MatchServiceServicer(object):
"""MatchService is a Google managed service for efficient vector similarity
search at scale.
"""
def Match(self, request, context):
"""Returns the nearest neighbors for the query. If it is a sharded
deployment, calls the other shards and aggregates the responses.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BatchMatch(self, request, context):
"""Returns the nearest neighbors for batch queries. If it is a sharded
deployment, calls the other shards and aggregates the responses.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MatchServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Match': grpc.unary_unary_rpc_method_handler(
servicer.Match,
request_deserializer=match__pb2.MatchRequest.FromString,
response_serializer=match__pb2.MatchResponse.SerializeToString,
),
'BatchMatch': grpc.unary_unary_rpc_method_handler(
servicer.BatchMatch,
request_deserializer=match__pb2.BatchMatchRequest.FromString,
response_serializer=match__pb2.BatchMatchResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.cloud.aiplatform.container.v1alpha1.MatchService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class MatchService(object):
"""MatchService is a Google managed service for efficient vector similarity
search at scale.
"""
@staticmethod
def Match(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.cloud.aiplatform.container.v1alpha1.MatchService/Match',
match__pb2.MatchRequest.SerializeToString,
match__pb2.MatchResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def BatchMatch(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.cloud.aiplatform.container.v1alpha1.MatchService/BatchMatch',
match__pb2.BatchMatchRequest.SerializeToString,
match__pb2.BatchMatchResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
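# Illustrative client sketch (not generated code; the endpoint and request
# fields are assumptions): a caller opens a channel, wraps it in the stub
# defined above, and issues a unary Match call.
#
#   channel = grpc.insecure_channel('localhost:8080')  # assumed address
#   stub = MatchServiceStub(channel)
#   response = stub.Match(match__pb2.MatchRequest())   # request fields omitted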
| apache-2.0 | -5,007,113,471,602,414,000 | 39.036697 | 132 | 0.646196 | false |
SterlingPeet/madesigner | tests/demo-blend.py | 2 | 1355 | #!/usr/bin/env python
try:
import svgwrite
except ImportError:
import sys, os
sys.path.insert(0, os.path.abspath(os.path.split(os.path.abspath(__file__))[0]+'/..'))
import svgwrite
try:
import airfoil
import contour
import layout
except ImportError:
# if airfoil is not 'installed' append parent dir of __file__ to sys.path
import sys, os
sys.path.insert(0, os.path.abspath(os.path.split(os.path.abspath(__file__))[0]+'/../lib'))
import airfoil
import contour
import layout
root = airfoil.Airfoil("naca633618", 1000, True)
tip = airfoil.Airfoil("naca0015", 1000, True);
rchord = 8.0
tchord = 4.0
twist = 10
width = 8.5
height = 11
# show blending airfoils, scaling, rotating, and positioning
print "blending demo"
layout = layout.Layout( 'demo-blend', width, height )
steps = 8
dp = 1.0 / steps
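# e.g. with steps=8 the middle rib (p=4) blends the two sections 50/50,
# giving chord 8.0*0.5 + 4.0*0.5 = 6.0 and a twist of 0.5*10 = 5 degrees.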
for p in range(0, steps+1):
    print(p)
percent = p*dp
rib = airfoil.blend( root, tip, percent )
size = rchord * (1.0 - percent) + tchord * percent
rib.scale( size, size )
rib.fit( 500, 0.002 )
tx = size/3.0
ty = rib.simple_interp(rib.top, tx)
by = rib.simple_interp(rib.bottom, tx)
vd = (ty - by)
hy = by + vd / 2.0
rib.add_label( tx, hy, 14, 0, "W" + str(p) )
rib.rotate( percent * twist )
layout.draw_part_demo( rib )
layout.save()
| gpl-2.0 | 8,593,519,020,748,511,000 | 21.966102 | 94 | 0.626568 | false |
hkwi/twink | twink/ofp5/parse.py | 1 | 51714 | from __future__ import absolute_import
import struct
from collections import namedtuple
from . import *
_len = len
_type = type
def _align(length):
return (length+7)//8*8
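# e.g. _align(12) == 16 and _align(16) == 16: OpenFlow structures are padded
# to 8-byte boundaries, so the parsers below advance the cursor by the
# aligned length rather than the raw length.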
class _pos(object):
offset = 0
def _cursor(offset):
if isinstance(offset, _pos):
return offset
elif isinstance(offset, int):
ret = _pos()
ret.offset = offset
return ret
else:
raise ValueError(offset)
def _unpack(fmt, msg, offset):
cur = _cursor(offset)
if fmt[0] != "!":
fmt = "!"+fmt
ret = struct.unpack_from(fmt, msg, cur.offset)
cur.offset += struct.calcsize(fmt)
return ret
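# Usage sketch (bytes are illustrative): a shared cursor lets successive
# calls consume one message front to back.
#   cur = _cursor(0)
#   (a,) = _unpack("H", b"\x00\x05\xff\xff", cur)  # a == 5, cur.offset == 2
#   (b,) = _unpack("H", b"\x00\x05\xff\xff", cur)  # b == 0xffff, cur.offset == 4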
def from_bitmap(uint32_t_list):
ret = []
for o,i in zip(range(_len(uint32_t_list)),uint32_t_list):
for s in range(32):
if i & (1<<s):
ret.append(32*o + s)
return ret
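# e.g. from_bitmap([0x32]) == [1, 4, 5]: bits 1, 4 and 5 of the first uint32
# are set, which as an OFPHET_VERSIONBITMAP payload advertises wire
# versions 0x01, 0x04 and 0x05.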
def parse(message, offset=0):
if message is None:
return None
cursor = _cursor(offset)
header = ofp_header(message, cursor.offset)
assert header.version == 5
if header.type == OFPT_HELLO:
return ofp_hello(message, cursor)
elif header.type == OFPT_ERROR:
return ofp_error_msg(message, cursor)
elif header.type == OFPT_FEATURES_REPLY:
return ofp_switch_features(message, cursor)
elif header.type in (OFPT_SET_CONFIG, OFPT_GET_CONFIG_REPLY):
return ofp_switch_config(message, cursor)
elif header.type == OFPT_PACKET_IN:
return ofp_packet_in(message, cursor)
elif header.type == OFPT_FLOW_REMOVED:
return ofp_flow_removed(message, cursor)
elif header.type == OFPT_PORT_STATUS:
return ofp_port_status(message, cursor)
elif header.type == OFPT_PACKET_OUT:
return ofp_packet_out(message, cursor)
elif header.type == OFPT_FLOW_MOD:
return ofp_flow_mod(message, cursor)
elif header.type == OFPT_GROUP_MOD:
return ofp_group_mod(message, cursor)
elif header.type == OFPT_PORT_MOD:
return ofp_port_mod(message, cursor)
elif header.type == OFPT_TABLE_MOD:
return ofp_table_mod(message, cursor)
elif header.type == OFPT_MULTIPART_REQUEST:
return ofp_multipart_request(message, cursor)
elif header.type == OFPT_MULTIPART_REPLY:
return ofp_multipart_reply(message, cursor)
elif header.type == OFPT_EXPERIMENTER:
return ofp_experimenter_msg(message, cursor)
elif header.type in (OFPT_SET_ASYNC, OFPT_GET_ASYNC_REPLY):
return ofp_async_config(message, cursor)
elif header.type == OFPT_METER_MOD:
return ofp_meter_mod(message, cursor)
elif header.type == OFPT_ROLE_STATUS:
return ofp_role_status(message, cursor)
elif header.type == OFPT_TABLE_STATUS:
return ofp_table_status(message, cursor)
elif header.type == OFPT_REQUESTFORWARD:
return ofp_requestforward_(message, cursor)
elif header.type == OFPT_BUNDLE_CONTROL:
return ofp_bundle_ctrl_msg(message, cursor)
elif header.type == OFPT_BUNDLE_ADD_MESSAGE:
return ofp_bundle_add_msg(message, cursor)
else:
# OFPT_ECHO_REQUEST, OFPT_ECHO_REPLY
# OFPT_FEATURES_REQUEST
# OFPT_BARRIER_REQUEST, OFPT_BARRIER_REPLY
# OFPT_GET_ASYNC_REQUEST
return ofp_(message, cursor)
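# Round-trip sketch (xid is arbitrary): a bare 8-byte OFPT_HELLO header
# parses into an ofp_hello with an empty element list.
#   msg = struct.pack("!BBHI", 5, OFPT_HELLO, 8, 0x1234)
#   assert parse(msg).elements == []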
# 7.1
def ofp_header(message, offset):
cursor = _cursor(offset)
(version, type, length, xid) = _unpack("BBHI", message, cursor)
assert version == 5
return namedtuple("ofp_header",
"version type length xid")(
version,type,length,xid)
def ofp_(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
data = message[cursor.offset:offset+header.length]
cursor.offset = offset+header.length
return namedtuple("ofp_",
"header,data")(header, data)
# 7.2.1.1
def ofp_port(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(port_no,length,hw_addr,name,config,state) = _unpack("IH2x6s2x16sII", message, cursor)
name = name.partition(b"\0")[0]
	properties = []
	while cursor.offset < offset+length:
		h = ofp_port_desc_prop_header(message, cursor.offset)
		if h.type == OFPPDPT_ETHERNET:
			properties.append(ofp_port_desc_prop_ethernet(message, cursor))
		elif h.type == OFPPDPT_OPTICAL:
			properties.append(ofp_port_desc_prop_optical(message, cursor))
		elif h.type == OFPPDPT_EXPERIMENTER:
			properties.append(ofp_port_desc_prop_experimenter(message, cursor))
		else:
			raise ValueError(h)
return namedtuple("ofp_port",
"port_no,length,hw_addr,name,config,state,properties")(
port_no,length,hw_addr,name,config,state,properties)
# 7.2.1.2
def ofp_port_desc_prop_header(message, offset):
return namedtuple("ofp_port_desc_prop_header",
"type length")(*_unpack("HH", message, offset))
def ofp_port_desc_prop_ethernet(message, offset):
return namedtuple("ofp_port_desc_prop_ethernet",
"type length curr advertised supported peer curr_speed max_speed")(
*_unpack("HH4x6I", message, offset))
def ofp_port_desc_prop_optical(message, offset):
return namedtuple("ofp_port_desc_prop_optical",
'''type length supported
tx_min_freq_lmda tx_max_freq_lmda tx_grid_freq_lmda
rx_min_freq_lmda rx_max_freq_lmda rx_grid_freq_lmda
tx_pwr_min tx_pwr_max''')(*_unpack("HH4x7I2H", message, offset))
def ofp_port_desc_prop_experimenter(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,length,experimenter,exp_type) = _unpack("HHII", message, cursor)
experimenter_data = message[cursor.offset:offset+length]
cursor.offset = offset+length
return namedtuple("ofp_port_desc_prop_experimenter",
"type,length,experimenter,exp_type,experimenter_data")(
type,length,experimenter,exp_type,experimenter_data)
# 7.2.2.1
def ofp_match(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,length) = _unpack("HH", message, cursor)
oxm_fields = message[cursor.offset:offset+length]
cursor.offset = offset+_align(length)
return namedtuple("ofp_match",
"type length oxm_fields")(type,length,oxm_fields)
# 7.2.2.8
def ofp_oxm_experimenter_header(message, offset):
return namedtuple("ofp_oxm_experimenter_header",
"oxm_header experimenter")(*_unpack("II", message, offset))
# 7.2.3
def ofp_instruction_header(message, offset):
return namedtuple("ofp_instruction_header",
"type len")(*_unpack("HH", message, offset))
def ofp_instruction_(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type, len) = ofp_instruction_header(message, cursor.offset)
if type == OFPIT_GOTO_TABLE:
return ofp_instruction_goto_table(message, cursor)
elif type == OFPIT_WRITE_METADATA:
return ofp_instruction_write_metadata(message, cursor)
elif type in (OFPIT_WRITE_ACTIONS, OFPIT_APPLY_ACTIONS, OFPIT_CLEAR_ACTIONS):
return ofp_instruction_actions(message, cursor)
elif type == OFPIT_METER:
return ofp_instruction_meter(message, cursor)
elif type == OFPIT_EXPERIMENTER:
return ofp_instruction_experimenter_(message, cursor)
else:
raise ValueError(ofp_instruction_header(message, cursor.offset))
def ofp_instruction_goto_table(message, offset):
return namedtuple("ofp_instruction_goto_table",
"type len table_id")(*_unpack("HHB3x", message, offset))
def ofp_instruction_write_metadata(message, offset):
return namedtuple("ofp_instruction_write_metadata",
"type len metadata metadata_mask")(*_unpack("HH4xQQ", message, offset))
def ofp_instruction_actions(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,len) = _unpack("HH4x", message, cursor)
actions = []
while cursor.offset < offset + len:
actions.append(ofp_action_(message,cursor))
assert cursor.offset == offset+len
return namedtuple("ofp_instruction_actions",
"type,len,actions")(type,len,actions)
def ofp_instruction_meter(message, offset):
return namedtuple("ofp_instruction_meter",
"type len meter_id")(*_unpack("HHI", message, offset))
def ofp_instruction_experimenter_header(message, offset):
# Note: no reference in spec
cursor = _cursor(offset)
offset = cursor.offset
(type,len,experimenter) = _unpack("HHI", message, cursor)
cursor.offset = offset+len
return namedtuple("ofp_instruction_experimenter_header",
"type len experimenter")(type,len,experimenter)
def ofp_instruction_experimenter_(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,len,experimenter) = _unpack("HHI", message, cursor)
data = message[cursor.offset:offset+len]
cursor.offset = offset+len
return namedtuple("ofp_instruction_experimenter_",
"type len experimenter data")(type,len,experimenter,data)
# 7.2.4
def ofp_action_header(message, offset):
return namedtuple("ofp_action_header",
"type,len")(*_unpack("HH", message, offset))
def ofp_action_(message, offset):
cursor = _cursor(offset)
header = ofp_action_header(message, cursor.offset)
if header.type == OFPAT_OUTPUT:
return ofp_action_output(message, cursor)
elif header.type == OFPAT_GROUP:
return ofp_action_group(message, cursor)
elif header.type == OFPAT_SET_QUEUE:
return ofp_action_set_queue(message, cursor)
elif header.type == OFPAT_SET_MPLS_TTL:
return ofp_action_mpls_ttl(message, cursor)
elif header.type == OFPAT_SET_NW_TTL:
return ofp_action_nw_ttl(message, cursor)
elif header.type in (OFPAT_PUSH_VLAN,OFPAT_PUSH_MPLS,OFPAT_PUSH_PBB):
return ofp_action_push(message, cursor)
elif header.type == OFPAT_POP_MPLS:
return ofp_action_pop_mpls(message, cursor)
elif header.type == OFPAT_SET_FIELD:
return ofp_action_set_field(message, cursor)
elif header.type == OFPAT_EXPERIMENTER:
return ofp_action_experimenter_(message, cursor)
elif header.type in (OFPAT_COPY_TTL_OUT, OFPAT_COPY_TTL_IN,
OFPAT_DEC_MPLS_TTL, OFPAT_DEC_NW_TTL, OFPAT_POP_VLAN, OFPAT_POP_PBB):
return ofp_action_generic(message, cursor)
else:
raise ValueError(header)
def ofp_action_output(message, offset):
return namedtuple("ofp_action_output",
"type,len,port,max_len")(*_unpack("HHIH6x", message, offset))
def ofp_action_group(message, offset):
return namedtuple("ofp_action_group",
"type,len,group_id")(*_unpack("HHI", message, offset))
def ofp_action_set_queue(message, offset):
return namedtuple("ofp_action_set_queue",
"type,len,queue_id")(*_unpack("HHI", message, offset))
def ofp_action_mpls_ttl(message, offset):
	return namedtuple("ofp_action_mpls_ttl",
"type,len,mpls_ttl")(*_unpack("HHB3x", message, offset))
def ofp_action_generic(message, offset):
return namedtuple("ofp_action_generic",
"type,len")(*_unpack("HH4x", message, offset))
def ofp_action_nw_ttl(message, offset):
return namedtuple("ofp_action_nw_ttl",
"type,len,nw_ttl")(*_unpack("HHB3x", message, offset))
def ofp_action_push(message, offset):
return namedtuple("ofp_action_push",
"type,len,ethertype")(*_unpack("HHH2x", message, offset))
def ofp_action_pop_mpls(message, offset):
return namedtuple("ofp_action_pop_mpls",
"type,len,ethertype")(*_unpack("HHH2x", message, offset))
def ofp_action_set_field(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,len) = _unpack("HH", message, cursor)
field = message[cursor.offset:offset+len]
cursor.offset = offset+len
return namedtuple("ofp_action_set_field",
"type,len,field")(type,len,field)
def ofp_action_experimenter_header(message, offset):
return namedtuple("ofp_action_experimenter_header",
"type,len,experimenter")(*_unpack("HHI", message, offset))
def ofp_action_experimenter_(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_action_experimenter_header(message, cursor)
data = message[cursor.offset:offset+header.len]
cursor.offset = offset + header.len
return namedtuple("ofp_action_experimenter_",
"type,len,experimenter,data")(*header+(data,))
# 7.2.5
def ofp_experimenter_structure(message, offset):
	cursor = _cursor(offset)
	(experimenter,exp_type) = _unpack("II", message, cursor)
	experimenter_data = message[cursor.offset:] # XXX: not self-descriptive for data length
return namedtuple("ofp_experimenter_structure",
"experimenter,exp_type,experimenter_data")(
experimenter,exp_type,experimenter_data)
# 7.3.1
def ofp_switch_features(message, offset=0):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(datapath_id, n_buffers, n_tables,
auxiliary_id, capabilities, reserved) = _unpack("QIBB2xII", message, cursor)
return namedtuple("ofp_switch_features",
"header,datapath_id,n_buffers,n_tables,auxiliary_id,capabilities")(
header,datapath_id,n_buffers,n_tables,auxiliary_id,capabilities)
# 7.3.2
def ofp_switch_config(message, offset):
cursor = _cursor(offset)
header = ofp_header(message, cursor)
(flags,miss_send_len) = _unpack("HH", message, cursor)
return namedtuple("ofp_switch_config",
"header,flags,miss_send_len")(header,flags,miss_send_len)
# 7.3.4.1
def ofp_table_mod(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(table_id,config) = _unpack("B3xI", message, cursor)
properties = []
while cursor.offset < offset + header.length:
h = ofp_table_mod_prop_header(message, cursor.offset)
if h.type == OFPTMPT_EVICTION:
properties.append(ofp_table_mod_prop_eviction(message, cursor))
elif h.type == OFPTMPT_VACANCY:
properties.append(ofp_table_mod_prop_vacancy(message, cursor))
elif h.type == OFPTMPT_EXPERIMENTER:
properties.append(ofp_table_mod_prop_experimenter(message, cursor))
else:
raise ValueError(h)
return namedtuple("ofp_table_mod",
"header,table_id,config,properties")(header,table_id,config,properties)
def ofp_table_mod_prop_header(message, offset):
return namedtuple("ofp_table_mod_prop_header",
"type length")(*_unpack("HH", message, offset))
def ofp_table_mod_prop_eviction(message, offset):
return namedtuple("ofp_table_mod_prop_eviction",
"type length flags")(*_unpack("HHI", message, offset))
def ofp_table_mod_prop_vacancy(message, offset):
return namedtuple("ofp_table_mod_prop_vacancy",
"type length vacancy_down vacancy_up vacancy")(
*_unpack("HH3Bx", message, offset))
def ofp_table_mod_prop_experimenter(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,length,experimenter,exp_type) = _unpack("HHII", message, cursor)
experimenter_data = message[cursor.offset:offset+length]
cursor.offset = offset + _align(length)
return namedtuple("ofp_table_mod_prop_experimenter",
"type,length,experimenter,exp_type,experimenter_data")(
type,length,experimenter,exp_type,experimenter_data)
# 7.3.4.2
def ofp_flow_mod(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(cookie,cookie_mask,table_id,command,
idle_timeout,hard_timeout,priority,
buffer_id,out_port,out_group,flags,
importance) = _unpack("QQBB3H3IHH", message, cursor)
match = ofp_match(message, cursor)
instructions = _list_fetch(message, cursor, offset+header.length, ofp_instruction_)
return namedtuple("ofp_flow_mod",
'''header,cookie,cookie_mask,table_id,command,
idle_timeout,hard_timeout,priority,
buffer_id,out_port,out_group,flags,importance,match,instructions''')(
header,cookie,cookie_mask,table_id,command,
idle_timeout,hard_timeout,priority,
buffer_id,out_port,out_group,flags,importance,match,instructions)
# 7.3.4.3
def ofp_group_mod(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(command,type,group_id) = _unpack("HBxI", message, cursor)
buckets = []
while cursor.offset < offset + header.length:
buckets.append(ofp_bucket(message, cursor))
return namedtuple("ofp_group_mod",
"header,command,type,group_id,buckets")(header,command,type,group_id,buckets)
def ofp_bucket(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(len,weight,watch_port,watch_group)=_unpack("HHII4x", message, cursor)
actions = []
while cursor.offset < offset+len:
actions.append(ofp_action_(message, cursor))
return namedtuple("ofp_bucket",
"len weight watch_port watch_group actions")(
len,weight,watch_port,watch_group,actions)
# 7.3.4.4
def ofp_port_mod(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
	(port_no,hw_addr,config,mask) = _unpack("I4x6s2xII", message, cursor)
properties = []
while cursor.offset < offset + header.length:
h = ofp_port_mod_prop_header(message, cursor.offset)
if h.type == OFPPMPT_ETHERNET:
properties.append(ofp_port_mod_prop_ethernet(message, cursor))
elif h.type == OFPPMPT_OPTICAL:
properties.append(ofp_port_mod_prop_optical(message, cursor))
		elif h.type == OFPPMPT_EXPERIMENTER:
			properties.append(ofp_port_mod_prop_experimenter(message, cursor))
		else:
			raise ValueError(h)
return namedtuple("ofp_port_mod",
"header,port_no,hw_addr,config,mask,properties")(
header,port_no,hw_addr,config,mask,properties)
def ofp_port_mod_prop_header(message, offset):
return namedtuple("ofp_port_mod_prop_header",
"type length")(*_unpack("HH", message, offset))
def ofp_port_mod_prop_ethernet(message, offset):
return namedtuple("ofp_port_mod_prop_ethernet",
"type length advertise")(*_unpack("HHI", message, offset))
def ofp_port_mod_prop_optical(message, offset):
return namedtuple("ofp_port_mod_prop_optical",
"type length configure freq_lmda fl_offset grid_span tx_pwr")(
*_unpack("HHIIiII", message, offset))
def ofp_port_mod_prop_experimenter(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,length,experimenter,exp_type) = _unpack("HHII", message, offset)
experimenter_data = message[cursor.offset:offset+length]
cursor.offset = offset+_align(length)
return namedtuple("ofp_port_mod_prop_experimenter",
"type,length,experimenter,exp_type,experimenter_data")(
type,length,experimenter,exp_type,experimenter_data)
# 7.3.4.5
def ofp_meter_mod(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(command,flags,meter_id) = _unpack("HHI", message, cursor)
bands = []
while cursor.offset < offset + header.length:
bands.append(ofp_meter_band_(message, cursor))
return namedtuple("ofp_meter_mod",
"header,command,flags,meter_id,bands")(
header,command,flags,meter_id,bands)
def ofp_meter_band_header(message, offset):
return namedtuple("ofp_meter_band_header",
"type,len,rate,burst_size")(*_unpack("HHII", message, offset))
def ofp_meter_band_(message, offset):
cursor = _cursor(offset)
header = ofp_meter_band_header(message, cursor.offset)
if header.type == OFPMBT_DROP:
return ofp_meter_band_drop(message, cursor)
elif header.type == OFPMBT_DSCP_REMARK:
return ofp_meter_band_dscp_remark(message, cursor)
elif header.type == OFPMBT_EXPERIMENTER:
return ofp_meter_band_experimenter(message, cursor)
else:
raise ValueError(header)
def ofp_meter_band_drop(message, offset):
return namedtuple("ofp_meter_band_drop",
"type,len,rate,burst_size")(*_unpack("HHII4x", message, offset))
def ofp_meter_band_dscp_remark(message, offset):
return namedtuple("ofp_meter_band_dscp_remark",
"type,len,rate,burst_size,prec_level")(
*_unpack("HHIIB3x", message, offset))
def ofp_meter_band_experimenter(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
	(type,len,rate,burst_size,experimenter) = _unpack("HH3I", message, cursor)
	data = message[cursor.offset:offset+len]
	cursor.offset = offset+len
return namedtuple("ofp_meter_band_experimenter",
"type,len,rate,burst_size,experimenter,data")(
type,len,rate,burst_size,experimenter,data)
# 7.3.5
def ofp_multipart_request(message, offset=0):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(type, flags) = _unpack("HH4x", message, cursor)
if type in (OFPMP_DESC, OFPMP_TABLE, OFPMP_GROUP_DESC,
OFPMP_GROUP_FEATURES, OFPMP_METER_FEATURES, OFPMP_PORT_DESC, OFPMP_TABLE_DESC):
body = ""
elif type == OFPMP_FLOW:
body = ofp_flow_stats_request(message, cursor)
elif type == OFPMP_AGGREGATE:
body = ofp_aggregate_stats_request(message, cursor)
elif type == OFPMP_PORT_STATS:
body = ofp_port_stats_request(message, cursor)
elif type == OFPMP_QUEUE_STATS:
body = ofp_queue_stats_request(message, cursor)
elif type == OFPMP_GROUP:
body = ofp_group_stats_request(message, cursor)
elif type in (OFPMP_METER, OFPMP_METER_CONFIG):
		body = ofp_meter_multipart_request(message, cursor)
elif type == OFPMP_QUEUE_DESC:
body = ofp_queue_desc_request(message, cursor)
elif type == OFPMP_FLOW_MONITOR:
body = ofp_flow_monitor_request(message, cursor)
elif type == OFPMP_TABLE_FEATURES:
body = []
while cursor.offset < offset + header.length:
body.append(ofp_table_features(message, cursor))
elif type == OFPMP_EXPERIMENTER:
body = message[cursor.offset:offset+header.length]
cursor.offset = offset + header.length
else:
raise ValueError("multiaprt type=%d flags=%s" % (type, flags))
return namedtuple("ofp_multipart_request",
"header type flags body")(header,type,flags,body)
def ofp_multipart_reply(message, offset=0):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(type, flags) = _unpack("HH4x", message, cursor)
body = []
if type == OFPMP_DESC:
body = ofp_desc(message, cursor)
elif type == OFPMP_FLOW:
body = _list_fetch(message, cursor, offset + header.length, ofp_flow_stats)
elif type == OFPMP_AGGREGATE:
body = _list_fetch(message, cursor, offset + header.length, ofp_aggregate_stats_reply)
elif type == OFPMP_TABLE:
body = _list_fetch(message, cursor, offset + header.length, ofp_table_stats)
elif type == OFPMP_PORT_STATS:
body = _list_fetch(message, cursor, offset + header.length, ofp_port_stats)
elif type == OFPMP_QUEUE_STATS:
body = _list_fetch(message, cursor, offset + header.length, ofp_queue_stats)
elif type == OFPMP_GROUP:
body = _list_fetch(message, cursor, offset + header.length, ofp_group_stats)
elif type == OFPMP_GROUP_DESC:
body = _list_fetch(message, cursor, offset + header.length, ofp_group_desc)
elif type == OFPMP_GROUP_FEATURES:
body = ofp_group_features(message, cursor)
elif type == OFPMP_METER:
body = _list_fetch(message, cursor, offset + header.length, ofp_meter_stats)
elif type == OFPMP_METER_CONFIG:
body = _list_fetch(message, cursor, offset + header.length, ofp_meter_config)
elif type == OFPMP_METER_FEATURES:
body = ofp_meter_features(message, cursor)
elif type == OFPMP_TABLE_FEATURES:
body = _list_fetch(message, cursor, offset + header.length, ofp_table_features)
elif type == OFPMP_PORT_DESC:
body = _list_fetch(message, cursor, offset + header.length, ofp_port)
elif type == OFPMP_TABLE_DESC:
body = _list_fetch(message, cursor, offset + header.length, ofp_table_desc)
elif type == OFPMP_QUEUE_DESC:
body = _list_fetch(message, cursor, offset + header.length, ofp_queue_desc)
	elif type == OFPMP_FLOW_MONITOR:
		body = _list_fetch(message, cursor, offset + header.length, ofp_flow_update_)
elif type == OFPMP_EXPERIMENTER:
body = ofp_experimenter_multipart_(message, cursor, offset+header.length)
else:
raise ValueError("multiaprt type=%d flags=%s" % (type, flags))
return namedtuple("ofp_multipart_reply",
"header type flags body")(header,type,flags,body)
def _list_fetch(message, cursor, limit, fetcher):
ret = []
while cursor.offset < limit:
ret.append(fetcher(message, cursor))
assert cursor.offset == limit
return ret
# 7.3.5.1
def ofp_desc(message, offset):
return namedtuple("ofp_desc",
"mfr_desc,hw_desc,sw_desc,serial_num,dp_desc")(*_unpack("256s256s256s32s256s", message, offset))
# 7.3.5.2
def ofp_flow_stats_request(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(table_id,out_port,out_group,cookie,cookie_mask) = _unpack("B3xII4xQQ", message, cursor)
match = ofp_match(message, cursor)
return namedtuple("ofp_flow_stats_request",
"table_id,out_port,out_group,cookie,cookie_mask,match")(
table_id,out_port,out_group,cookie,cookie_mask,match)
def ofp_flow_stats(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(length,table_id,duration_sec,duration_nsec,priority,
idle_timeout,hard_timeout,flags,cookie,
packet_count,byte_count) = _unpack("HBxII4H4x3Q", message, cursor)
match = ofp_match(message, cursor)
instructions = _list_fetch(message, cursor, offset+length, ofp_instruction_)
return namedtuple("ofp_flow_stats", '''
length table_id duration_sec duration_nsec
priority idle_timeout hard_timeout flags cookie
packet_count byte_count match instructions''')(
length,table_id,duration_sec,duration_nsec,priority,
idle_timeout,hard_timeout,flags,cookie,
packet_count,byte_count,match,instructions)
# 7.3.5.3
def ofp_aggregate_stats_request(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(table_id,out_port,out_group,cookie,cookie_mask) = _unpack("B3xII4xQQ", message, cursor)
match = ofp_match(message, cursor)
return namedtuple("ofp_aggregate_stats_request",
"table_id,out_port,out_group,cookie,cookie_mask,match")(
table_id,out_port,out_group,cookie,cookie_mask,match)
def ofp_aggregate_stats_reply(message, offset):
return namedtuple("ofp_aggregate_stats_reply",
"packet_count,byte_count,flow_count")(
*_unpack("QQI4x", message, offset))
# 7.3.5.4
def ofp_table_stats(message, offset):
return namedtuple("ofp_table_stats", "table_id,out_port,out_group,cookie,cookie_mask")(
*_unpack("B3xIQQ", message, offset))
# 7.3.5.5
def ofp_table_desc(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
	(length,table_id,config) = _unpack("HBxI", message, cursor)
	properties = []
	while cursor.offset < offset + length:
h = ofp_table_mod_prop_header(message, cursor.offset)
if h.type == OFPTMPT_EVICTION:
properties.append(ofp_table_mod_prop_eviction(message, cursor))
elif h.type == OFPTMPT_VACANCY:
properties.append(ofp_table_mod_prop_vacancy(message, cursor))
elif h.type == OFPTMPT_EXPERIMENTER:
properties.append(ofp_table_mod_prop_experimenter(message, cursor))
else:
raise ValueError(h)
return namedtuple("ofp_table_desc",
"length,table_id,config,properties")(
length,table_id,config,properties)
# 7.3.5.6.1
def ofp_table_features(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(length,table_id,name,metadata_match,metadata_write,capabilities,max_entries) = _unpack("HB5x32sQQII", message, cursor)
name = name.partition(b'\0')[0]
properties = []
while cursor.offset < offset+length:
h = ofp_table_feature_prop_header(message, cursor.offset)
if h.type in (OFPTFPT_INSTRUCTIONS, OFPTFPT_INSTRUCTIONS_MISS):
properties.append(ofp_table_feature_prop_instructions(message, cursor))
elif h.type in (OFPTFPT_NEXT_TABLES, OFPTFPT_NEXT_TABLES_MISS, OFPTFPT_TABLE_SYNC_FROM):
properties.append(ofp_table_feature_prop_tables(message, cursor))
elif h.type in (OFPTFPT_WRITE_ACTIONS, OFPTFPT_WRITE_ACTIONS_MISS, OFPTFPT_APPLY_ACTIONS, OFPTFPT_APPLY_ACTIONS_MISS):
properties.append(ofp_table_feature_prop_actions(message, cursor))
elif h.type in (OFPTFPT_MATCH, OFPTFPT_WILDCARDS,
OFPTFPT_WRITE_SETFIELD, OFPTFPT_WRITE_SETFIELD_MISS,
OFPTFPT_APPLY_SETFIELD, OFPTFPT_APPLY_SETFIELD_MISS):
properties.append(ofp_table_feature_prop_oxm(message, cursor))
elif h.type in (OFPTFPT_EXPERIMENTER, OFPTFPT_EXPERIMENTER_MISS):
properties.append(ofp_table_feature_prop_experimenter(message, cursor))
else:
raise ValueError(h)
return namedtuple("ofp_table_feature_prop_header",
"length,table_id,name,metadata_match,metadata_write,capabilities,max_entries,properties")(
length,table_id,name,metadata_match,metadata_write,capabilities,max_entries,properties)
# 7.3.5.6.2
def ofp_table_feature_prop_header(message, offset):
return namedtuple("ofp_table_feature_prop_header",
"type,length")(*_unpack("HH", message, offset))
def ofp_table_feature_prop_instructions(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,length) = _unpack("HH", message, cursor)
instruction_ids = _list_fetch(message, cursor, offset+length, ofp_instruction_id)
cursor.offset = offset + _align(length)
return namedtuple("ofp_table_feature_prop_instructions",
"type,length,instruction_ids")(
type,length,instruction_ids)
def ofp_instruction_id(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,len) = _unpack("HH", message, cursor)
	exp_data = message[cursor.offset:offset+len]
	cursor.offset = offset+len
	return namedtuple("ofp_instruction_id",
		"type,len,exp_data")(type,len,exp_data)
def ofp_table_feature_prop_tables(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,length) = _unpack("HH", message, cursor)
	table_ids = _unpack("%dB" % (length-4), message, cursor)
cursor.offset += _align(length)-length
return namedtuple("ofp_table_feature_prop_tables",
"type,length,table_ids")(type,length,table_ids)
def ofp_table_feature_prop_actions(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,length) = _unpack("HH", message, cursor)
	action_ids = _list_fetch(message, cursor, offset+length, ofp_action_id)
	cursor.offset = offset + _align(length)
return namedtuple("ofp_table_feature_prop_actions",
"type,length,action_ids")(type,length,action_ids)
def ofp_action_id(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,len) = _unpack("HH", message, cursor)
	exp_data = message[cursor.offset:offset+len]
	cursor.offset = offset+len
	return namedtuple("ofp_action_id",
		"type,len,exp_data")(type,len,exp_data)
def ofp_table_feature_prop_oxm(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,length) = _unpack("HH", message, cursor)
	oxm_ids = _unpack("%dI" % ((length-4)//4), message, cursor)
cursor.offset += _align(length)-length
return namedtuple("ofp_table_feature_prop_oxm",
"type,length,oxm_ids")(type,length,oxm_ids)
def ofp_table_feature_prop_experimenter(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,length,experimenter,exp_type) = _unpack("HHII", message, cursor)
	data = message[cursor.offset:offset+length]
	cursor.offset = offset + _align(length)
return namedtuple("ofp_table_feature_prop_experimenter",
"type,length,experimenter,exp_type,data")(
type,length,experimenter,exp_type,data)
# 7.3.5.7
def ofp_port_stats_request(message, offset):
return namedtuple("ofp_port_stats_request",
"port_no")(*_unpack("I4x", message, offset))
def ofp_port_stats(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
fixed = _unpack("H2xIII12Q2I", message, cursor)
properties = []
while cursor.offset < offset+fixed[0]:
h = ofp_port_stats_prop_header(message, cursor.offset)
if h.type == OFPPSPT_ETHERNET:
properties.append(ofp_port_stats_prop_ethernet(message, cursor))
elif h.type == OFPPSPT_OPTICAL:
properties.append(ofp_port_stats_prop_optical(message, cursor))
elif h.type == OFPPSPT_EXPERIMENTER:
properties.append(ofp_port_stats_prop_experimenter(message, cursor))
else:
raise ValueError(h)
return namedtuple("ofp_port_stats", '''
length
port_no
duration_sec duration_nsec
rx_packets tx_packets
rx_bytes tx_bytes
rx_dropped tx_dropped
rx_errors tx_errors
properties''')(*fixed+(properties,))
def ofp_port_stats_prop_header(message, offset):
return namedtuple("ofp_port_stats_prop_header",
"type length")(*_unpack("HH",message,offset))
def ofp_port_stats_prop_ethernet(message, offset):
return namedtuple("ofp_port_stats_prop_ethernet",
"type length rx_frame_err rx_over_err rx_crc_err collisions")(
*_unpack("HH4x4Q", message, offset))
def ofp_port_stats_prop_optical(message, offset):
return namedtuple("ofp_port_stats_prop_optical",
'''type length flags
tx_freq_lmda tx_offset tx_grid_span
rx_freq_lmda rx_offset rx_grid_span
tx_pwr rx_pwr
bias_current temperature''')(*_unpack("HH4x7I4H", message, offset))
def ofp_port_stats_prop_experimenter(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,length,experimenter,exp_type) = _unpack("HHII", message, cursor)
experimenter_data = message[cursor.offset:offset+length]
cursor.offset = offset + _align(length)
return namedtuple("ofp_port_stats_experimenter",
"type,length,experimenter,exp_type,experimenter_data")(
type,length,experimenter,exp_type,experimenter_data)
# 7.3.5.8
## skip ofp_port
# 7.3.5.9
def ofp_queue_stats_request(message, offset):
return namedtuple("ofp_queue_stats_request",
"port_no queue_id")(*_unpack("II", message, offset))
def ofp_queue_stats(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
fixed = _unpack("H6xII3QII", message, cursor)
properties = []
while cursor.offset < offset + fixed[0]:
h = ofp_queue_stats_prop_header(message, cursor.offset)
if h.type == OFPQSPT_EXPERIMENTER:
properties.append(ofp_queue_stats_prop_experimenter(message, cursor))
else:
raise ValueError(h)
return namedtuple("ofp_queue_stats",
'''length port_no queue_id
tx_bytes tx_packets tx_errors
duration_sec duration_nsec''')(
*fixed+(properties,))
def ofp_queue_stats_prop_header(message, offset):
return namedtuple("ofp_queue_stats_prop_header",
"type length")(*_unpack("HH", message, offset))
def ofp_queue_stats_prop_experimenter(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,length,experimenter,exp_type) = _unpack("HHII", message, cursor)
experimenter_data = message[cursor.offset:offset+length]
cursor.offset = offset + _align(length)
return namedtuple("ofp_queue_stats_prop_experimenter",
"type,length,experimenter,exp_type,experimenter_data")(
type,length,experimenter,exp_type,experimenter_data)
# 7.3.5.10
def ofp_queue_desc_request(message, offset):
return namedtuple("ofp_queue_desc_request",
"port_no queue_id")(*_unpack("II", message, offset))
def ofp_queue_desc(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(port_no,queue_id,len) = _unpack("IIH6x", message, cursor)
properties = []
while cursor.offset < offset + len:
h = ofp_queue_desc_prop_header(message, cursor.offset)
if h.type == OFPQDPT_MIN_RATE:
properties.append(ofp_queue_desc_prop_min_rate(message, cursor))
elif h.type == OFPQDPT_MAX_RATE:
properties.append(ofp_queue_desc_prop_max_rate(message, cursor))
elif h.type == OFPQDPT_EXPERIMENTER:
properties.append(ofp_queue_desc_prop_experimenter(message, cursor))
else:
raise ValueError(h)
return namedtuple("ofp_queue_desc",
"port_no queue_id len properties")(
port_no,queue_id,len,properties)
def ofp_queue_desc_prop_header(message, offset):
return namedtuple("ofp_queue_desc_prop_header",
"type length")(*_unpack("HH", message, offset))
def ofp_queue_desc_prop_min_rate(message, offset):
return namedtuple("ofp_queue_desc_prop_min_rate",
"type length rate")(*_unpack("HHH2x", message, offset))
def ofp_queue_desc_prop_max_rate(message, offset):
return namedtuple("ofp_queue_desc_prop_max_rate",
"type length rate")(*_unpack("HHH2x", message, offset))
def ofp_queue_desc_prop_experimenter(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,length,experimenter,exp_type) = _unpack("HHII", message, cursor)
experimenter_data = message[cursor.offset:offset+length]
cursor.offset = offset + _align(length)
return namedtuple("ofp_queue_desc_prop_experimenter",
"type,length,experimenter,exp_type,experimenter_data")(
type,length,experimenter,exp_type,experimenter_data)
# 7.3.5.11
def ofp_group_stats_request(message, offset):
return namedtuple("ofp_group_stats_request",
"group_id")(*_unpack("I4x", message, offset))
def ofp_group_stats(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(length, group_id, ref_count, packet_count, byte_count,
duration_sec, duration_nsec) = _unpack("H2xII4xQQII", message, cursor)
bucket_stats = _list_fetch(message, cursor, offset+length, ofp_bucket_counter)
return namedtuple("ofp_group_stats", '''
length group_id ref_count packet_count byte_count
duration_sec duration_nsec bucket_stats''')(
length,group_id,ref_count,packet_count,byte_count,
duration_sec,duration_nsec,bucket_stats)
def ofp_bucket_counter(message, offset):
return namedtuple("ofp_bucket_counter",
"packet_count byte_count")(*_unpack("QQ", message, offset))
# 7.3.5.12
def ofp_group_desc(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(length, type, group_id) = _unpack("HBxI", message, cursor)
buckets = _list_fetch(message, cursor, offset+length, ofp_bucket)
return namedtuple("ofp_group_desc",
"length type group_id buckets")(
length,type,group_id,buckets)
# 7.3.5.13
def ofp_group_features(message, offset):
cursor = _cursor(offset)
(type,capabilities) = _unpack("II", message, cursor)
max_groups = _unpack("4I", message, cursor)
actions = _unpack("4I", message, cursor)
return namedtuple("ofp_group_features",
"type,capabilities,max_groups,actions")(
type,capabilities,max_groups,actions)
# 7.3.5.14
def ofp_meter_multipart_request(message, offset):
return namedtuple("ofp_meter_multipart_request",
"meter_id")(*_unpack("I4x", message, offset))
def ofp_meter_stats(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(meter_id,len,flow_count,packet_in_count,byte_in_count,
duration_sec,duration_nsec) = _unpack("IH6xIQQII", message, cursor)
band_stats = _list_fetch(message, cursor, offset+len, ofp_meter_band_stats)
return namedtuple("ofp_meter_stats", '''
meter_id len flow_count packet_in_count byte_in_count
duration_sec duration_nsec band_stats''')(
meter_id,len,flow_count,packet_in_count,byte_in_count,
duration_sec,duration_nsec,band_stats)
def ofp_meter_band_stats(message, offset):
return namedtuple("ofp_meter_band_stats",
"packet_band_count,byte_band_count")(*_unpack("QQ", message, offset))
# 7.3.5.15
## skip ofp_meter_multipart_request
def ofp_meter_config(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(length,flags,meter_id) = _unpack("HHI", message, cursor)
bands = _list_fetch(message, cursor, offset+length, ofp_meter_band_)
return namedtuple("ofp_meter_config",
"length,flags,meter_id,bands")(
length,flags,meter_id,bands)
# 7.3.5.16
def ofp_meter_features(message, offset):
return namedtuple("ofp_meter_features", '''
max_meter band_types capabilities
max_bands max_color''')(*_unpack("3IBB2x", message, offset))
# 7.3.5.17.1
def ofp_flow_monitor_request(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(monitor_id,out_port,out_group,flags,table_id,command) = _unpack("IIIHBB", message, cursor)
match = ofp_match(message, cursor)
return namedtuple("ofp_flow_monitor_request",
"monitor_id,out_port,out_group,flags,table_id,command,match")(
monitor_id,out_port,out_group,flags,table_id,command,match)
# 7.3.5.17.2
def ofp_flow_update_header(message, offset):
return namedtuple("ofp_flow_update_header",
"length event")(*_unpack("HH", message, offset))
def ofp_flow_update_full(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(length,event,table_id,reason,
idle_timeout,hard_timeout,
priority,zero,cookie) = _unpack("HHBBHHHIQ", message, cursor)
assert zero==0
match = ofp_match(message, cursor)
instructions = []
while cursor.offset < offset + length:
instructions.append(ofp_instruction_(message,cursor))
return namedtuple("ofp_flow_monitor_request",
'''length,event,table_id,reason,
idle_timeout,hard_timeout,
priority,cookie,match,instructions''')(
length,event,table_id,reason,
idle_timeout,hard_timeout,
priority,cookie,match,instructions)
def ofp_flow_update_abbrev(message, offset):
return namedtuple("ofp_flow_update_abbrev",
"length,event,xid")(*_unpack("HHI", message, offset))
def ofp_flow_update_paused(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(length,event,zero) = _unpack("HHI", message, cursor)
assert zero==0
return namedtuple("ofp_flow_update_paused",
"length,event")(length,event)
# 7.3.5.18
def ofp_experimenter_multipart_header(message, offset):
return namedtuple("ofp_experimenter_multipart_header",
"experimenter,exp_type")(*_unpack("II", message, offset))
def ofp_experimenter_multipart_(message, offset, limit):
cursor = _cursor(offset)
offset = cursor.offset
(experimenter,exp_type) = ofp_experimenter_multipart_header(message, cursor)
data = message[cursor.offset:limit]
cursor.offset = limit
return namedtuple("ofp_experimenter_multipart_",
"experimenter,exp_type,data")(experimenter,exp_type,data)
# 7.3.6
def ofp_packet_out(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(buffer_id, in_port, actions_len) = _unpack("IIH6x", message, cursor)
actions_end = cursor.offset + actions_len
actions = []
while cursor.offset < actions_end:
actions.append(ofp_action_(message, cursor))
data = message[cursor.offset:offset+header.length]
cursor.offset = offset+header.length
return namedtuple("ofp_packet_out",
"header,buffer_id,in_port,actions_len,actions,data")(
header,buffer_id,in_port,actions_len,actions,data)
# 7.3.8
def ofp_role_request(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(role,generation_id) = _unpack("I4xQ", message, cursor)
return namedtuple("ofp_role_request",
"header,role,generation_id")(header,role,generation_id)
# 7.3.9.1
def ofp_bundle_ctrl_msg(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(bundle_id,type,flags) = _unpack("IHH", message, cursor)
properties = []
while cursor.offset < offset + header.length:
h = ofp_bundle_prop_header(message, cursor.offset)
		if h.type == OFPBPT_EXPERIMENTER:
properties.append(ofp_bundle_prop_experimenter(message, cursor))
else:
raise ValueError(h)
return namedtuple("ofp_bundle_ctrl_msg",
"header,bundle_id,type,flags,properties")(
header,bundle_id,type,flags,properties)
# 7.3.9.2
def ofp_bundle_add_msg(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(bundle_id,flags) = _unpack("I2xH", message, cursor)
	embedded = parse(message, cursor)
	properties = []
	while cursor.offset < offset + header.length:
		h = ofp_bundle_prop_header(message, cursor.offset)
		if h.type == OFPBPT_EXPERIMENTER:
			properties.append(ofp_bundle_prop_experimenter(message, cursor))
		else:
			raise ValueError(h)
	return namedtuple("ofp_bundle_add_msg",
		"header,bundle_id,flags,message,properties")(
		header,bundle_id,flags,embedded,properties)
def ofp_bundle_prop_header(message, offset):
return namedtuple("ofp_bundle_prop_header",
"type length")(*_unpack("HH", message, offset))
def ofp_bundle_prop_experimenter(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,length,experimenter,exp_type) = _unpack("HHII", message, cursor)
data = message[cursor.offset:offset+length]
cursor.offset = offset+length
return namedtuple("ofp_bundle_prop_experimenter",
"type,length,experimenter,exp_type,data")(
type,length,experimenter,exp_type,data)
# 7.3.10
def ofp_async_config(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
properties = []
while cursor.offset < offset + header.length:
h = ofp_async_config_prop_header(message, cursor.offset)
if h.type in (OFPACPT_PACKET_IN_SLAVE, OFPACPT_PACKET_IN_MASTER,
OFPACPT_PORT_STATUS_SLAVE, OFPACPT_PORT_STATUS_MASTER,
OFPACPT_FLOW_REMOVED_SLAVE, OFPACPT_FLOW_REMOVED_MASTER,
OFPACPT_ROLE_STATUS_SLAVE, OFPACPT_ROLE_STATUS_MASTER,
OFPACPT_REQUESTFORWARD_SLAVE, OFPACPT_REQUESTFORWARD_MASTER):
properties.append(ofp_async_config_prop_reasons(message, cursor))
		elif h.type in (OFPACPT_EXPERIMENTER_SLAVE, OFPACPT_EXPERIMENTER_MASTER):
properties.append(ofp_async_config_prop_experimenter(message, cursor))
else:
raise ValueError(h)
return namedtuple("ofp_async_config",
"header,properties")(
header,properties)
def ofp_async_config_prop_header(message, offset):
return namedtuple("ofp_async_config_prop_header",
"type length")(*_unpack("HH", message, offset))
def ofp_async_config_prop_experimenter(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,length,experimenter,exp_type) = _unpack("HHII", message, cursor)
	data = message[cursor.offset:offset+length]
	cursor.offset = offset+length
return namedtuple("ofp_async_config_prop_experimenter",
"type length experimenter exp_type data")(
type,length,experimenter,exp_type,data)
def ofp_async_config_prop_reasons(message, offset):
return namedtuple("ofp_async_config_prop_reasons",
"type length mask")(*_unpack("HHI", message, offset))
# 7.4.1
def ofp_packet_in(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(buffer_id, total_len, reason, table_id, cookie) = _unpack("IHBBQ", message, cursor)
match = ofp_match(message, cursor)
_unpack("2x", message, cursor);
data = message[cursor.offset:offset+header.length]
cursor.offset = offset+header.length
return namedtuple("ofp_packet_in",
"header,buffer_id,total_len,reason,table_id,cookie,match,data")(
header,buffer_id,total_len,reason,table_id,cookie,match,data)
# 7.4.2
def ofp_flow_removed(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(cookie,priority,reason,table_id,
duration_sec,duration_nsec,
idle_timeout,hard_timeout,packet_count,byte_count) = _unpack("QHBBIIHHQQ", message, cursor)
match = ofp_match(message, cursor)
return namedtuple("ofp_flow_removed",
'''header cookie priority reason table_id
duration_sec duration_nsec
idle_timeout hard_timeout packet_count byte_count
match''')(
header,cookie,priority,reason,table_id,
duration_sec,duration_nsec,
idle_timeout,hard_timeout,packet_count,byte_count,
match)
# 7.4.3
def ofp_port_status(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(reason,) = _unpack("B7x", message, cursor)
desc = ofp_port(message, cursor)
return namedtuple("ofp_port_status",
"header,reason,desc")(
header,reason,desc)
# 7.4.4
def ofp_role_status(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(role,reason,generation_id) = _unpack("IB3xQ", message, offset)
properties = []
while cursor.offset < offset + header.length:
h = ofp_role_prop_header(message, cursor.offset)
if h.type == OFPRPT_EXPERIMENTER:
			properties.append(ofp_role_prop_experimenter(message, cursor))
else:
raise ValueError(h)
return namedtuple("ofp_role_status",
"header,role,reason,generation_id,properties")(
header,role,reason,generation_id,properties)
def ofp_role_prop_header(message, offset):
return namedtuple("ofp_role_prop_header",
"type length")(*_unpack("HH", message, offset))
def ofp_role_prop_experimenter(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
(type,length,experimenter,exp_type) = _unpack("HHII", message, offset)
experimenter_data = message[cursor.offset:offset+length]
return namedtuple("ofp_role_prop_experimenter",
"type,length,experimenter,exp_type,experimenter_data")(
type,length,experimenter,exp_type,experimenter_data)
# 7.4.5
def ofp_table_status(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(reason,) = _unpack("B7x", message, cursor)
table = ofp_table_desc(message, cursor)
return namedtuple("ofp_table_status",
"header,reason,table")(header,reason,table)
# 7.4.6
def ofp_requestforward_header(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
request = ofp_header(message, cursor)
return namedtuple("ofp_requestforward_header",
"header request")(header, request)
def ofp_requestforward_(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
request = parse(message, cursor)
return namedtuple("ofp_requestforward_",
"header request")(header, request)
# 7.5.1
def ofp_hello(message, offset=0):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
elements = []
while cursor.offset < offset + header.length:
elem_header = ofp_hello_elem_header(message, cursor.offset)
if elem_header.type == 1:
elements.append(ofp_hello_elem_versionbitmap(message, cursor))
else:
raise ValueError("message offset=%d %s" % (cursor.offset, elem_header))
assert cursor.offset == offset + header.length, (cursor.offset, offset, header.length)
return namedtuple("ofp_hello", "header elements")(header, elements)
def ofp_hello_elem_header(message, offset):
return namedtuple("ofp_hello_elem_header",
"type length")(*_unpack("HH", message, offset))
def ofp_hello_elem_versionbitmap(message, offset):
cursor = _cursor(offset)
(type, length) = _unpack("HH", message, cursor)
assert type == OFPHET_VERSIONBITMAP
bitmaps = _unpack("%dI" % ((length-4)/4), message, cursor)
cursor.offset += _align(length) - length
return namedtuple("ofp_hello_elem_versionbitmap",
"type length bitmaps")(type,length,bitmaps)
# 7.5.4
def ofp_error_msg(message, offset=0):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(type, code) = _unpack("HH", message, cursor)
data = message[cursor.offset:offset+header.length]
cursor.offset = offset + header.length
return namedtuple("ofp_error_msg",
"header,type,code,data")(header,type,code,data)
def ofp_error_experimenter_msg(message, offset=0):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(type, exp_code, experimenter) = _unpack("HHI", message, cursor)
data = message[cursor.offset:offset+header.length]
cursor.offset = offset + header.length
return namedtuple("ofp_error_experimenter_msg",
"header,type,exp_code,experimenter,data")(
header,type,exp_code,experimenter,data)
# 7.5.5
def ofp_experimenter_msg(message, offset):
cursor = _cursor(offset)
offset = cursor.offset
header = ofp_header(message, cursor)
(experimenter,exp_type) = _unpack("II", message, cursor)
data = message[cursor.offset:offset+header.length]
cursor.offset = offset+header.length
return namedtuple("ofp_experimenter_msg",
"header,experimenter,exp_type,data")(header,experimenter,exp_type,data)
| apache-2.0 | -7,787,175,382,925,324,000 | 32.453031 | 120 | 0.707101 | false |
xuleiboy1234/autoTitle | tensorflow/tensorflow/contrib/rnn/python/ops/gru_ops.py | 24 | 5476 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for the Block GRU Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.rnn.ops import gen_gru_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import resource_loader
_gru_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_gru_ops.so"))
@ops.RegisterGradient("GRUBlockCell")
def _GRUBlockCellGrad(op, *grad):
r"""Gradient for GRUBlockCell.
Args:
op: Op for which the gradient is defined.
*grad: Gradients of the optimization function wrt output
for the Op.
Returns:
d_x: Gradients wrt to x
d_h: Gradients wrt to h
d_w_ru: Gradients wrt to w_ru
d_w_c: Gradients wrt to w_c
d_b_ru: Gradients wrt to b_ru
d_b_c: Gradients wrt to b_c
Mathematics behind the Gradients below:
```
d_c_bar = d_h \circ (1-u) \circ (1-c \circ c)
d_u_bar = d_h \circ (h-c) \circ u \circ (1-u)
d_r_bar_u_bar = [d_r_bar d_u_bar]
[d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T
[d_x_component_2 d_h_prevr] = d_c_bar * w_c^T
d_x = d_x_component_1 + d_x_component_2
d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + u
```
Below calculation is performed in the python wrapper for the Gradients
(not in the gradient kernel.)
```
d_w_ru = x_h_prevr^T * d_c_bar
d_w_c = x_h_prev^T * d_r_bar_u_bar
d_b_ru = sum of d_r_bar_u_bar along axis = 0
d_b_c = sum of d_c_bar along axis = 0
```
"""
x, h_prev, w_ru, w_c, b_ru, b_c = op.inputs
r, u, c, _ = op.outputs
_, _, _, d_h = grad
d_x, d_h_prev, d_c_bar, d_r_bar_u_bar = gen_gru_ops.gru_block_cell_grad(
x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h)
x_h_prev = array_ops.concat([x, h_prev], 1)
d_w_ru = math_ops.matmul(x_h_prev, d_r_bar_u_bar, transpose_a=True)
d_b_ru = nn_ops.bias_add_grad(d_r_bar_u_bar)
x_h_prevr = array_ops.concat([x, h_prev * r], 1)
d_w_c = math_ops.matmul(x_h_prevr, d_c_bar, transpose_a=True)
d_b_c = nn_ops.bias_add_grad(d_c_bar)
return d_x, d_h_prev, d_w_ru, d_w_c, d_b_ru, d_b_c
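# Derivation note (editor's addition): from the forward equation
#   h = (1-u) \circ c + u \circ h_prev
# h_prev contributes to d_h_prev directly through the u \circ h_prev term
# (giving d_h \circ u) and indirectly through h_prevr = h_prev \circ r
# (giving d_h_prevr \circ r), which yields the d_h_prev formula in the
# docstring above.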
class GRUBlockCell(rnn_cell_impl.RNNCell):
r"""Block GRU cell implementation.
The implementation is based on: http://arxiv.org/abs/1406.1078
Computes the GRU cell forward propagation for 1 time step.
This kernel op implements the following mathematical equations:
Biases are initialized with:
* `b_ru` - constant_initializer(1.0)
* `b_c` - constant_initializer(0.0)
```
x_h_prev = [x, h_prev]
[r_bar u_bar] = x_h_prev * w_ru + b_ru
r = sigmoid(r_bar)
u = sigmoid(u_bar)
h_prevr = h_prev \circ r
x_h_prevr = [x h_prevr]
c_bar = x_h_prevr * w_c + b_c
c = tanh(c_bar)
h = (1-u) \circ c + u \circ h_prev
```
"""
def __init__(self, cell_size):
"""Initialize the Block GRU cell.
Args:
cell_size: int, GRU cell size.
"""
self._cell_size = cell_size
@property
def state_size(self):
return self._cell_size
@property
def output_size(self):
return self._cell_size
def __call__(self, x, h_prev, scope=None):
"""GRU cell."""
with vs.variable_scope(scope or type(self).__name__):
input_size = x.get_shape().with_rank(2)[1]
      # Check if the input size exists.
if input_size is None:
raise ValueError("Expecting input_size to be set.")
# Check cell_size == state_size from h_prev.
cell_size = h_prev.get_shape().with_rank(2)[1]
if cell_size != self._cell_size:
raise ValueError("Shape of h_prev[1] incorrect: cell_size %i vs %s" %
(self._cell_size, cell_size))
if cell_size is None:
raise ValueError("cell_size from `h_prev` should not be None.")
w_ru = vs.get_variable("w_ru", [input_size + self._cell_size,
self._cell_size * 2])
b_ru = vs.get_variable(
"b_ru", [self._cell_size * 2],
initializer=init_ops.constant_initializer(1.0))
w_c = vs.get_variable("w_c",
[input_size + self._cell_size, self._cell_size])
b_c = vs.get_variable(
"b_c", [self._cell_size],
initializer=init_ops.constant_initializer(0.0))
_gru_block_cell = gen_gru_ops.gru_block_cell # pylint: disable=invalid-name
_, _, _, new_h = _gru_block_cell(
x=x, h_prev=h_prev, w_ru=w_ru, w_c=w_c, b_ru=b_ru, b_c=b_c)
return new_h, new_h
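# Usage sketch (editor's addition, not part of the public API): one forward
# step of GRUBlockCell under the TF 1.x graph/session workflow assumed
# throughout this file; batch, input and cell sizes below are illustrative.
def _gru_block_cell_demo():
  import tensorflow as tf  # local import keeps the sketch self-contained
  cell = GRUBlockCell(cell_size=128)
  x = tf.placeholder(tf.float32, [None, 64])        # batch x input_size
  h_prev = tf.placeholder(tf.float32, [None, 128])  # batch x cell_size
  new_h, _ = cell(x, h_prev)                        # both returns are new_h
  return new_h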
| mit | -5,857,453,629,991,839,000 | 29.592179 | 82 | 0.622535 | false |
orgito/ansible | lib/ansible/module_utils/basic.py | 2 | 116522 | # Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# Copyright (c), Toshio Kuratomi <[email protected]> 2016
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
SIZE_RANGES = {
'Y': 1 << 80,
'Z': 1 << 70,
'E': 1 << 60,
'P': 1 << 50,
'T': 1 << 40,
'G': 1 << 30,
'M': 1 << 20,
'K': 1 << 10,
'B': 1,
}
FILE_ATTRIBUTES = {
'A': 'noatime',
'a': 'append',
'c': 'compressed',
'C': 'nocow',
'd': 'nodump',
'D': 'dirsync',
'e': 'extents',
'E': 'encrypted',
'h': 'blocksize',
'i': 'immutable',
'I': 'indexed',
'j': 'journalled',
'N': 'inline',
's': 'zero',
'S': 'synchronous',
't': 'notail',
'T': 'blockroot',
'u': 'undelete',
'X': 'compressedraw',
'Z': 'compresseddirty',
}
PASS_VARS = {
'check_mode': 'check_mode',
'debug': '_debug',
'diff': '_diff',
'keep_remote_files': '_keep_remote_files',
'module_name': '_name',
'no_log': 'no_log',
'remote_tmp': '_remote_tmp',
'selinux_special_fs': '_selinux_special_fs',
'shell_executable': '_shell',
'socket': '_socket_path',
'syslog_facility': '_syslog_facility',
'tmpdir': '_tmpdir',
'verbosity': '_verbosity',
'version': 'ansible_version',
}
PASS_BOOLS = ('no_log', 'debug', 'diff')
# Ansible modules can be written in any language.
# The functions available here can be used to do many common tasks,
# to simplify development of Python modules.
import __main__
import atexit
import locale
import os
import re
import shlex
import signal
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from collections import deque
from itertools import chain, repeat
try:
import syslog
HAS_SYSLOG = True
except ImportError:
HAS_SYSLOG = False
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
HAVE_SELINUX = False
try:
import selinux
HAVE_SELINUX = True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
try:
import json
# Detect the python-json library which is incompatible
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json and was not found!", "failed": true}')
sys.exit(1)
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
# we may have been able to import md5 but it could still not be available
try:
hashlib.md5()
except ValueError:
        AVAILABLE_HASH_ALGORITHMS.pop('md5', None)
except Exception:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except Exception:
pass
from ansible.module_utils.common._collections_compat import (
KeysView,
Mapping, MutableMapping,
Sequence, MutableSequence,
Set, MutableSet,
)
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.file import (
_PERM_BITS as PERM_BITS,
_EXEC_PERM_BITS as EXEC_PERM_BITS,
_DEFAULT_PERM as DEFAULT_PERM,
is_executable,
format_attributes,
get_flags_from_attributes,
)
from ansible.module_utils.common.sys_info import (
get_distribution,
get_distribution_version,
get_platform_subclass,
)
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.six import (
PY2,
PY3,
b,
binary_type,
integer_types,
iteritems,
string_types,
text_type,
)
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.common._utils import get_all_subclasses as _get_all_subclasses
from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
SEQUENCETYPE = frozenset, KeysView, Sequence
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
_NUMBERTYPES = tuple(list(integer_types) + [float])
# Deprecated compat. Only kept in case another module used these names. Using
# ansible.module_utils.six is preferred
NUMBERTYPES = _NUMBERTYPES
imap = map
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = text_type
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS = dict(
    # These are about setting metadata (mode, ownership, permissions in general) on
# created files (these are used by set_fs_attributes_if_different and included in
# load_file_common_arguments)
mode=dict(type='raw'),
owner=dict(),
group=dict(),
seuser=dict(),
serole=dict(),
selevel=dict(),
setype=dict(),
attributes=dict(aliases=['attr']),
# The following are not about perms and should not be in a rewritten file_common_args
src=dict(), # Maybe dest or path would be appropriate but src is not
follow=dict(type='bool', default=False), # Maybe follow is appropriate because it determines whether to follow symlinks for permission purposes too
force=dict(type='bool'),
# not taken by the file module, but other action plugins call the file module so this ignores
# them for now. In the future, the caller should take care of removing these from the module
# arguments before calling the file module.
content=dict(no_log=True), # used by copy
backup=dict(), # Used by a few modules to create a remote backup before updating the file
remote_src=dict(), # used by assemble
regexp=dict(), # used by assemble
delimiter=dict(), # used by assemble
directory_mode=dict(), # used by copy
unsafe_writes=dict(type='bool'), # should be available to any module using atomic_move
)
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Used for parsing symbolic file perms
MODE_OPERATOR_RE = re.compile(r'[+=-]')
USERS_RE = re.compile(r'[^ugo]')
PERMS_RE = re.compile(r'[^rwxXstugo]')
# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
_PY3_MIN = sys.version_info[:2] >= (3, 5)
_PY2_MIN = (2, 6) <= sys.version_info[:2] < (3,)
_PY_MIN = _PY3_MIN or _PY2_MIN
if not _PY_MIN:
print(
'\n{"failed": true, '
'"msg": "Ansible requires a minimum of Python2 version 2.6 or Python3 version 3.5. Current version: %s"}' % ''.join(sys.version.splitlines())
)
sys.exit(1)
#
# Deprecated functions
#
def get_platform():
'''
**Deprecated** Use :py:func:`platform.system` directly.
:returns: Name of the platform the module is running on in a native string
Returns a native string that labels the platform ("Linux", "Solaris", etc). Currently, this is
the result of calling :py:func:`platform.system`.
'''
return platform.system()
# End deprecated functions
#
# Compat shims
#
def load_platform_subclass(cls, *args, **kwargs):
"""**Deprecated**: Use ansible.module_utils.common.sys_info.get_platform_subclass instead"""
platform_cls = get_platform_subclass(cls)
return super(cls, platform_cls).__new__(platform_cls)
def get_all_subclasses(cls):
"""**Deprecated**: Use ansible.module_utils.common._utils.get_all_subclasses instead"""
return list(_get_all_subclasses(cls))
# End compat shims
def json_dict_unicode_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
''' Recursively convert dict keys and values to byte str
    Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, text_type):
return to_bytes(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_unicode_to_bytes, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
else:
return d
def json_dict_bytes_to_unicode(d, encoding='utf-8', errors='surrogate_or_strict'):
    ''' Recursively convert dict keys and values to text (unicode) str
    Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, binary_type):
# Warning, can traceback
return to_text(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_bytes_to_unicode, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
else:
return d
def return_values(obj):
""" Return native stringified values from datastructures.
For use with removing sensitive values pre-jsonification."""
if isinstance(obj, (text_type, binary_type)):
if obj:
yield to_native(obj, errors='surrogate_or_strict')
return
elif isinstance(obj, SEQUENCETYPE):
for element in obj:
for subelement in return_values(element):
yield subelement
elif isinstance(obj, Mapping):
for element in obj.items():
for subelement in return_values(element[1]):
yield subelement
elif isinstance(obj, (bool, NoneType)):
# This must come before int because bools are also ints
return
elif isinstance(obj, NUMBERTYPES):
yield to_native(obj, nonstring='simplerepr')
else:
raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
def _remove_values_conditions(value, no_log_strings, deferred_removals):
"""
Helper function for :meth:`remove_values`.
:arg value: The value to check for strings that need to be stripped
:arg no_log_strings: set of strings which must be stripped out of any values
:arg deferred_removals: List which holds information about nested
containers that have to be iterated for removals. It is passed into
this function so that more entries can be added to it if value is
a container type. The format of each entry is a 2-tuple where the first
element is the ``value`` parameter and the second value is a new
container to copy the elements of ``value`` into once iterated.
:returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
1. :class:`~datetime.datetime` objects which are changed into a string representation.
2. objects which are in no_log_strings are replaced with a placeholder
so that no sensitive data is leaked.
If ``value`` is a container type, returns a new empty container.
``deferred_removals`` is added to as a side-effect of this function.
.. warning:: It is up to the caller to make sure the order in which value
is passed in is correct. For instance, higher level containers need
to be passed in before lower level containers. For example, given
        ``{'level1': {'level2': {'level3': [True]}}}`` first pass in the
dictionary for ``level1``, then the dict for ``level2``, and finally
the list for ``level3``.
"""
if isinstance(value, (text_type, binary_type)):
# Need native str type
native_str_value = value
if isinstance(value, text_type):
value_is_text = True
if PY2:
native_str_value = to_bytes(value, errors='surrogate_or_strict')
elif isinstance(value, binary_type):
value_is_text = False
if PY3:
native_str_value = to_text(value, errors='surrogate_or_strict')
if native_str_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
native_str_value = native_str_value.replace(omit_me, '*' * 8)
if value_is_text and isinstance(native_str_value, binary_type):
value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
elif not value_is_text and isinstance(native_str_value, text_type):
value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
else:
value = native_str_value
elif isinstance(value, Sequence):
if isinstance(value, MutableSequence):
new_value = type(value)()
else:
new_value = [] # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Set):
if isinstance(value, MutableSet):
new_value = type(value)()
else:
new_value = set() # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Mapping):
if isinstance(value, MutableMapping):
new_value = type(value)()
else:
new_value = {} # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, datetime.datetime):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
def remove_values(value, no_log_strings):
""" Remove strings in no_log_strings from value. If value is a container
type, then remove a lot more"""
deferred_removals = deque()
no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)
while deferred_removals:
old_data, new_data = deferred_removals.popleft()
if isinstance(new_data, Mapping):
for old_key, old_elem in old_data.items():
new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
new_data[old_key] = new_elem
else:
for elem in old_data:
new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
if isinstance(new_data, MutableSequence):
new_data.append(new_elem)
elif isinstance(new_data, MutableSet):
new_data.add(new_elem)
else:
raise TypeError('Unknown container type encountered when removing private values from output')
return new_value
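# Illustrative sketch (editor's addition, not upstream code): remove_values
# walks nested containers via the deferred_removals queue, so no_log values
# are masked at any depth; exact matches are replaced wholesale, embedded
# substrings are starred out.
def _demo_remove_values():
    cleaned = remove_values({'password': 'hunter2', 'args': ['--pw=hunter2']},
                            ['hunter2'])
    assert cleaned['password'] == 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
    assert cleaned['args'] == ['--pw=********']
    return cleaned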
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
# user:pass@foo/whatever and http://username:pass@wherever/foo
# This code has false positives and consumes parts of logs that are
# not passwds
# begin: start of a passwd containing string
# end: end of a passwd containing string
# sep: char between user and passwd
# prev_begin: where in the overall string to start a search for
# a passwd
# sep_search_end: where in the string to end a search for the sep
data = to_native(data)
output = []
begin = len(data)
prev_begin = begin
sep = 1
while sep:
# Find the potential end of a passwd
try:
end = data.rindex('@', 0, begin)
except ValueError:
# No passwd in the rest of the data
output.insert(0, data[0:begin])
break
# Search for the beginning of a passwd
sep = None
sep_search_end = end
while not sep:
# URL-style username+password
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
# No url style in the data, check for ssh style in the
# rest of the string
begin = 0
# Search for separator
try:
sep = data.index(':', begin + 3, end)
except ValueError:
# No separator; choices:
if begin == 0:
# Searched the whole string so there's no password
# here. Return the remaining data
output.insert(0, data[0:begin])
break
# Search for a different beginning of the password field.
sep_search_end = begin
continue
if sep:
# Password was found; remove it.
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:sep + 1])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
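# Illustrative sketch (editor's addition, not upstream code): the heuristic
# masks the password portion of URL-style credentials embedded in log text.
def _demo_heuristic_log_sanitize():
    masked = heuristic_log_sanitize('fetching http://admin:[email protected]/repo failed')
    assert 'hunter2' not in masked
    assert '********' in masked
    return masked  # 'fetching http://admin:********@example.com/repo failed'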
def bytes_to_human(size, isbits=False, unit=None):
base = 'Bytes'
if isbits:
base = 'bits'
suffix = ''
for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]):
if (unit is None and size >= limit) or unit is not None and unit.upper() == suffix[0]:
break
if limit != 1:
suffix += base[0]
else:
suffix = base
return '%.2f %s' % (size / limit, suffix)
def human_to_bytes(number, default_unit=None, isbits=False):
'''
Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument.
example: human_to_bytes('10M') <=> human_to_bytes(10, 'M')
'''
m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
if m is None:
raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
try:
num = float(m.group(1))
except Exception:
raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))
unit = m.group(2)
if unit is None:
unit = default_unit
if unit is None:
        # No unit given, returning raw number
return int(round(num))
range_key = unit[0].upper()
try:
limit = SIZE_RANGES[range_key]
except Exception:
raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))
# default value
unit_class = 'B'
unit_class_name = 'byte'
# handling bits case
if isbits:
unit_class = 'b'
unit_class_name = 'bit'
# check unit value if more than one character (KB, MB)
if len(unit) > 1:
expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
if range_key == 'B':
expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
if unit_class_name in unit.lower():
pass
elif unit[1] != unit_class:
raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))
return int(round(num * limit))
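# Illustrative round-trips (editor's addition, not upstream code):
def _demo_size_conversions():
    assert human_to_bytes('2K') == 2048
    assert human_to_bytes(10, default_unit='M') == 10 * (1 << 20)
    assert bytes_to_human(2048) == '2.00 KB'
    assert bytes_to_human(2048, isbits=True) == '2.00 Kb'
    return True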
def _load_params():
''' read the modules parameters and store them globally.
This function may be needed for certain very dynamic custom modules which
    want to process the parameters that are being handed to the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions) however we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if PY3:
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if PY2:
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
        # This helper is used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if PY2:
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
'"failed": true}')
sys.exit(1)
def env_fallback(*args, **kwargs):
''' Load value from environment '''
for arg in args:
if arg in os.environ:
return os.environ[arg]
raise AnsibleFallbackNotFound
def _lenient_lowercase(lst):
"""Lowercase elements of a list.
If an element is not a string, pass it through untouched.
"""
lowered = []
for value in lst:
try:
lowered.append(value.lower())
except AttributeError:
lowered.append(value)
return lowered
def _json_encode_fallback(obj):
if isinstance(obj, Set):
return list(obj)
elif isinstance(obj, datetime.datetime):
return obj.isoformat()
raise TypeError("Cannot json serialize %s" % to_native(obj))
def jsonify(data, **kwargs):
for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding, default=_json_encode_fallback, **kwargs)
        # Old systems using the old simplejson module do not support the encoding keyword.
except TypeError:
try:
new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
except UnicodeDecodeError:
continue
return json.dumps(new_data, default=_json_encode_fallback, **kwargs)
except UnicodeDecodeError:
continue
raise UnicodeError('Invalid unicode encoding encountered')
def missing_required_lib(library):
hostname = platform.node()
return "Failed to import the required Python library (%s) on %s's Python %s. Please read module documentation " \
"and install in the appropriate location." % (library, hostname, sys.executable)
class AnsibleFallbackNotFound(Exception):
pass
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=None, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None):
'''
Common code for quickly building an ansible module in Python
(although you can write modules with anything that can return JSON).
See :ref:`developing_modules_general` for a general introduction
and :ref:`developing_program_flow_modules` for more detailed explanation.
'''
self._name = os.path.basename(__file__) # initialize name until we can parse from options
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False
self.bypass_checks = bypass_checks
self.no_log = no_log
# Check whether code set this explicitly for deprecation purposes
if check_invalid_arguments is None:
check_invalid_arguments = True
module_set_check_invalid_arguments = False
else:
module_set_check_invalid_arguments = True
self.check_invalid_arguments = check_invalid_arguments
self.mutually_exclusive = mutually_exclusive
self.required_together = required_together
self.required_one_of = required_one_of
self.required_if = required_if
self.cleanup_files = []
self._debug = False
self._diff = False
self._socket_path = None
self._shell = None
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
self.run_command_environ_update = {}
self._warnings = []
self._deprecations = []
self._clean = {}
self.aliases = {}
self._legal_inputs = ['_ansible_%s' % k for k in PASS_VARS]
self._options_context = list()
self._tmpdir = None
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
self._load_params()
self._set_fallbacks()
# append to legal_inputs and then possibly check against them
try:
self.aliases = self._handle_aliases()
except Exception as e:
# Use exceptions here because it isn't safe to call fail_json until no_log is processed
print('\n{"failed": true, "msg": "Module alias error: %s"}' % to_native(e))
sys.exit(1)
# Save parameter values that should never be logged
self.no_log_values = set()
self._handle_no_log_values()
# check the locale as set by the current environment, and reset to
# a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
self._check_arguments(check_invalid_arguments)
# check exclusive early
if not bypass_checks:
self._check_mutually_exclusive(mutually_exclusive)
self._set_defaults(pre=True)
self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
'str': self._check_type_str,
'list': self._check_type_list,
'dict': self._check_type_dict,
'bool': self._check_type_bool,
'int': self._check_type_int,
'float': self._check_type_float,
'path': self._check_type_path,
'raw': self._check_type_raw,
'jsonarg': self._check_type_jsonarg,
'json': self._check_type_jsonarg,
'bytes': self._check_type_bytes,
'bits': self._check_type_bits,
}
if not bypass_checks:
self._check_required_arguments()
self._check_argument_types()
self._check_argument_values()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
self._check_required_if(required_if)
self._set_defaults(pre=False)
# deal with options sub-spec
self._handle_options()
if not self.no_log:
self._log_invocation()
# finally, make sure we're in a sane working dir
self._set_cwd()
# Do this at the end so that logging parameters have been set up
        # This is to warn third party module authors that the functionality is going away.
# We exclude uri and zfs as they have their own deprecation warnings for users and we'll
# make sure to update their code to stop using check_invalid_arguments when 2.9 rolls around
if module_set_check_invalid_arguments and self._name not in ('uri', 'zfs'):
            self.deprecate('Setting check_invalid_arguments is deprecated and will be removed.'
                           ' Update the code for this module. In the future, AnsibleModule will'
                           ' always check for invalid arguments.', version='2.9')
@property
def tmpdir(self):
# if _ansible_tmpdir was not set and we have a remote_tmp,
# the module needs to create it and clean it up once finished.
# otherwise we create our own module tmp dir from the system defaults
if self._tmpdir is None:
basedir = None
basedir = os.path.expanduser(os.path.expandvars(self._remote_tmp))
if not os.path.exists(basedir):
try:
os.makedirs(basedir, mode=0o700)
except (OSError, IOError) as e:
self.warn("Unable to use %s as temporary directory, "
"failing back to system: %s" % (basedir, to_native(e)))
basedir = None
else:
self.warn("Module remote_tmp %s did not exist and was "
"created with a mode of 0700, this may cause"
" issues when running as another user. To "
"avoid this, create the remote_tmp dir with "
"the correct permissions manually" % basedir)
basefile = "ansible-moduletmp-%s-" % time.time()
try:
tmpdir = tempfile.mkdtemp(prefix=basefile, dir=basedir)
except (OSError, IOError) as e:
self.fail_json(
msg="Failed to create remote module tmp path at dir %s "
"with prefix %s: %s" % (basedir, basefile, to_native(e))
)
if not self._keep_remote_files:
atexit.register(shutil.rmtree, tmpdir)
self._tmpdir = tmpdir
return self._tmpdir
def warn(self, warning):
if isinstance(warning, string_types):
self._warnings.append(warning)
self.log('[WARNING] %s' % warning)
else:
raise TypeError("warn requires a string not a %s" % type(warning))
def deprecate(self, msg, version=None):
if isinstance(msg, string_types):
self._deprecations.append({
'msg': msg,
'version': version
})
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
else:
raise TypeError("deprecate requires a string not a %s" % type(msg))
def load_file_common_arguments(self, params):
'''
many modules deal with files, this encapsulates common
options that the file module accepts such that it is directly
available to all modules and they can share code.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(os.path.expandvars(path))
b_path = to_bytes(path, errors='surrogate_or_strict')
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(b_path):
b_path = os.path.realpath(b_path)
path = to_native(b_path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
            if default_secontext[i] is not None and secontext[i] == '_default':
secontext[i] = default_secontext[i]
attributes = params.get('attributes', None)
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext, attributes=attributes,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc, out, err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
except OSError as e:
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, path, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
st = os.lstat(b_path)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path_is_bytes = False
if isinstance(path, binary_type):
path_is_bytes = True
b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
while not os.path.ismount(b_path):
b_path = os.path.dirname(b_path)
if path_is_bytes:
return b_path
return to_text(b_path, errors='surrogate_or_strict')
def is_special_selinux_path(self, path):
"""
Returns a tuple containing (True, selinux_context) if the given path is on a
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except Exception:
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
if self.check_file_absent_if_check_mode(path):
return True
cur_context = self.selinux_context(path)
new_context = list(cur_context)
# Iterate over the current context instead of the
# argument context, which may have selevel.
(is_special_se, sp_context) = self.is_special_selinux_path(path)
if is_special_se:
new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['secontext'] = cur_context
if 'after' not in diff:
diff['after'] = {}
diff['after']['secontext'] = new_context
try:
if self.check_mode:
return True
rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
except OSError as e:
self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
if owner is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except (IOError, OSError) as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: %s' % (to_text(e)))
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
if group is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
if mode is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path_stat = os.lstat(b_path)
if self.check_file_absent_if_check_mode(b_path):
return True
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=to_native(e))
if mode != stat.S_IMODE(mode):
            # prevent mode from having extra info or being an invalid long number
path = to_text(b_path)
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = '0%03o' % prev_mode
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = '0%03o' % mode
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(b_path, mode)
else:
if not os.path.islink(b_path):
os.chmod(b_path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(b_path)
os.chmod(b_path, mode)
new_underlying_stat = os.stat(b_path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
except OSError as e:
if os.path.islink(b_path) and e.errno == errno.EPERM: # Can't set mode on symbolic links
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chmod failed', details=to_native(e),
exception=traceback.format_exc())
path_stat = os.lstat(b_path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
if attributes is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
existing = self.get_file_attributes(b_path)
attr_mod = '='
if attributes.startswith(('-', '+')):
attr_mod = attributes[0]
attributes = attributes[1:]
if existing.get('attr_flags', '') != attributes or attr_mod == '-':
attrcmd = self.get_bin_path('chattr')
if attrcmd:
attrcmd = [attrcmd, '%s%s' % (attr_mod, attributes), b_path]
changed = True
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['attributes'] = existing.get('attr_flags')
if 'after' not in diff:
diff['after'] = {}
diff['after']['attributes'] = '%s%s' % (attr_mod, attributes)
if not self.check_mode:
try:
rc, out, err = self.run_command(attrcmd)
if rc != 0 or err:
raise Exception("Error while setting attributes: %s" % (out + err))
except Exception as e:
self.fail_json(path=to_text(b_path), msg='chattr failed',
details=to_native(e), exception=traceback.format_exc())
return changed
def get_file_attributes(self, path):
output = {}
attrcmd = self.get_bin_path('lsattr', False)
if attrcmd:
attrcmd = [attrcmd, '-vd', path]
try:
rc, out, err = self.run_command(attrcmd)
if rc == 0:
res = out.split()
output['attr_flags'] = res[1].replace('-', '').strip()
output['version'] = res[0].strip()
output['attributes'] = format_attributes(output['attr_flags'])
except Exception:
pass
return output
@classmethod
def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
"""
This enables symbolic chmod string parsing as stated in the chmod man-page
This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"
"""
new_mode = stat.S_IMODE(path_stat.st_mode)
# Now parse all symbolic modes
for mode in symbolic_mode.split(','):
# Per single mode. This always contains a '+', '-' or '='
# Split it on that
permlist = MODE_OPERATOR_RE.split(mode)
# And find all the operators
opers = MODE_OPERATOR_RE.findall(mode)
            # The user(s) the mode applies to are the first element in the
# 'permlist' list. Take that and remove it from the list.
# An empty user or 'a' means 'all'.
users = permlist.pop(0)
use_umask = (users == '')
if users == 'a' or users == '':
users = 'ugo'
# Check if there are illegal characters in the user list
# They can end up in 'users' because they are not split
if USERS_RE.match(users):
raise ValueError("bad symbolic permission for mode: %s" % mode)
            # Now we have two lists of equal length, one contains the requested
# permissions and one with the corresponding operators.
for idx, perms in enumerate(permlist):
# Check if there are illegal characters in the permissions
if PERMS_RE.match(perms):
raise ValueError("bad symbolic permission for mode: %s" % mode)
for user in users:
mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)
return new_mode
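    # Worked example (editor's addition): for a file whose current mode is
    # 0o664, _symbolic_mode_to_octal(path_stat, 'u+x,o-r') proceeds as
    #   'u+x' -> mode_to_apply = S_IXUSR (0o100); '+' gives 0o664 | 0o100 = 0o764
    #   'o-r' -> mode_to_apply = S_IROTH (0o004); '-' gives 0o764 - (0o764 & 0o004) = 0o760
    # so the resulting mode is 0o760.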
@staticmethod
def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
@staticmethod
def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
        # Get the umask; if the 'user' part is empty, the effect is as if (a) were
# given, but bits that are set in the umask are not affected.
# We also need the "reversed umask" for masking
umask = os.umask(0)
os.umask(umask)
rev_umask = umask ^ PERM_BITS
# Permission bits constants documented at:
# http://docs.python.org/2/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH},
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0},
}
user_perms_to_modes = {
'u': {
'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6},
'g': {
'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & stat.S_IRWXU) >> 3,
'g': prev_mode & stat.S_IRWXG,
'o': (prev_mode & stat.S_IRWXO) << 3},
'o': {
'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
's': 0,
't': stat.S_ISVTX,
'u': (prev_mode & stat.S_IRWXU) >> 6,
'g': (prev_mode & stat.S_IRWXG) >> 3,
'o': prev_mode & stat.S_IRWXO},
}
# Insert X_perms into user_perms_to_modes
for key, value in X_perms.items():
user_perms_to_modes[key].update(value)
def or_reduce(mode, perm):
return mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
        # set modes, owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff, expand
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff, expand
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff, expand
)
changed = self.set_attributes_if_different(
file_args['path'], file_args['attributes'], changed, diff, expand
)
return changed
def check_file_absent_if_check_mode(self, file_path):
return self.check_mode and not os.path.exists(file_path)
def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
in the return path with stats about the file path.
'''
path = kwargs.get('path', kwargs.get('dest', None))
if path is None:
return kwargs
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.exists(b_path):
(uid, gid) = self.user_and_group(path)
kwargs['uid'] = uid
kwargs['gid'] = gid
try:
user = pwd.getpwuid(uid)[0]
except KeyError:
user = str(uid)
try:
group = grp.getgrgid(gid)[0]
except KeyError:
group = str(gid)
kwargs['owner'] = user
kwargs['group'] = group
st = os.lstat(b_path)
kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
# secontext not yet supported
if os.path.islink(b_path):
kwargs['state'] = 'link'
elif os.path.isdir(b_path):
kwargs['state'] = 'directory'
elif os.stat(b_path).st_nlink > 1:
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
if HAVE_SELINUX and self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
else:
kwargs['state'] = 'absent'
return kwargs
def _check_locale(self):
'''
Uses the locale module to test the currently set locale
(per the LANG and LC_CTYPE environment settings)
'''
try:
# setting the locale to '' uses the default locale
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
# fallback to the 'C' locale, which may cause unicode
# issues but is preferable to simply failing because
# of an unknown locale
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
os.environ['LC_MESSAGES'] = 'C'
except Exception as e:
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
to_native(e), exception=traceback.format_exc())
def _handle_aliases(self, spec=None, param=None):
# this uses exceptions as it happens before we can safely call fail_json
aliases_results = {} # alias:canon
if param is None:
param = self.params
if spec is None:
spec = self.argument_spec
for (k, v) in spec.items():
self._legal_inputs.append(k)
aliases = v.get('aliases', None)
default = v.get('default', None)
required = v.get('required', False)
if default is not None and required:
# not alias specific but this is a good place to check this
raise Exception("internal error: required and default are mutually exclusive for %s" % k)
if aliases is None:
continue
if not isinstance(aliases, SEQUENCETYPE) or isinstance(aliases, (binary_type, text_type)):
raise Exception('internal error: aliases must be a list or tuple')
for alias in aliases:
self._legal_inputs.append(alias)
aliases_results[alias] = k
if alias in param:
param[k] = param[alias]
return aliases_results
def _handle_no_log_values(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
# Use the argspec to determine which args are no_log
for arg_name, arg_opts in spec.items():
if arg_opts.get('no_log', False):
# Find the value for the no_log'd param
no_log_object = param.get(arg_name, None)
if no_log_object:
self.no_log_values.update(return_values(no_log_object))
if arg_opts.get('removed_in_version') is not None and arg_name in param:
self._deprecations.append({
'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
'version': arg_opts.get('removed_in_version')
})
def _check_arguments(self, check_invalid_arguments, spec=None, param=None, legal_inputs=None):
self._syslog_facility = 'LOG_USER'
unsupported_parameters = set()
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
if legal_inputs is None:
legal_inputs = self._legal_inputs
for (k, v) in list(param.items()):
if check_invalid_arguments and k not in legal_inputs:
unsupported_parameters.add(k)
elif k.startswith('_ansible_'):
# handle setting internal properties from internal ansible vars
key = k.replace('_ansible_', '')
if key in PASS_BOOLS:
setattr(self, PASS_VARS[key], self.boolean(v))
else:
setattr(self, PASS_VARS[key], v)
# clean up internal params:
del self.params[k]
if unsupported_parameters:
msg = "Unsupported parameters for (%s) module: %s" % (self._name, ', '.join(sorted(list(unsupported_parameters))))
if self._options_context:
msg += " found in %s." % " -> ".join(self._options_context)
msg += " Supported parameters include: %s" % (', '.join(sorted(spec.keys())))
self.fail_json(msg=msg)
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check, param=None):
count = 0
if param is None:
param = self.params
for term in check:
if term in param:
count += 1
return count
def _check_mutually_exclusive(self, spec, param=None):
if spec is None:
return
for check in spec:
count = self._count_terms(check, param)
if count > 1:
msg = "parameters are mutually exclusive: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_one_of(self, spec, param=None):
if spec is None:
return
for check in spec:
count = self._count_terms(check, param)
if count == 0:
msg = "one of the following is required: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_together(self, spec, param=None):
if spec is None:
return
for check in spec:
counts = [self._count_terms([field], param) for field in check]
non_zero = [c for c in counts if c > 0]
if len(non_zero) > 0:
if 0 in counts:
msg = "parameters are required together: %s" % ', '.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_arguments(self, spec=None, param=None):
''' ensure all required arguments are present '''
missing = []
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
required = v.get('required', False)
if required and k not in param:
missing.append(k)
if len(missing) > 0:
msg = "missing required arguments: %s" % ", ".join(missing)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_if(self, spec, param=None):
        ''' ensure that parameters which are conditionally required are present '''
if spec is None:
return
if param is None:
param = self.params
for sp in spec:
missing = []
max_missing_count = 0
is_one_of = False
if len(sp) == 4:
key, val, requirements, is_one_of = sp
else:
key, val, requirements = sp
            # If is_one_of is True, at least one requirement should be
            # present; otherwise all requirements should be present.
if is_one_of:
max_missing_count = len(requirements)
term = 'any'
else:
term = 'all'
if key in param and param[key] == val:
for check in requirements:
count = self._count_terms((check,), param)
if count == 0:
missing.append(check)
if len(missing) and len(missing) >= max_missing_count:
msg = "%s is %s but %s of the following are missing: %s" % (key, val, term, ', '.join(missing))
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
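    # Worked example (editor's addition): a required_if entry such as
    #   ('state', 'present', ('path', 'content'), True)
    # means: when params['state'] == 'present', at least one of 'path' or
    # 'content' must be supplied (is_one_of=True). Without the fourth
    # element, all of the listed parameters become required.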
def _check_argument_values(self, spec=None, param=None):
''' ensure all arguments have the requested values, and there are no stray arguments '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
choices = v.get('choices', None)
if choices is None:
continue
if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
if k in param:
# Allow one or more when type='list' param with choices
if isinstance(param[k], list):
diff_list = ", ".join([item for item in param[k] if item not in choices])
if diff_list:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one or more of: %s. Got no match for: %s" % (k, choices_str, diff_list)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
elif param[k] not in choices:
# PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
# the value. If we can't figure this out, module author is responsible.
lowered_choices = None
if param[k] == 'False':
lowered_choices = _lenient_lowercase(choices)
overlap = BOOLEANS_FALSE.intersection(choices)
if len(overlap) == 1:
# Extract from a set
(param[k],) = overlap
if param[k] == 'True':
if lowered_choices is None:
lowered_choices = _lenient_lowercase(choices)
overlap = BOOLEANS_TRUE.intersection(choices)
if len(overlap) == 1:
(param[k],) = overlap
if param[k] not in choices:
choices_str = ", ".join([to_native(c) for c in choices])
msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
else:
msg = "internal error: choices for argument %s are not iterable: %s" % (k, choices)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def safe_eval(self, value, locals=None, include_exceptions=False):
# do not allow method calls to modules
if not isinstance(value, string_types):
            # already templated to a datastructure, perhaps?
if include_exceptions:
return (value, None)
return value
if re.search(r'\w\.\w+\(', value):
if include_exceptions:
return (value, None)
return value
# do not allow imports
if re.search(r'import \w+', value):
if include_exceptions:
return (value, None)
return value
try:
result = literal_eval(value)
if include_exceptions:
return (result, None)
else:
return result
except Exception as e:
if include_exceptions:
return (value, e)
return value
def _check_type_str(self, value):
if isinstance(value, string_types):
return value
# Note: This could throw a unicode error if value's __str__() method
# returns non-ascii. Have to port utils.to_bytes() if that happens
return str(value)
def _check_type_list(self, value):
if isinstance(value, list):
return value
if isinstance(value, string_types):
return value.split(",")
elif isinstance(value, int) or isinstance(value, float):
return [str(value)]
raise TypeError('%s cannot be converted to a list' % type(value))
def _check_type_dict(self, value):
if isinstance(value, dict):
return value
if isinstance(value, string_types):
if value.startswith("{"):
try:
return json.loads(value)
except Exception:
(result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
if exc is not None:
raise TypeError('unable to evaluate string as dictionary')
return result
elif '=' in value:
fields = []
field_buffer = []
in_quote = False
in_escape = False
for c in value.strip():
if in_escape:
field_buffer.append(c)
in_escape = False
elif c == '\\':
in_escape = True
elif not in_quote and c in ('\'', '"'):
in_quote = c
elif in_quote and in_quote == c:
in_quote = False
elif not in_quote and c in (',', ' '):
field = ''.join(field_buffer)
if field:
fields.append(field)
field_buffer = []
else:
field_buffer.append(c)
field = ''.join(field_buffer)
if field:
fields.append(field)
return dict(x.split("=", 1) for x in fields)
else:
raise TypeError("dictionary requested, could not parse JSON or key=value")
raise TypeError('%s cannot be converted to a dict' % type(value))
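# A sketch of the two string forms accepted above (illustrative only):
#   self._check_type_dict('{"a": 1}')       -> {'a': 1}                  # JSON
#   self._check_type_dict('a=1, b="x y"')   -> {'a': '1', 'b': 'x y'}    # key=value
# Quotes and backslash escapes are honoured by the small state machine above.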
def _check_type_bool(self, value):
if isinstance(value, bool):
return value
if isinstance(value, string_types) or isinstance(value, int):
return self.boolean(value)
raise TypeError('%s cannot be converted to a bool' % type(value))
def _check_type_int(self, value):
if isinstance(value, int):
return value
if isinstance(value, string_types):
return int(value)
raise TypeError('%s cannot be converted to an int' % type(value))
def _check_type_float(self, value):
if isinstance(value, float):
return value
if isinstance(value, (binary_type, text_type, int)):
return float(value)
raise TypeError('%s cannot be converted to a float' % type(value))
def _check_type_path(self, value):
value = self._check_type_str(value)
return os.path.expanduser(os.path.expandvars(value))
def _check_type_jsonarg(self, value):
# Return a jsonified string. Sometimes the controller turns a json
# string into a dict/list so transform it back into json here
if isinstance(value, (text_type, binary_type)):
return value.strip()
else:
if isinstance(value, (list, tuple, dict)):
return self.jsonify(value)
raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
return value
def _check_type_bytes(self, value):
try:
return self.human_to_bytes(value)
except ValueError:
raise TypeError('%s cannot be converted to a Byte value' % type(value))
def _check_type_bits(self, value):
try:
return self.human_to_bytes(value, isbits=True)
except ValueError:
raise TypeError('%s cannot be converted to a Bit value' % type(value))
def _handle_options(self, argument_spec=None, params=None):
''' deal with options to create sub spec '''
if argument_spec is None:
argument_spec = self.argument_spec
if params is None:
params = self.params
for (k, v) in argument_spec.items():
wanted = v.get('type', None)
if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
spec = v.get('options', None)
if v.get('apply_defaults', False):
if spec is not None:
if params.get(k) is None:
params[k] = {}
else:
continue
elif spec is None or k not in params or params[k] is None:
continue
self._options_context.append(k)
if isinstance(params[k], dict):
elements = [params[k]]
else:
elements = params[k]
for param in elements:
if not isinstance(param, dict):
self.fail_json(msg="value of %s must be of type dict or list of dict" % k)
self._set_fallbacks(spec, param)
options_aliases = self._handle_aliases(spec, param)
self._handle_no_log_values(spec, param)
options_legal_inputs = list(spec.keys()) + list(options_aliases.keys())
self._check_arguments(self.check_invalid_arguments, spec, param, options_legal_inputs)
# check exclusive early
if not self.bypass_checks:
self._check_mutually_exclusive(v.get('mutually_exclusive', None), param)
self._set_defaults(pre=True, spec=spec, param=param)
if not self.bypass_checks:
self._check_required_arguments(spec, param)
self._check_argument_types(spec, param)
self._check_argument_values(spec, param)
self._check_required_together(v.get('required_together', None), param)
self._check_required_one_of(v.get('required_one_of', None), param)
self._check_required_if(v.get('required_if', None), param)
self._set_defaults(pre=False, spec=spec, param=param)
# handle multi level options (sub argspec)
self._handle_options(spec, param)
self._options_context.pop()
def _check_argument_types(self, spec=None, param=None):
''' ensure all arguments have the requested type '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
wanted = v.get('type', None)
if k not in param:
continue
value = param[k]
if value is None:
continue
if not callable(wanted):
if wanted is None:
# Mostly we want to default to str.
# For values set to None explicitly, return None instead as
# that allows a user to unset a parameter
if param[k] is None:
continue
wanted = 'str'
try:
type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
except KeyError:
self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
else:
# set the type_checker to the callable, and reset wanted to the callable's name (or type if it doesn't have one, ala MagicMock)
type_checker = wanted
wanted = getattr(wanted, '__name__', to_native(type(wanted)))
try:
param[k] = type_checker(value)
except (TypeError, ValueError) as e:
self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s: %s" %
(k, type(value), wanted, to_native(e)))
def _set_defaults(self, pre=True, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
default = v.get('default', None)
if pre is True:
# this prevents setting defaults on required items
if default is not None and k not in param:
param[k] = default
else:
# make sure things without a default still get set None
if k not in param:
param[k] = default
def _set_fallbacks(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in param and fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
param[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
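# Fallback sketch (env_fallback is the strategy usually shipped alongside this
# class; the option and environment variable names here are assumptions):
#   argument_spec = dict(
#       api_token=dict(type='str', no_log=True,
#                      fallback=(env_fallback, ['EXAMPLE_API_TOKEN'])),
#   )
# When 'api_token' is absent, fallback_strategy(*fallback_args) runs, and an
# AnsibleFallbackNotFound simply leaves the parameter unset.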
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, binary_type):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (binary_type, text_type)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, binary_type):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
journal_args.append((arg.upper(), str(log_args[arg])))
try:
if HAS_SYSLOG:
# If syslog_facility specified, it needs to convert
# from the facility name to the facility code, and
# set it as SYSLOG_FACILITY argument of journal.send()
facility = getattr(syslog,
self._syslog_facility,
syslog.LOG_USER) >> 3
journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
SYSLOG_FACILITY=facility,
**dict(journal_args))
else:
journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
**dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
# try to capture all passwords/passphrase named fields missed by no_log
elif PASSWORD_MATCH.search(param) and arg_opts.get('type', 'str') != 'bool' and not arg_opts.get('choices', False):
# skip boolean and enums as they are about 'password' state
log_args[param] = 'NOT_LOGGING_PASSWORD'
self.warn('Module did not set no_log for %s' % param)
else:
param_val = self.params[param]
if not isinstance(param_val, (text_type, binary_type)):
param_val = str(param_val)
elif isinstance(param_val, text_type):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK | os.R_OK):
raise Exception()
return cwd
except Exception:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [self.tmpdir, os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK | os.R_OK):
os.chdir(cwd)
return cwd
except Exception:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=None):
'''
Find system executable in PATH.
:param arg: The executable to find.
:param required: if executable is not found and required is ``True``, fail_json
:param opt_dirs: optional list of directories to search in addition to ``PATH``
:returns: if found return full path; otherwise return None
'''
bin_path = None
try:
bin_path = get_bin_path(arg, required, opt_dirs)
except ValueError as e:
self.fail_json(msg=to_text(e))
return bin_path
def boolean(self, arg):
'''Convert the argument to a boolean'''
if arg is None:
return arg
try:
return boolean(arg)
except TypeError as e:
self.fail_json(msg=to_native(e))
def jsonify(self, data):
try:
return jsonify(data)
except UnicodeError as e:
self.fail_json(msg=to_text(e))
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def _return_formatted(self, kwargs):
self.add_path_info(kwargs)
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
if 'warnings' in kwargs:
if isinstance(kwargs['warnings'], list):
for w in kwargs['warnings']:
self.warn(w)
else:
self.warn(kwargs['warnings'])
if self._warnings:
kwargs['warnings'] = self._warnings
if 'deprecations' in kwargs:
if isinstance(kwargs['deprecations'], list):
for d in kwargs['deprecations']:
if isinstance(d, SEQUENCETYPE) and len(d) == 2:
self.deprecate(d[0], version=d[1])
elif isinstance(d, Mapping):
self.deprecate(d['msg'], version=d.get('version', None))
else:
self.deprecate(d)
else:
self.deprecate(kwargs['deprecations'])
if self._deprecations:
kwargs['deprecations'] = self._deprecations
kwargs = remove_values(kwargs, self.no_log_values)
print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
''' return from the module, without error '''
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(0)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
if 'msg' not in kwargs:
raise AssertionError("implementation error -- msg to explain the error is required")
kwargs['failed'] = True
# Add traceback if debug or high verbosity and it is missing
# NOTE: Badly named as exception, it really always has been a traceback
if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
if PY2:
# On Python 2 this is the last (stack frame) exception and as such may be unrelated to the failure
kwargs['exception'] = 'WARNING: The below traceback may *not* be related to the actual failure.\n' +\
''.join(traceback.format_tb(sys.exc_info()[2]))
else:
kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(1)
def fail_on_missing_params(self, required_params=None):
''' This is for checking for required params when we cannot check via argspec because we
need more information than is simply given in the argspec.
'''
if not required_params:
return
missing_params = []
for required_param in required_params:
if not self.params.get(required_param):
missing_params.append(required_param)
if missing_params:
self.fail_json(msg="missing required arguments: %s" % ', '.join(missing_params))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
if not os.path.exists(filename):
return None
if os.path.isdir(filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(os.path.realpath(filename), 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
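# Usage sketch (the path is hypothetical):
#   checksum = self.digest_from_file('/etc/hosts', 'sha256')
# The legacy calling convention with a hash object is also still accepted:
#   checksum = self.digest_from_file('/etc/hosts', hashlib.sha256())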
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
def backup_local(self, fn):
'''make a date-marked backup of the specified file; return the backup path, or '' if the file does not exist'''
backupdest = ''
if os.path.exists(fn):
# backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
try:
self.preserved_copy(fn, backupdest)
except (shutil.Error, IOError) as e:
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e)))
return backupdest
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError as e:
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e)))
def preserved_copy(self, src, dest):
"""Copy a file with preserved ownership, permissions and context"""
# shutil.copy2(src, dst)
# Similar to shutil.copy(), but metadata is copied as well - in fact,
# this is just shutil.copy() followed by copystat(). This is similar
# to the Unix command cp -p.
#
# shutil.copystat(src, dst)
# Copy the permission bits, last access time, last modification time,
# and flags from src to dst. The file contents, owner, and group are
# unaffected. src and dst are path names given as strings.
shutil.copy2(src, dest)
# Set the context
if self.selinux_enabled():
context = self.selinux_context(src)
self.set_context_if_different(dest, context, False)
# chown it
try:
dest_stat = os.stat(src)
tmp_stat = os.stat(dest)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(dest, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
# Set the attributes
current_attribs = self.get_file_attributes(src)
current_attribs = current_attribs.get('attr_flags', '')
self.set_attributes_if_different(dest, current_attribs, True)
def atomic_move(self, src, dest, unsafe_writes=False):
'''atomically move src to dest, copying attributes from dest; returns True on success.
It uses os.rename to guarantee atomicity; the rest of the function works around
limitations and corner cases, and preserves the selinux context where possible'''
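# Usage sketch (paths are hypothetical): write a temp file first, then
# swap it into place so readers never observe a partially written file:
#   tmpfd, tmpname = tempfile.mkstemp(dir=self.tmpdir)
#   os.write(tmpfd, b'new contents')
#   os.close(tmpfd)
#   self.atomic_move(tmpname, '/etc/example.conf')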
context = None
dest_stat = None
b_src = to_bytes(src, errors='surrogate_or_strict')
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
try:
dest_stat = os.stat(b_dest)
# copy mode and ownership
os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
# try to copy flags if possible
if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
try:
os.chflags(b_src, dest_stat.st_flags)
except OSError as e:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
break
else:
raise
except OSError as e:
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(b_dest)
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(b_src, b_dest)
except (IOError, OSError) as e:
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
# only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied),
# 16 (device or resource busy) and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
else:
# Use bytes here. In the shippable CI, this fails with
# a UnicodeError with surrogateescape'd strings for an unknown
# reason (doesn't happen in a local Ubuntu16.04 VM)
b_dest_dir = os.path.dirname(b_dest)
b_suffix = os.path.basename(b_dest)
error_msg = None
tmp_dest_name = None
try:
tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=b'.ansible_tmp',
dir=b_dest_dir, suffix=b_suffix)
except (OSError, IOError) as e:
error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
except TypeError:
# We expect that this is happening because python3.4.x and
# below can't handle byte strings in mkstemp(). Traceback
# would end in something like:
# file = _os.path.join(dir, pre + name + suf)
# TypeError: can't concat bytes to str
error_msg = ('Failed creating tmp file for atomic move. This usually happens with Python3 versions older than Python 3.5. '
'Please use Python2.x or Python 3.5 or greater.')
finally:
if error_msg:
if unsafe_writes:
self._unsafe_writes(b_src, b_dest)
else:
self.fail_json(msg=error_msg, exception=traceback.format_exc())
if tmp_dest_name:
b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
try:
try:
# close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
os.close(tmp_dest_fd)
# leaves tmp file behind when sudo and not root
try:
shutil.move(b_src, b_tmp_dest_name)
except OSError:
# cleanup will happen by 'rm' of tmpdir
# copy2 will preserve some metadata
shutil.copy2(b_src, b_tmp_dest_name)
if self.selinux_enabled():
self.set_context_if_different(
b_tmp_dest_name, context, False)
try:
tmp_stat = os.stat(b_tmp_dest_name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
try:
os.rename(b_tmp_dest_name, b_dest)
except (shutil.Error, OSError, IOError) as e:
if unsafe_writes and e.errno == errno.EBUSY:
self._unsafe_writes(b_tmp_dest_name, b_dest)
else:
self.fail_json(msg='Unable to move %s to %s, failed final rename from %s: %s' %
(src, dest, b_tmp_dest_name, to_native(e)),
exception=traceback.format_exc())
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
finally:
self.cleanup(b_tmp_dest_name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
os.chmod(b_dest, DEFAULT_PERM & ~umask)
try:
os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
pass
if self.selinux_enabled():
# rename might not preserve context
self.set_context_if_different(dest, context, False)
def _unsafe_writes(self, src, dest):
# sadly there are some situations where we cannot ensure atomicity, but only if
# the user insists and we get the appropriate error we update the file unsafely
try:
out_dest = in_src = None
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)),
exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
data = b('')
if file_descriptor in rfds:
data = os.read(file_descriptor.fileno(), 9000)
if data == b(''):
rpipes.remove(file_descriptor)
return data
def _clean_args(self, args):
if not self._clean:
# create a printable version of the command for use in reporting later,
# which strips out things like passwords from the args list
to_clean_args = args
if PY2:
if isinstance(args, text_type):
to_clean_args = to_bytes(args)
else:
if isinstance(args, binary_type):
to_clean_args = to_text(args)
if isinstance(args, (text_type, binary_type)):
to_clean_args = shlex.split(to_clean_args)
clean_args = []
is_passwd = False
for arg in (to_native(a) for a in to_clean_args):
if is_passwd:
is_passwd = False
clean_args.append('********')
continue
if PASSWD_ARG_RE.match(arg):
sep_idx = arg.find('=')
if sep_idx > -1:
clean_args.append('%s=********' % arg[:sep_idx])
continue
else:
is_passwd = True
arg = heuristic_log_sanitize(arg, self.no_log_values)
clean_args.append(arg)
self._clean = ' '.join(shlex_quote(arg) for arg in clean_args)
return self._clean
def _restore_signal_handlers(self):
# Reset SIGPIPE to SIG_DFL, otherwise in Python2.7 it gets ignored in subprocesses.
if PY2 and sys.platform != 'win32':
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict',
expand_user_and_vars=True, pass_fds=None, before_communicate_callback=None):
'''
Execute a command, returns rc, stdout, and stderr.
:arg args: is the command to run
* If args is a list, the command will be run with shell=False.
* If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
* If args is a string and use_unsafe_shell=True it runs with shell=True.
:kw check_rc: Whether to call fail_json in case of non zero RC.
Default False
:kw close_fds: See documentation for subprocess.Popen(). Default True
:kw executable: See documentation for subprocess.Popen(). Default None
:kw data: If given, information to write to the stdin of the command
:kw binary_data: If False, append a newline to the data. Default False
:kw path_prefix: If given, additional path to find the command in.
This adds to the PATH environment variable so helper commands in
the same directory can also be found
:kw cwd: If given, working directory to run the command inside
:kw use_unsafe_shell: See `args` parameter. Default False
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kw environ_update: dictionary to *update* os.environ with
:kw umask: Umask to be used when running the command. Default None
:kw encoding: Since we return native strings, on python3 we need to
know the encoding to use to transform from bytes to text. If you
want to always get bytes back, use encoding=None. The default is
"utf-8". This does not affect transformation of strings given as
args.
:kw errors: Since we return native strings, on python3 we need to
transform stdout and stderr from bytes to text. If the bytes are
undecodable in the ``encoding`` specified, then use this error
handler to deal with them. The default is ``surrogate_or_strict``
which means that the bytes will be decoded using the
surrogateescape error handler if available (available on all
python3 versions we support) otherwise a UnicodeError traceback
will be raised. This does not affect transformations of strings
given as args.
:kw expand_user_and_vars: When ``use_unsafe_shell=False`` this argument
dictates whether ``~`` is expanded in paths and environment variables
are expanded before running the command. When ``True`` a string such as
``$SHELL`` will be expanded regardless of escaping. When ``False`` and
``use_unsafe_shell=False`` no path or variable expansion will be done.
:kw pass_fds: When running on python3 this argument
dictates which file descriptors should be passed
to an underlying ``Popen`` constructor.
:kw before_communicate_callback: This function will be called
after ``Popen`` object will be created
but before communicating to the process.
(``Popen`` object will be passed to callback as a first argument)
:returns: A 3-tuple of return code (integer), stdout (native string),
and stderr (native string). On python2, stdout and stderr are both
byte strings. On python3, stdout and stderr are text strings converted
according to the encoding and errors parameters. If you want byte
strings on python3, use encoding=None to turn decoding to text off.
'''
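# Usage sketches (the commands shown are illustrative, not from the original docs):
#   rc, out, err = self.run_command(['/bin/ls', '-l', '/tmp'], check_rc=True)
#   rc, out, err = self.run_command('cat', data='hello')              # feeds stdin
#   rc, out, err = self.run_command('echo $HOME', use_unsafe_shell=True)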
# used by clean args later on
self._clean = None
if not isinstance(args, (list, binary_type, text_type)):
msg = "Argument 'args' to run_command must be list or string"
self.fail_json(rc=257, cmd=args, msg=msg)
shell = False
if use_unsafe_shell:
# stringify args for unsafe/direct shell usage
if isinstance(args, list):
args = " ".join([shlex_quote(x) for x in args])
# not set explicitly, check if set by controller
if executable:
args = [executable, '-c', args]
elif self._shell not in (None, '/bin/sh'):
args = [self._shell, '-c', args]
else:
shell = True
else:
# ensure args are a list
if isinstance(args, (binary_type, text_type)):
# On python2.6 and below, shlex has problems with text type
# On python3, shlex needs a text type.
if PY2:
args = to_bytes(args, errors='surrogate_or_strict')
elif PY3:
args = to_text(args, errors='surrogateescape')
args = shlex.split(args)
# expand ``~`` in paths, and all environment vars
if expand_user_and_vars:
args = [os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None]
else:
args = [x for x in args if x is not None]
prompt_re = None
if prompt_regex:
if isinstance(prompt_regex, text_type):
if PY3:
prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
elif PY2:
prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
try:
prompt_re = re.compile(prompt_regex, re.MULTILINE)
except re.error:
self.fail_json(msg="invalid prompt regular expression given to run_command")
rc = 0
msg = None
st_in = None
# Manipulate the environ we'll send to the new process
old_env_vals = {}
# We can set this from both an attribute and per call
for key, val in self.run_command_environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if environ_update:
for key, val in environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if path_prefix:
old_env_vals['PATH'] = os.environ['PATH']
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# If using test-module and explode, the remote lib path will resemble ...
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system ...
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ansiballz
if 'PYTHONPATH' in os.environ:
pypaths = os.environ['PYTHONPATH'].split(':')
pypaths = [x for x in pypaths
if not x.endswith('/ansible_modlib.zip') and
not x.endswith('/debug_dir')]
os.environ['PYTHONPATH'] = ':'.join(pypaths)
if not os.environ['PYTHONPATH']:
del os.environ['PYTHONPATH']
if data:
st_in = subprocess.PIPE
kwargs = dict(
executable=executable,
shell=shell,
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=self._restore_signal_handlers,
)
if PY3 and pass_fds:
kwargs["pass_fds"] = pass_fds
# store the pwd
prev_dir = os.getcwd()
# make sure we're in the right working directory
if cwd and os.path.isdir(cwd):
cwd = os.path.abspath(os.path.expanduser(cwd))
kwargs['cwd'] = cwd
try:
os.chdir(cwd)
except (OSError, IOError) as e:
self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, to_native(e)),
exception=traceback.format_exc())
old_umask = None
if umask:
old_umask = os.umask(umask)
try:
if self._debug:
self.log('Executing: ' + self._clean_args(args))
cmd = subprocess.Popen(args, **kwargs)
if before_communicate_callback:
before_communicate_callback(cmd)
# the communication logic here is essentially taken from that
# of the _communicate() function in ssh.py
stdout = b('')
stderr = b('')
rpipes = [cmd.stdout, cmd.stderr]
if data:
if not binary_data:
data += '\n'
if isinstance(data, text_type):
data = to_bytes(data)
cmd.stdin.write(data)
cmd.stdin.close()
while True:
rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
if encoding:
stdout = to_native(stdout, encoding=encoding, errors=errors)
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfds) and cmd.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually cmd.poll() is always None here if rpipes is empty
elif not rpipes and cmd.poll() is None:
cmd.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
cmd.stdout.close()
cmd.stderr.close()
rc = cmd.returncode
except (OSError, IOError) as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(e)))
self.fail_json(rc=e.errno, msg=to_native(e), cmd=self._clean_args(args))
except Exception as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(traceback.format_exc())))
self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
# Restore env settings
for key, val in old_env_vals.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
if old_umask:
os.umask(old_umask)
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=self._clean_args(args), rc=rc, stdout=stdout, stderr=stderr, msg=msg)
# reset the pwd
os.chdir(prev_dir)
if encoding is not None:
return (rc, to_native(stdout, encoding=encoding, errors=errors),
to_native(stderr, encoding=encoding, errors=errors))
return (rc, stdout, stderr)
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def bytes_to_human(self, size):
return bytes_to_human(size)
# for backwards compatibility
pretty_bytes = bytes_to_human
def human_to_bytes(self, number, isbits=False):
return human_to_bytes(number, isbits)
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
| gpl-3.0 | -3,507,961,514,487,396,400 | 38.904795 | 155 | 0.552419 | false |
andresailer/DIRAC | Core/Utilities/Distribution.py | 4 | 16711 | # $HeadURL$
__RCSID__ = "$Id$"
import urllib2, re, tarfile, os, types, sys, subprocess, urlparse, tempfile
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities import CFG, File, List
class Distribution:
cernAnonRoot = 'http://svn.cern.ch/guest/dirac'
googleAnonRoot = 'http://dirac-grid.googlecode.com/svn'
cernDevRoot = 'svn+ssh://svn.cern.ch/reps/dirac'
googleDevRoot = 'https://dirac-grid.googlecode.com/svn'
anonymousSVNRoot = { 'global' : cernAnonRoot,
'DIRAC' : cernAnonRoot,
'LHCbDIRAC' : cernAnonRoot,
'LHCbVMDIRAC' : cernAnonRoot,
'LHCbWebDIRAC' : cernAnonRoot,
'BelleDIRAC' : googleAnonRoot,
'MagicDIRAC' : googleAnonRoot,
'CTADIRAC' : googleAnonRoot,
'EELADIRAC' : googleAnonRoot,
'ILCDIRAC' : cernAnonRoot,
'Docs' : googleAnonRoot,
}
devSVNRoot = { 'global' : cernDevRoot,
'DIRAC' : cernDevRoot,
'LHCbDIRAC' : cernDevRoot,
'LHCbVMDIRAC' : cernDevRoot,
'LHCbWebDIRAC' : cernDevRoot,
'ILCDIRAC' : cernDevRoot,
'BelleDIRAC' : googleDevRoot,
'MagicDIRAC' : googleDevRoot,
'CTADIRAC' : googleDevRoot,
'EELADIRAC' : googleDevRoot,
'Docs' : googleDevRoot,
}
def __init__( self, package = False ):
if not package:
package = 'global'
if package not in Distribution.anonymousSVNRoot:
raise Exception( "Package %s does not have a registered svn root" % package )
self.package = package
self.svnRoot = Distribution.anonymousSVNRoot[ package ]
self.svnPass = False
self.svnUser = False
self.cmdQueue = []
def getSVNPathForPackage( self, package, path ):
if package not in self.anonymousSVNRoot:
return "%s/%s" % ( Distribution.cernAnonRoot, path )
return "%s/%s" % ( self.anonymousSVNRoot[ package ], path )
def getPackageName( self ):
return self.package
def getDevPath( self, path = False ):
devPath = Distribution.devSVNRoot[ self.package ]
if path:
devPath += "/%s" % path
return devPath
def setSVNPassword( self, password ):
self.svnPass = password
def setSVNUser( self, user ):
self.svnUser = user
def addCommandToQueue( self, cmd ):
self.cmdQueue.append( cmd )
def executeCommandQueue( self ):
while self.cmdQueue:
if not self.executeCommand( self.cmdQueue.pop( 0 ), getOutput = False ):
return False
return True
def emptyQueue( self ):
return len( self.cmdQueue ) == 0
def getRepositoryVersions( self ):
if self.package == 'global' :
webLocation = "%s/tags" % self.svnRoot
else:
webLocation = '%s/%s/tags/%s' % ( self.svnRoot, self.package, self.package )
try:
remoteFile = urllib2.urlopen( webLocation )
except urllib2.URLError:
gLogger.exception()
sys.exit( 2 )
remoteData = remoteFile.read()
remoteFile.close()
if not remoteData:
gLogger.error( "Could not retrieve versions for package", self.package )
sys.exit( 1 )
versions = []
rePackage = ".*"
versionRE = re.compile( "<li> *<a *href=.*> *(%s)/ *</a> *</li>" % rePackage )
for line in remoteData.split( "\n" ):
res = versionRE.search( line )
if res:
versions.append( res.groups()[0] )
return versions
def getSVNFileContents( self, svnPath ):
gLogger.info( "Reading %s from %s" % ( svnPath, self.svnRoot) )
remoteLocation = "%s/%s" % ( self.svnRoot, svnPath )
try:
remoteFile = urllib2.urlopen( remoteLocation )
remoteData = remoteFile.read()
remoteFile.close()
if remoteData:
return remoteData
except Exception:
pass
#Web cat failed. Try directly with svn
exitStatus, remoteData = self.executeCommand( "svn cat '%s" % remoteLocation )
if exitStatus:
print "Error: Could not retrieve %s from the web nor via SVN. Aborting..." % svnPath
sys.exit( 1 )
return remoteData
def loadCFGFromRepository( self, svnPath ):
remoteData = self.getSVNFileContents( svnPath )
return CFG.CFG().loadFromBuffer( remoteData )
def getVersionsCFG( self ):
return self.loadCFGFromRepository( '%s/trunk/%s/versions.cfg' % ( self.package, self.package ) )
def executeCommand( self, cmd, getOutput = True ):
env = dict( os.environ )
if self.svnPass:
env[ 'SVN_PASSWORD' ] = self.svnPass
if not getOutput:
return subprocess.Popen( cmd, shell = True, env = env ).wait() == 0
#Get output
proc = subprocess.Popen( cmd,
shell = True, stdout = subprocess.PIPE,
stderr = subprocess.PIPE, close_fds = True, env = env )
stdData = proc.stdout.read()
proc.wait()
return ( proc.returncode, stdData )
def __getDevCmdBase( self, path ):
devRoot = self.getDevPath( path )
isHTTPS = False
urlRes = urlparse.urlparse( devRoot )
# Parse a URL into 6 components:
# <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
# (scheme, netloc, path, params, query, fragment)
args = []
if urlRes[0] == "https":
isHTTPS = True
if self.svnUser:
if isHTTPS:
args.append( "--username '%s'" % self.svnUser )
else:
urlRes = list( urlparse.urlparse( devRoot ) )
urlRes[1] = "%s@%s" % ( self.svnUser, urlRes[1] )
devRoot = urlparse.urlunparse( urlRes )
if self.svnPass and isHTTPS:
args.append( "--password '%s'" % self.svnPass )
return ( " ".join( args ), devRoot )
def doLS( self, path ):
destT = self.__getDevCmdBase( path )
cmd = "svn ls %s %s" % destT
return self.executeCommand( cmd, True )
def __cmdImport( self, origin, dest, comment ):
destT = self.__getDevCmdBase( dest )
cmd = "svn import -m '%s' %s '%s' '%s'" % ( comment, destT[0], origin, destT[1] )
return cmd
def queueImport( self, origin, dest, comment ):
self.addCommandToQueue( self.__cmdImport( origin, dest, comment ) )
def doImport( self, origin, dest, comment ):
return self.executeCommand( self.__cmdImport( origin, dest, comment ), False )
def __cmdCopy( self, origin, dest, comment ):
destT = self.__getDevCmdBase( dest )
orT = self.__getDevCmdBase( origin )
cmd = "svn copy -m '%s' %s '%s' '%s'" % ( comment, destT[0], orT[1], destT[1] )
return cmd
def queueCopy( self, origin, dest, comment ):
self.addCommandToQueue( self.__cmdCopy( origin, dest, comment ) )
def __cmdMultiCopy( self, originList, dest, comment ):
orList = [ "'%s'" % self.__getDevCmdBase( orPath )[1] for orPath in originList ]
destT = self.__getDevCmdBase( dest )
cmd = "svn copy -m '%s' %s %s '%s'" % ( comment, destT[0], " ".join( orList ), destT[1] )
return cmd
def queueMultiCopy( self, originList, dest, comment ):
self.addCommandToQueue( self.__cmdMultiCopy( originList, dest, comment ) )
# def doCopy( self, path, comment ):
# return self.executeCommand( self.__cmdCopy( origin, dest, comment ), False )
def __cmdMakeDir( self, path, comment ):
destT = self.__getDevCmdBase( path )
return "svn mkdir --parents -m '%s' %s %s" % ( comment, destT[0], destT[1] )
def queueMakeDir( self, path, comment ):
self.addCommandToQueue( self.__cmdMakeDir( path, comment ) )
def doMakeDir( self, path, comment ):
return self.executeCommand( self.__cmdMakeDir( path, comment ), False )
def doCheckout( self, path, location ):
destT = self.__getDevCmdBase( path )
cmd = "svn co %s '%s' '%s'" % ( destT[0], destT[1], location )
return self.executeCommand( cmd, False )
def doCommit( self, location, comment ):
destT = self.__getDevCmdBase( "" )
cmd = "svn ci -m '%s' %s '%s'" % ( comment, destT[0], location )
return self.executeCommand( cmd, False )
#Get copy revision
def getCopyRevision( self, location ):
destT = self.__getDevCmdBase( location )
cmd = "svn log --stop-on-copy %s '%s'" % ( destT[0], destT[1] )
exitCode, outData = self.executeCommand( cmd )
if exitCode:
return 0
copyRev = 0
revRE = re.compile( r"r([0-9]+)\s*\|\s*(\w+).*" )
for line in List.fromChar( outData, "\n" ):
reM = revRE.match( line )
if reM:
copyRev = reM.groups()[0]
return copyRev
#
def writeVersionToTmpInit( self, version ):
verTup = parseVersionString( version )
if not verTup:
return False
destT = self.__getDevCmdBase( "%s/trunk/%s/__init__.py" % ( self.package, self.package ) )
cmd = "svn cat %s '%s'" % ( destT[0], destT[1] )
exitCode, outData = self.executeCommand( cmd )
if exitCode:
return False
tmpfd, tmpname = tempfile.mkstemp()
versionStrings = ( "majorVersion", "minorVersion", "patchLevel", "preVersion" )
reList = []
for iP in range( len( versionStrings ) ):
if verTup[iP]:
replStr = "%s = %s" % ( versionStrings[iP], verTup[iP] )
else:
replStr = "%s = 0" % versionStrings[iP]
reList.append( ( re.compile( r"^(%s\s*=)\s*[0-9]+\s*" % versionStrings[iP] ), replStr ) )
for line in outData.split( "\n" ):
for reCm, replStr in reList:
line = reCm.sub( replStr, line )
os.write( tmpfd, "%s\n" % line )
os.close( tmpfd )
return tmpname
#End of Distribution class
gVersionRE = re.compile( "v([0-9]+)(?:r([0-9]+))?(?:p([0-9]+))?(?:-pre([0-9]+))?" )
def parseVersionString( version ):
result = gVersionRE.match( version.strip() )
if not result:
return False
vN = []
for e in result.groups():
if e:
vN.append( int( e ) )
else:
vN.append( None )
return tuple( vN )
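# Worked examples for parseVersionString, derived from gVersionRE above:
#   parseVersionString( "v6r20p14" )  -> ( 6, 20, 14, None )
#   parseVersionString( "v7r0-pre2" ) -> ( 7, 0, None, 2 )
#   parseVersionString( "banana" )    -> False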
def writeVersionToInit( rootPath, version ):
verTup = parseVersionString( version )
if not verTup:
return S_OK()
initFile = os.path.join( rootPath, "__init__.py" )
if not os.path.isfile( initFile ):
return S_OK()
try:
fd = open( initFile, "r" )
fileData = fd.read()
fd.close()
except Exception as e:
return S_ERROR( "Could not open %s: %s" % ( initFile, str( e ) ) )
versionStrings = ( "majorVersion", "minorVersion", "patchLevel", "preVersion" )
reList = []
for iP in range( len( versionStrings ) ):
if verTup[iP]:
replStr = "%s = %s" % ( versionStrings[iP], verTup[iP] )
else:
replStr = "%s = 0" % versionStrings[iP]
reList.append( ( re.compile( r"^(%s\s*=)\s*[0-9]+\s*" % versionStrings[iP] ), replStr ) )
newData = []
for line in fileData.split( "\n" ):
for reCm, replStr in reList:
line = reCm.sub( replStr, line )
newData.append( line )
try:
fd = open( initFile, "w" )
fd.write( "\n".join( newData ) )
fd.close()
except Exception as e:
return S_ERROR( "Could write to %s: %s" % ( initFile, str( e ) ) )
return S_OK()
#
def createTarball( tarballPath, directoryToTar, additionalDirectoriesToTar = None ):
tf = tarfile.open( tarballPath, "w:gz" )
tf.add( directoryToTar, os.path.basename( os.path.abspath( directoryToTar ) ), recursive = True )
if isinstance( additionalDirectoriesToTar, basestring ):
additionalDirectoriesToTar = [ additionalDirectoriesToTar ]
if additionalDirectoriesToTar:
for dirToTar in additionalDirectoriesToTar:
if os.path.isdir( dirToTar ):
tf.add( dirToTar, os.path.basename( os.path.abspath( dirToTar ) ), recursive = True )
tf.close()
md5FilePath = False
for suffix in ( ".tar.gz", ".gz" ):
sLen = len( suffix )
if tarballPath[ len( tarballPath ) - sLen: ] == suffix:
md5FilePath = "%s.md5" % tarballPath[:-sLen]
break
if not md5FilePath:
return S_ERROR( "Could not generate md5 filename" )
md5str = File.getMD5ForFiles( [ tarballPath ] )
fd = open( md5FilePath, "w" )
fd.write( md5str )
fd.close()
return S_OK()
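# Usage sketch (paths are hypothetical): tar one tree plus an extra directory,
# leaving an md5 checksum file next to the tarball:
#   createTarball( "/tmp/DIRAC-v6r20.tar.gz", "/opt/releases/DIRAC",
#                  additionalDirectoriesToTar = [ "/opt/releases/defaults" ] )
#   # -> writes /tmp/DIRAC-v6r20.tar.gz and /tmp/DIRAC-v6r20.md5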
#Start of release notes
gAllowedNoteTypes = ( "NEW", "CHANGE", "BUGFIX", 'FIX' )
gNoteTypeAlias = { 'FIX' : 'BUGFIX' }
def retrieveReleaseNotes( packages ):
if isinstance( packages, basestring ):
packages = [ str( packages ) ]
packageCFGDict = {}
#Get the versions.cfg
for package in packages:
packageCFGDict[ package ] = Distribution( package ).getVersionsCFG()
#Parse the release notes
pkgNotesDict = {}
for package in packageCFGDict:
versionsCFG = packageCFGDict[ package ][ 'Versions' ]
pkgNotesDict[ package ] = []
for mainVersion in versionsCFG.listSections( ordered = True ):
vCFG = versionsCFG[ mainVersion ]
versionNotes = {}
for subsys in vCFG.listOptions():
comment = vCFG.getComment( subsys )
if not comment:
continue
versionNotes[ subsys ] = {}
lines = List.fromChar( comment, "\n" )
lastCommentType = False
for line in lines:
processedLine = False
for typeComment in gAllowedNoteTypes:
if line.find( "%s:" % typeComment ) == 0:
if typeComment in gNoteTypeAlias:
effectiveType = gNoteTypeAlias[ typeComment ]
else:
effectiveType = typeComment
if effectiveType not in versionNotes[ subsys ]:
versionNotes[ subsys ][ effectiveType ] = []
versionNotes[ subsys ][ effectiveType ].append( line[ len( typeComment ) + 1: ].strip() )
lastCommentType = effectiveType
processedLine = True
if not processedLine and lastCommentType:
versionNotes[ subsys ][ lastCommentType ][-1] += " %s" % line.strip()
if versionNotes:
pkgNotesDict[ package ].append( { 'version' : mainVersion, 'notes' : versionNotes } )
versionComment = versionsCFG.getComment( mainVersion )
if versionComment:
pkgNotesDict[ package ][-1][ 'comment' ] = "\n".join( [ l.strip() for l in versionComment.split( "\n" ) ] )
return pkgNotesDict
def generateReleaseNotes( packages, destinationPath, versionReleased = "", singleVersion = False ):
if isinstance( packages, basestring ):
packages = [ str( packages ) ]
pkgNotesDict = retrieveReleaseNotes( packages )
fileContents = []
foundStartVersion = versionReleased == ""
for package in packages:
if package not in pkgNotesDict:
continue
#Add a section with the package name
dummy = "Package %s" % package
fileContents.append( "-" * len( dummy ) )
fileContents.append( dummy )
fileContents.append( "-" * len( dummy ) )
vNotesDict = pkgNotesDict[ package ]
for versionNotes in vNotesDict:
if singleVersion and versionReleased and versionNotes[ 'version' ] != versionReleased:
continue
if versionReleased and versionReleased == versionNotes[ 'version' ]:
foundStartVersion = True
#Skip until found initial version
if not foundStartVersion:
continue
dummy = "Version %s" % versionNotes[ 'version' ]
fileContents.append( "" )
fileContents.append( dummy )
fileContents.append( "-" * len( dummy ) )
if 'comment' in versionNotes:
fileContents.extend( [ '', versionNotes[ 'comment' ], '' ] )
for noteType in gAllowedNoteTypes:
notes4Type = []
for system in versionNotes[ 'notes' ]:
if noteType in versionNotes[ 'notes' ][ system ] and versionNotes[ 'notes' ][ system ][ noteType ]:
notes4Type.append( " %s" % system )
for line in versionNotes[ 'notes' ][ system ][ noteType ]:
notes4Type.append( " - %s" % line )
if notes4Type:
fileContents.append( "" )
fileContents.append( "%s" % noteType )
fileContents.append( ":" * len( noteType ) )
fileContents.append( "" )
fileContents.extend( notes4Type )
fd = open( destinationPath, "w" )
fd.write( "%s\n\n" % "\n".join( fileContents ) )
fd.close()
def generateHTMLReleaseNotesFromRST( rstFile, htmlFile ):
try:
import docutils.core
except ImportError:
gLogger.error( "Docutils is not installed, skipping generation of release notes in html format" )
return False
try:
fd = open( rstFile )
rstData = fd.read()
fd.close()
except Exception:
gLogger.error( "Oops! Could not read the rst file :P" )
return False
parts = docutils.core.publish_parts( rstData, writer_name = 'html' )
try:
fd = open( htmlFile, "w" )
fd.write( parts[ 'whole' ] )
fd.close()
except Exception:
gLogger.error( "Oops! Could not write the html file :P" )
return False
return True
| gpl-3.0 | 2,413,525,882,851,796,500 | 35.015086 | 115 | 0.611094 | false |
LIS/lis-tempest | tempest/api/identity/v2/test_users.py | 4 | 3312 | # Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import time
from tempest.api.identity import base
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions
from tempest import manager
from tempest import test
class IdentityUsersTest(base.BaseIdentityV2Test):
@classmethod
def resource_setup(cls):
super(IdentityUsersTest, cls).resource_setup()
cls.creds = cls.os.credentials
cls.username = cls.creds.username
cls.password = cls.creds.password
cls.tenant_name = cls.creds.tenant_name
@test.idempotent_id('165859c9-277f-4124-9479-a7d1627b0ca7')
def test_user_update_own_password(self):
self.new_creds = copy.copy(self.creds.credentials)
self.new_creds.password = data_utils.rand_password()
# we need new non-admin Identity Client with new credentials, since
# current non_admin_client token will be revoked after updating
# password
self.non_admin_users_client_for_cleanup = copy.copy(
self.non_admin_users_client)
self.non_admin_users_client_for_cleanup.auth_provider = (
manager.get_auth_provider(self.new_creds))
user_id = self.creds.credentials.user_id
old_pass = self.creds.credentials.password
new_pass = self.new_creds.password
# to change password back. important for allow_tenant_isolation = false
self.addCleanup(
self.non_admin_users_client_for_cleanup.update_user_own_password,
user_id, original_password=new_pass, password=old_pass)
# user updates own password
self.non_admin_users_client.update_user_own_password(
user_id, password=new_pass, original_password=old_pass)
# NOTE(morganfainberg): Fernet tokens are not subsecond aware and
# Keystone should only be precise to the second. Sleep to ensure
# we are passing the second boundary.
time.sleep(1)
# check authorization with new password
self.non_admin_token_client.auth(self.username,
new_pass,
self.tenant_name)
# authorize with old token should lead to Unauthorized
self.assertRaises(exceptions.Unauthorized,
self.non_admin_token_client.auth_token,
self.non_admin_users_client.token)
# authorize with old password should lead to Unauthorized
self.assertRaises(exceptions.Unauthorized,
self.non_admin_token_client.auth,
self.username,
old_pass,
self.tenant_name)
| apache-2.0 | -4,845,776,916,875,552,000 | 41.461538 | 79 | 0.654891 | false |
noironetworks/horizon | openstack_dashboard/dashboards/project/volume_groups/tests.py | 1 | 15316 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from django.urls import reverse
from django.utils.http import urlunquote
import mock
from openstack_dashboard.api import cinder
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:project:volume_groups:index')
VOLUME_GROUPS_SNAP_INDEX_URL = urlunquote(reverse(
'horizon:project:vg_snapshots:index'))
def create_mocks(target, methods):
def wrapper(function):
@functools.wraps(function)
def wrapped(inst, *args, **kwargs):
for method in methods:
if isinstance(method, str):
method_mocked = method
attr_name = method
else:
method_mocked = method[0]
attr_name = method[1]
m = mock.patch.object(target, method_mocked)
setattr(inst, 'mock_%s' % attr_name, m.start())
return function(inst, *args, **kwargs)
return wrapped
return wrapper
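# Usage sketch of the decorator above (mirrors the tests below): each entry is
# either a method name or a (method, attribute) pair, and the started mock is
# exposed on the test case as ``self.mock_<attribute>``:
#   @create_mocks(cinder, ['group_list', ('group_create', 'create')])
#   def test_something(self):
#       self.mock_group_list.return_value = []
#       self.mock_create.return_value = None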
class VolumeGroupTests(test.TestCase):
@create_mocks(cinder, [
'extension_supported',
'availability_zone_list',
'volume_type_list',
'group_list',
'group_type_list',
'group_create',
])
def test_create_group(self):
group = self.cinder_groups.first()
volume_types = self.cinder_volume_types.list()
volume_type_id = self.cinder_volume_types.first().id
selected_types = [volume_type_id]
az = self.cinder_availability_zones.first().zoneName
formData = {
'volume_types': '1',
'name': 'test VG',
'description': 'test desc',
'availability_zone': az,
'group_type': group.group_type,
'add_vtypes_to_group_role_member': selected_types,
}
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_volume_type_list.return_value = volume_types
self.mock_group_list.return_value = self.cinder_groups.list()
self.mock_group_type_list.return_value = self.cinder_group_types.list()
self.mock_group_create.return_value = group
url = reverse('horizon:project:volume_groups:create')
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_availability_zone_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_volume_type_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_group_list.assert_called_once_with(test.IsHttpRequest())
self.mock_group_type_list.assert_called_once_with(test.IsHttpRequest())
self.mock_group_create.assert_called_once_with(
test.IsHttpRequest(),
formData['name'],
formData['group_type'],
selected_types,
description=formData['description'],
availability_zone=formData['availability_zone'])
@create_mocks(cinder, [
'extension_supported',
'availability_zone_list',
'volume_type_list',
'group_list',
'group_type_list',
'group_create',
])
def test_create_group_exception(self):
group = self.cinder_groups.first()
volume_types = self.cinder_volume_types.list()
volume_type_id = self.cinder_volume_types.first().id
selected_types = [volume_type_id]
az = self.cinder_availability_zones.first().zoneName
formData = {
'volume_types': '1',
'name': 'test VG',
'description': 'test desc',
'availability_zone': az,
'group_type': group.group_type,
'add_vtypes_to_group_role_member': selected_types,
}
self.mock_extension_supported.return_value = True
self.mock_availability_zone_list.return_value = \
self.cinder_availability_zones.list()
self.mock_volume_type_list.return_value = volume_types
self.mock_group_list.return_value = self.cinder_groups.list()
self.mock_group_type_list.return_value = self.cinder_group_types.list()
self.mock_group_create.side_effect = self.exceptions.cinder
url = reverse('horizon:project:volume_groups:create')
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.assertIn("Unable to create group.",
res.cookies.output())
self.mock_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'AvailabilityZones')
self.mock_availability_zone_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_volume_type_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_group_list.assert_called_once_with(test.IsHttpRequest())
self.mock_group_type_list.assert_called_once_with(test.IsHttpRequest())
self.mock_group_create.assert_called_once_with(
test.IsHttpRequest(),
formData['name'],
formData['group_type'],
selected_types,
description=formData['description'],
availability_zone=formData['availability_zone'])
@create_mocks(cinder, ['group_get', 'group_delete'])
def test_delete_group(self):
group = self.cinder_groups.first()
self.mock_group_get.return_value = group
self.mock_group_delete.return_value = None
url = reverse('horizon:project:volume_groups:delete',
args=[group.id])
res = self.client.post(url)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_group_get.assert_called_once_with(test.IsHttpRequest(),
group.id)
self.mock_group_delete.assert_called_once_with(test.IsHttpRequest(),
group.id,
delete_volumes=False)
@create_mocks(cinder, ['group_get', 'group_delete'])
def test_delete_group_delete_volumes_flag(self):
group = self.cinder_consistencygroups.first()
formData = {'delete_volumes': True}
self.mock_group_get.return_value = group
self.mock_group_delete.return_value = None
url = reverse('horizon:project:volume_groups:delete',
args=[group.id])
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_group_get.assert_called_once_with(test.IsHttpRequest(),
group.id)
self.mock_group_delete.assert_called_once_with(test.IsHttpRequest(),
group.id,
delete_volumes=True)
@create_mocks(cinder, ['group_get', 'group_delete'])
def test_delete_group_exception(self):
group = self.cinder_groups.first()
formData = {'delete_volumes': False}
self.mock_group_get.return_value = group
self.mock_group_delete.side_effect = self.exceptions.cinder
url = reverse('horizon:project:volume_groups:delete',
args=[group.id])
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_group_get.assert_called_once_with(test.IsHttpRequest(),
group.id)
self.mock_group_delete.assert_called_once_with(test.IsHttpRequest(),
group.id,
delete_volumes=False)
def test_update_group_add_vol(self):
self._test_update_group_add_remove_vol(add=True)
def test_update_group_remove_vol(self):
self._test_update_group_add_remove_vol(add=False)
@create_mocks(cinder, ['volume_list',
'volume_type_list',
'group_get',
'group_update'])
def _test_update_group_add_remove_vol(self, add=True):
group = self.cinder_groups.first()
volume_types = self.cinder_volume_types.list()
volumes = (self.cinder_volumes.list() +
self.cinder_group_volumes.list())
group_voltype_names = [t.name for t in volume_types
if t.id in group.volume_types]
compat_volumes = [v for v in volumes
if v.volume_type in group_voltype_names]
compat_volume_ids = [v.id for v in compat_volumes]
assigned_volume_ids = [v.id for v in compat_volumes
if getattr(v, 'group_id', None)]
add_volume_ids = [v.id for v in compat_volumes
if v.id not in assigned_volume_ids]
        new_volumes = compat_volume_ids if add else []
formData = {
'default_add_volumes_to_group_role': 'member',
            'add_volumes_to_group_role_member': new_volumes,
}
self.mock_volume_list.return_value = volumes
self.mock_volume_type_list.return_value = volume_types
self.mock_group_get.return_value = group
self.mock_group_update.return_value = group
url = reverse('horizon:project:volume_groups:manage',
args=[group.id])
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_volume_list, 2,
mock.call(test.IsHttpRequest()))
self.mock_volume_type_list.assert_called_once_with(
test.IsHttpRequest())
self.mock_group_get.assert_called_once_with(
test.IsHttpRequest(), group.id)
if add:
self.mock_group_update.assert_called_once_with(
test.IsHttpRequest(), group.id,
add_volumes=add_volume_ids,
remove_volumes=[])
else:
self.mock_group_update.assert_called_once_with(
test.IsHttpRequest(), group.id,
add_volumes=[],
remove_volumes=assigned_volume_ids)
@create_mocks(cinder, ['group_get', 'group_update'])
def test_update_group_name_and_description(self):
group = self.cinder_groups.first()
formData = {'name': 'test VG-new',
'description': 'test desc-new'}
self.mock_group_get.return_value = group
self.mock_group_update.return_value = group
url = reverse('horizon:project:volume_groups:update',
args=[group.id])
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_group_get.assert_called_once_with(
test.IsHttpRequest(), group.id)
self.mock_group_update.assert_called_once_with(
test.IsHttpRequest(), group.id,
formData['name'],
formData['description'])
@create_mocks(cinder, ['group_get', 'group_update'])
def test_update_group_with_exception(self):
group = self.cinder_groups.first()
formData = {'name': 'test VG-new',
'description': 'test desc-new'}
self.mock_group_get.return_value = group
self.mock_group_update.side_effect = self.exceptions.cinder
url = reverse('horizon:project:volume_groups:update',
args=[group.id])
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_group_get.assert_called_once_with(
test.IsHttpRequest(), group.id)
self.mock_group_update.assert_called_once_with(
test.IsHttpRequest(), group.id,
formData['name'],
formData['description'])
@mock.patch.object(cinder, 'group_get')
def test_detail_view_with_exception(self, mock_group_get):
group = self.cinder_groups.first()
mock_group_get.side_effect = self.exceptions.cinder
url = reverse('horizon:project:volume_groups:detail',
args=[group.id])
res = self.client.get(url)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
mock_group_get.assert_called_once_with(
test.IsHttpRequest(), group.id)
@create_mocks(cinder, ['group_snapshot_create'])
def test_create_snapshot(self):
group = self.cinder_groups.first()
group_snapshot = self.cinder_group_snapshots.first()
formData = {'name': 'test VG Snapshot',
'description': 'test desc'}
self.mock_group_snapshot_create.return_value = group_snapshot
url = reverse('horizon:project:volume_groups:create_snapshot',
args=[group.id])
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, VOLUME_GROUPS_SNAP_INDEX_URL)
self.mock_group_snapshot_create.assert_called_once_with(
test.IsHttpRequest(),
group.id,
formData['name'],
formData['description'])
@create_mocks(cinder, ['group_get',
'group_create_from_source'])
def test_create_clone(self):
group = self.cinder_groups.first()
formData = {
'group_source': group.id,
'name': 'test VG Clone',
'description': 'test desc',
}
self.mock_group_get.return_value = group
self.mock_group_create_from_source.return_value = group
url = reverse('horizon:project:volume_groups:clone_group',
args=[group.id])
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_group_get.assert_called_once_with(
test.IsHttpRequest(), group.id)
self.mock_group_create_from_source.assert_called_once_with(
test.IsHttpRequest(),
formData['name'],
source_group_id=group.id,
description=formData['description'])
| apache-2.0 | 6,501,920,724,235,035,000 | 39.518519 | 79 | 0.591799 | false |
simar7/build-mozharness | configs/vcs_sync/build-repos.py | 3 | 2940 | import os
import socket
hostname = socket.gethostname()
build_repos = (
'autoland',
'buildapi',
'buildbot-configs',
'buildbotcustom',
'mozharness',
'opsi-package-sources',
'partner-repacks',
'preproduction',
'puppet',
'puppet-manifests',
'rpm-sources',
'talos',
'tools',
)
conversion_repos = []
remote_targets = {}
for repo in build_repos:
conversion_repos.append({
"repo": "https://hg.mozilla.org/build/%s" % repo,
"repo_name": "build-%s" % repo,
"conversion_dir": "build-%s" % repo,
"targets": [{
"target_dest": "build-%s-github" % repo,
"force_push": True
}],
"vcs": "hg",
"mapper": {
"url": "https://api.pub.build.mozilla.org/mapper",
"project": "build-%s" % repo,
},
"branch_config": {
"branches": {
"default": "master",
},
"branch_regexes": [
"^.*$"
]
},
# Bug 1036819 - build/* repos currently not able to push tags to github
# temporarily disable tags in conversion.
# When bug 1020613 is resolved, this tag_config below can be enabled again.
# "tag_config": {
# "tag_regexes": [
# "^.*$"
# ]
# },
"generate_git_notes": True, # False by default
})
remote_targets["build-%s-github" % repo] = {
"repo": "[email protected]:mozilla/build-%s.git" % repo,
"ssh_key": "~/.ssh/releng-github-id_rsa",
"vcs": "git",
}
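# For illustration, each pass through this loop produces one conversion entry
# and one push target; e.g. for the first repo the generated target is roughly:
#
#   remote_targets["build-autoland-github"] == {
#       "repo": "[email protected]:mozilla/build-autoland.git",
#       "ssh_key": "~/.ssh/releng-github-id_rsa",
#       "vcs": "git",
#   }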
config = {
"log_name": "build-repos",
"log_max_rotate": 99,
"job_name": "build-repos",
"env": {
"PATH": "%(PATH)s:/usr/libexec/git-core",
},
"conversion_repos": conversion_repos,
"remote_targets": remote_targets,
"virtualenv_modules": [
"dulwich==0.9.0",
"ordereddict==1.1",
"hg-git==0.4.0-moz2",
"mapper==0.1",
"mercurial==2.6.3",
"mozfile==0.9",
"mozinfo==0.5",
"mozprocess==0.11",
"requests==2.2.1",
],
"find_links": [
"http://pypi.pub.build.mozilla.org/pub"
],
"pip_index": False,
"default_notify_from": "vcs2vcs@%s" % hostname,
"notify_config": [{
"to": "[email protected]",
"failure_only": False,
"skip_empty_messages": True,
}],
# Disallow sharing, since we want pristine .hg and .git directories.
"vcs_share_base": None,
"hg_share_base": None,
# any hg command line options
"hg_options": (
"--config",
"web.cacerts=/etc/pki/tls/certs/ca-bundle.crt"
),
"default_actions": [
'list-repos',
'create-virtualenv',
'update-stage-mirror',
'update-work-mirror',
'create-git-notes',
'publish-to-mapper',
'push',
'combine-mapfiles',
'upload',
'notify',
],
}
| mpl-2.0 | 8,176,191,087,614,149,000 | 24.344828 | 75 | 0.506463 | false |
je-nunez/callgrind_to_NewRelic | callgrind_to_newrelic.py | 2 | 16728 | #!/usr/bin/env python
"""
Parse the grammar, summarize the syntactic tree, and upload a Valgrind
callgrind file to New Relic.
"""
# A first, incremental parser for the Valgrind callgrind file format
# using the PLY module (the Natural Language Toolkit, NLTK, would be
# another possibility)
import sys
import ply.lex as lex
import ply.yacc as yacc
# An incremental test of some productions in the grammar (and associated
# lexical tokens)
#
# http://valgrind.org/docs/manual/cl-format.html#cl-format.reference.grammar
#
# ProfileDataFile := FormatVersion? Creator? PartData*
#
# FormatVersion := "version:" Space* Number "\n"
#
# Creator := "creator:" NoNewLineChar* "\n"
#
# TargetCommand := "cmd:" Space* NoNewLineChar*
#
# TargetID := ("pid"|"thread"|"part") ":" Space* Number
#
# Description := "desc:" Space* Name Space* ":" NoNewLineChar*
#
# EventSpecification := "event:" Space* Name InheritedDef? LongNameDef?
#
# CostLineDef := "events:" Space* Name (Space+ Name)*
#
# Name = Alpha (Digit | Alpha)*
#
# CostPosition := "ob" | "fl" | "fi" | "fe" | "fn"
#
# CalledPosition := "cob" | "cfi" | "cfl" | "cfn"
#
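# As a rough illustration (hypothetical values), a minimal file matching the
# productions above could look like:
#
#   version: 1
#   creator: callgrind-3.11.0
#   cmd: ./myprogram
#   events: Ir
#   fl=myprogram.c
#   fn=main
#   16 20
#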
class PlyLexerValgrindCallgrind(object):
# pylint: disable=too-many-public-methods
"""A class whose instantations will have the PLY lexer for the Valgrind
Callgrind file format.
This lexer is being built incrementally, adding new tokens from the
Callgrind grammar specification. The YACC parser for the Context-Free
Grammar will be built later.
"""
def __init__(self):
"""Instance constructor"""
self.lexer = lex.lex(module=self)
def tokenize_strings(self, strings):
"""tokenize strings"""
self.lexer.input(strings)
while True:
token = self.lexer.token()
if not token:
break
print token
return True
# Ignored characters
# t_ignore = " \t"
# Order is important in the tokens in PLY for it is the order in which
# the tokens will be analyzed, eg., the possibility of token
# 'lex_hex_number'
# should should be analyzed before the possibility of the token
# 'lex_dec_number'
# and similarly, the possibility of token
# 'lex_name'
# should be analyzed before token
# 'lex_rest_of_line'
tokens = (
'lex_equal_sign', 'lex_minus_sign', 'lex_plus_sign', 'lex_star_sign',
'lex_hex_number', 'lex_dec_number', 'lex_new_line', 'lex_version',
'lex_creator', 'lex_target_command', 'lex_target_id_pid',
'lex_target_id_thread', 'lex_target_id_part', 'lex_description',
'lex_event_specification', 'lex_call_line_calls',
'lex_cost_line_def_events', 'lex_cost_positions', 'lex_instr_token',
'lex_cost_position_ob', 'lex_cost_position_fl',
'lex_cost_position_fi', 'lex_cost_position_fe',
'lex_cost_position_fn', 'lex_called_position_cob',
'lex_called_position_cfi', 'lex_called_position_cfl',
'lex_called_position_cfn', 'lex_name', 'lex_rest_of_line',
'lex_spacetab'
)
# Tokens
# We mostly define the PLY tokens not as PLY regular-expression objects,
# but PLY functions, since the later give more flexibility
def t_lex_equal_sign(self, lex_token):
r'='
# pylint: disable=no-self-use
return lex_token
def t_lex_minus_sign(self, lex_token):
r'-'
# pylint: disable=no-self-use
return lex_token
def t_lex_plus_sign(self, lex_token):
r'\+'
# pylint: disable=no-self-use
return lex_token
def t_lex_star_sign(self, lex_token):
r'\*'
# pylint: disable=no-self-use
return lex_token
def t_lex_hex_number(self, lex_token):
r'0x[0-9A-Fa-f]+'
# pylint: disable=no-self-use
try:
lex_token.value = int(lex_token.value, 16)
except ValueError:
print "Hexadecimal value too large %x", lex_token.value
lex_token.value = 0
return lex_token
def t_lex_dec_number(self, lex_token):
r'\d+'
# pylint: disable=no-self-use
try:
lex_token.value = int(lex_token.value)
except ValueError:
print "Integer value too large %d", lex_token.value
lex_token.value = 0
return lex_token
    def t_lex_new_line(self, lex_token):
        r'\n'
        # pylint: disable=no-self-use
        lex_token.lexer.lineno += 1
        # return the token: the parser's grammar rules (e.g. FormatVersion)
        # explicitly reference lex_new_line as a line terminator
        return lex_token
def t_lex_version(self, lex_token):
r'(?m)^[ \t]*version:'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_creator(self, lex_token):
        r'(?m)^[ \t]*creator:'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_target_command(self, lex_token):
        r'(?m)^[ \t]*cmd:'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_target_id_pid(self, lex_token):
        r'(?m)^[ \t]*pid:'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_target_id_thread(self, lex_token):
        r'(?m)^[ \t]*thread:'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_target_id_part(self, lex_token):
        r'(?m)^[ \t]*part:'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_description(self, lex_token):
        r'(?m)^[ \t]*desc:'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_event_specification(self, lex_token):
        r'(?m)^[ \t]*event:'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_call_line_calls(self, lex_token):
r'(?m)^[ \t]*calls'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_cost_line_def_events(self, lex_token):
r'(?m)^[ \t]*events:'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_cost_positions(self, lex_token):
r'(?m)^[ \t]*positions:'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_instr_token(self, lex_token):
r'\<instr\>'
# pylint: disable=no-self-use
return lex_token
def t_lex_cost_position_ob(self, lex_token):
r'(?m)^[ \t]*ob'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_cost_position_fl(self, lex_token):
r'(?m)^[ \t]*fl'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_cost_position_fi(self, lex_token):
r'(?m)^[ \t]*fi'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_cost_position_fe(self, lex_token):
r'(?m)^[ \t]*fe'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_cost_position_fn(self, lex_token):
r'(?m)^[ \t]*fn'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_called_position_cob(self, lex_token):
r'(?m)^[ \t]*cob'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_called_position_cfi(self, lex_token):
r'(?m)^[ \t]*cfi'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_called_position_cfl(self, lex_token):
r'(?m)^[ \t]*cfl'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_called_position_cfn(self, lex_token):
r'(?m)^[ \t]*cfn'
# pylint: disable=no-self-use
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_name(self, lex_token): # pylint: disable=no-self-use
'[a-zA-Z][a-zA-Z0-9]*'
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_rest_of_line(self, lex_token): # pylint: disable=no-self-use
r'.+'
lex_token.value = lex_token.value.strip()
return lex_token
def t_lex_spacetab(self, dummy_token): # pylint: disable=no-self-use
r'\s+'
# Adding this function which allows the line-oriented nature of the
# Callgrind, like:
# ^[ \t]*version: ... <line> ... '\n'
# ^[ \t]*creator: ... <line> ... '\n'
# ie., the Callgrind records are VERY line oriented (they can't wrap
# to the next line), and this function allows t_lex_spacetab() allows
# to do this (see:
# http://stackoverflow.com/questions/23925820/python-lex-yaccply-not-recognizing-start-of-line-or-start-of-string
# )
pass
def t_error(self, lex_token): # pylint: disable=no-self-use
# pylint: disable=missing-docstring
sys.stderr.write("Illegal character '%s'\n" % lex_token.value[0])
lex_token.lexer.skip(1)
class PlyParserValgrindCallgrind(object):
# pylint: disable=too-many-public-methods
"""A class whose instantations will have the PLY parser for the Valgrind
Callgrind file format.
This parser is being built incrementally, adding new grammatical rules
from the Callgrind grammar specification.
"""
def __init__(self):
"""Instance constructor"""
self.callgrind_lexer = PlyLexerValgrindCallgrind()
        # yacc.yacc() only builds the parser tables; the lexer and position
        # tracking options belong to parse() (see parse_strings below)
        self.parser = yacc.yacc(module=self)
def parse_strings(self, strings):
"""parse strings"""
self.parser.error = 0
        deriv = self.parser.parse(strings, lexer=self.callgrind_lexer.lexer,
                                  tracking=True)
return deriv if not self.parser.error else None
def p_format_version(self, pars_tree): # pylint: disable=no-self-use
"""FormatVersion : lex_version lex_dec_number lex_new_line
| lex_version lex_hex_number lex_new_line
| lex_version lex_spacetab lex_dec_number lex_new_line
| lex_version lex_spacetab lex_hex_number lex_new_line"""
# this needs new classes for the nodes, not simple Python lists.
# E.g., the classes can be the respective translation into the
# target language, ie., into New Relic. For example, the Callgrind
# FormatVersion can be translated to a New Relic string in its
# newrelic_init() call in the New Relic SDK
        # PLY negative indices peek at the parser stack *before* this rule,
        # so index the production's own symbols via len() instead
        pars_tree[0] = [pars_tree[1], pars_tree[len(pars_tree) - 2]]
def p_creator_rest_of_line(self, pars_tree):
# pylint: disable=no-self-use
"""Creator := lex_creator lex_rest_of_line"""
# same as above, that this needs new classes for the nodes, not
# simple Python lists.
creator_signature = pars_tree[-1].strip()
pars_tree[0] = [pars_tree[1], creator_signature]
def p_subposition_posit_number(self, pars_tree):
# pylint: disable=no-self-use
"""SubPosition : lex_dec_number
| lex_hex_number
| lex_plus_sign lex_dec_number
| lex_plus_sign lex_hex_number"""
        # last symbol of the production (see the note in p_format_version)
        pars_tree[0] = pars_tree[len(pars_tree) - 1]
def p_subposition_negat_number(self, pars_tree):
# pylint: disable=no-self-use
"""SubPosition : lex_minus_sign lex_dec_number
| lex_minus_sign lex_hex_number"""
        # pars_tree[1] is the minus sign itself; the number is pars_tree[2]
        pars_tree[0] = -pars_tree[2]
def p_subposition_star_sign(self, pars_tree):
# pylint: disable=no-self-use
"""SubPosition : t_lex_star_sign"""
# TODO: this needs to create a proper abstraction class,
# 'SubPosition', for the parser tree in Python PLY,
# if not we will have problems distinguishing the other
# SubPositions that are merely numeric
pars_tree[0] = pars_tree[1]
def p_costs_number_space(self, pars_tree): # pylint: disable=no-self-use
""""costs : lex_dec_number lex_spacetab
| lex_hex_number lex_spacetab"""
pars_tree[0] = pars_tree[1]
def p_costs_costs_number_space(self, pars_tree):
# pylint: disable=no-self-use
"""costs : costs lex_dec_number lex_spacetab
| costs lex_hex_number lex_spacetab"""
        if isinstance(pars_tree[1], list):
            # list.append() returns None, so append first, then assign
            pars_tree[1].append(pars_tree[2])
            pars_tree[0] = pars_tree[1]
        else:
            pars_tree[0] = [pars_tree[1], pars_tree[2]]
def p_target_command_lex_rest_line(self, pars_tree):
# pylint: disable=no-self-use
"""TargetCommand : lex_target_command lex_rest_of_line
| lex_target_command lex_spacetab lex_rest_of_line"""
        pars_tree[0] = [pars_tree[1],
                        pars_tree[len(pars_tree) - 1].strip()]  # last token
def p_target_id_pid_thread_part(self, pars_tree):
# pylint: disable=no-self-use
"""TargetID : lex_target_id_pid lex_spacetab lex_dec_number
| lex_target_id_pid lex_spacetab lex_hex_number
| lex_target_id_pid lex_dec_number
| lex_target_id_pid lex_hex_number
| lex_target_id_thread lex_spacetab lex_dec_number
| lex_target_id_thread lex_spacetab lex_hex_number
| lex_target_id_thread lex_dec_number
| lex_target_id_thread lex_hex_number
| lex_target_id_part lex_spacetab lex_dec_number
| lex_target_id_part lex_spacetab lex_hex_number
| lex_target_id_part lex_dec_number
| lex_target_id_part lex_hex_number"""
# this needs new classes for the nodes, not simple Python lists.
# E.g., the classes can be the respective translation into the
# target language, ie., into New Relic
        pars_tree[0] = [pars_tree[1], pars_tree[len(pars_tree) - 1]]
def p_cost_position_alternatives(self, pars_tree):
# pylint: disable=no-self-use
"""CostPosition : lex_cost_position_ob
| lex_cost_position_fl
| lex_cost_position_fi
| lex_cost_position_fe
| lex_cost_position_fn"""
pars_tree[0] = pars_tree[1]
def p_called_position_alternatives(self, pars_tree):
# pylint: disable=no-self-use
"""CalledPosition : lex_called_position_cob
| lex_called_position_cfi
| lex_called_position_cfl
| lex_called_position_cfn"""
pars_tree[0] = pars_tree[1]
def p_error(self, pars_tree): # pylint: disable=no-self-use
"""An error in the parsing."""
from ply.lex import LexToken # the PLY class for actual lexer tokens
if pars_tree:
pos_error = ""
try:
line_err = pars_tree.lineno(0)
lexpos_err = pars_tree.lexpos(0)
pos_error = " at line {} ({})".format(line_err, lexpos_err)
            except (AttributeError, TypeError):
                # on a LexToken, lineno/lexpos are plain ints, so calling
                # them raises TypeError rather than AttributeError
if isinstance(pars_tree, LexToken):
# pars_tree is a lexical token.
pos_error = " at line {} ({})".format(pars_tree.lineno,
pars_tree.lexpos)
msg = "Syntax error at element {} {}\n".format(pars_tree.type,
pos_error)
sys.stderr.write(msg)
else:
sys.stderr.write("Syntax error at EOF")
def main():
"""Main function."""
# this is a simple test of the current state of the lexer on a callgrind
# file
callgrind_input_file = "test.callgrind"
if len(sys.argv) >= 2:
callgrind_input_file = sys.argv[1]
with open(callgrind_input_file, "r") as callgrind_file:
data = callgrind_file.read()
callgrind_lexer = PlyLexerValgrindCallgrind()
callgrind_lexer.tokenize_strings(data)
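        # A minimal sketch of driving the (still partial) parser instead of
        # the lexer; commented out because the grammar above is incomplete:
        #
        #   callgrind_parser = PlyParserValgrindCallgrind()
        #   derivation = callgrind_parser.parse_strings(data)
        #   print derivation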
if __name__ == '__main__':
main()
| gpl-2.0 | 6,906,496,149,653,901,000 | 35.12959 | 121 | 0.577296 | false |
hradec/gaffer | python/GafferCortexUITest/__init__.py | 7 | 2108 | ##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest

from .CompoundPlugValueWidgetTest import CompoundPlugValueWidgetTest
from .CompoundParameterValueWidgetTest import CompoundParameterValueWidgetTest
from .ParameterValueWidgetTest import ParameterValueWidgetTest
from .DocumentationTest import DocumentationTest
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 1,904,418,245,696,576,000 | 48.023256 | 78 | 0.706831 | false |
tinyendian/eb_cray | easyblocks/ncl.py | 1 | 7879 | ##
# Copyright 2009-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing NCL, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import fileinput
import os
import re
import sys
from distutils.version import LooseVersion
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
class EB_NCL(EasyBlock):
"""Support for building/installing NCL."""
def configure_step(self):
"""Configure build:
- create Makefile.ini using make and run ymake script to create config file
- patch config file with correct settings, and add missing config entries
- create config/Site.local file to avoid interactive install
- generate Makefile using config/ymkmf script
        """
try:
os.chdir('config')
except OSError, err:
raise EasyBuildError("Failed to change to the 'config' dir: %s", err)
cmd = "make -f Makefile.ini"
run_cmd(cmd, log_all=True, simple=True)
cmd = "./ymake -config $PWD"
run_cmd(cmd, log_all=True, simple=True)
# figure out name of config file
        cfg_regexp = re.compile(r'^\s*SYSTEM_INCLUDE\s*=\s*"(.*)"\s*$', re.M)
f = open("Makefile", "r")
txt = f.read()
f.close()
cfg_filename = cfg_regexp.search(txt).group(1)
# adjust config file as needed
ctof_libs = ''
ifort = get_software_root('ifort')
if ifort:
if LooseVersion(get_software_version('ifort')) < LooseVersion('2011.4'):
ctof_libs = '-lm -L%s/lib/intel64 -lifcore -lifport' % ifort
else:
ctof_libs = '-lm -L%s/compiler/lib/intel64 -lifcore -lifport' % ifort
elif get_software_root('GCC'):
ctof_libs = '-lgfortran -lm'
macrodict = {
'CCompiler': os.getenv('CC'),
'FCompiler': os.getenv('F90'),
'CcOptions': '-ansi %s' % os.getenv('CFLAGS') + ' -DH5Rdereference_vers=1 ',
'FcOptions': os.getenv('FFLAGS') + ' -fno-range-check ',
'COptimizeFlag': os.getenv('CFLAGS'),
'FOptimizeFlag': os.getenv('FFLAGS'),
'ExtraSysLibraries': os.getenv('LDFLAGS'),
'CtoFLibraries': ctof_libs
}
# replace config entries that are already there
for line in fileinput.input(cfg_filename, inplace=1, backup='%s.orig' % cfg_filename):
for (key, val) in macrodict.items():
                regexp = re.compile(r"(#define %s\s*).*" % key)
match = regexp.search(line)
if match:
line = "#define %s %s\n" % (key, val)
macrodict.pop(key)
sys.stdout.write(line)
# add remaining config entries
f = open(cfg_filename, "a")
for (key, val) in macrodict.items():
f.write("#define %s %s\n" % (key, val))
f.close()
f = open(cfg_filename, "r")
self.log.debug("Contents of %s: %s" % (cfg_filename, f.read()))
f.close()
# configure
try:
os.chdir(self.cfg['start_dir'])
except OSError, err:
raise EasyBuildError("Failed to change to the build dir %s: %s", self.cfg['start_dir'], err)
# instead of running the Configure script that asks a zillion questions,
# let's just generate the config/Site.local file ourselves...
# order of deps is important
# HDF needs to go after netCDF, because both have a netcdf.h include file
# Note: HDF currently not supported - requires netCDF support, which is incompatible with other
# software packages
deps = ["HDF5", "JasPer", "netCDF", "HDF-EOS", "HDF-EOS5", "g2lib", "g2clib", "Szip", "UDUNITS"]
libs = ''
includes = ''
for dep in deps:
root = get_software_root(dep)
if not root:
raise EasyBuildError("%s not available", dep)
libs += ' -L%s/lib ' % root
includes += ' -I%s/include ' % root
opt_deps = ["netCDF-Fortran", "GDAL"]
libs_map = {
'netCDF-Fortran': '-lnetcdff -lnetcdf',
'GDAL': '-lgdal',
}
for dep in opt_deps:
root = get_software_root(dep)
if root:
libs += ' -L%s/lib %s ' % (root, libs_map[dep])
includes += ' -I%s/include ' % root
# Help build system find freetype
includes += ' -I/usr/include/freetype2 '
        cfgtxt = """#ifdef FirstSite
#endif /* FirstSite */
#ifdef SecondSite
/* Allow file paths to contain x86_64. Note that this will cause macro recursion errors. */
#ifdef x86_64
#undef x86_64
#define x86_64 x86_64
#endif
#define YmakeRoot %(installdir)s
#define LibSearch %(libs)s
#define IncSearch %(includes)s
#define BuildNCL 1
#define HDFlib
#define HDFEOSlib -lGctp -lhdfeos
#define HDFEOS5lib -lhe5_hdfeos
#define BuildGRIB2 1
#define BuildESMF 1
#define UdUnitslib -ludunits2
#define BuildRasterHDF 0
#define BuildHDF4 0
#define BuildTRIANGLE 0
#define BuildHDFEOS 1
#define BuildHDFEOS5 1
#define LexLibrary -lfl
#endif /* SecondSite */
""" % {
'installdir': self.installdir,
'libs': libs,
'includes': includes
}
f = open("config/Site.local", "w")
f.write(cfgtxt)
f.close()
# generate Makefile
cmd = "./config/ymkmf"
run_cmd(cmd, log_all=True, simple=True)
def build_step(self):
"""Building is done in install_step."""
pass
def install_step(self):
"""Build in install dir using build_step."""
paracmd = ""
if self.cfg['parallel']:
paracmd = "-j %s" % self.cfg['parallel']
cmd = "make Everything " + paracmd
run_cmd(cmd, log_all=True, simple=True)
def sanity_check_step(self):
"""
Custom sanity check for NCL
"""
custom_paths = {
'files': ['bin/ncl', 'lib/libncl.a', 'lib/libncarg.a'],
'dirs': ['include/ncarg'],
}
super(EB_NCL, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Set NCARG_ROOT environment variable in module."""
txt = super(EB_NCL, self).make_module_extra()
txt += self.module_generator.set_environment('NCARG_ROOT', self.installdir)
return txt
| gpl-2.0 | -6,446,298,528,248,462,000 | 32.961207 | 104 | 0.597665 | false |
ranjiao/raysdemo | crazydepot/src/creoleparser/__init__.py | 1 | 1127 | # __init__.py
#
# Copyright (c) 2009 Stephen Day
#
# This module is part of Creoleparser and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
#
import string
import keyword
from core import Parser, ArgParser
from dialects import (creole11_base, creole10_base, creepy10_base,
create_dialect, Creole10)
__docformat__ = 'restructuredtext en'
__version__ = '0.6.1'
creole2html = Parser(dialect=create_dialect(creole10_base), method='html')
"""This is a pure Creole 1.0 parser created for convenience"""
text2html = Parser(dialect=create_dialect(creole11_base), method='html')
"""This is a Creole 1.0 parser (+ additions) created for convenience"""
parse_args = ArgParser(dialect=creepy10_base(), key_func=string.lower,
illegal_keys=keyword.kwlist + ['macro_name',
'arg_string', 'body', 'isblock', 'environ', 'macro'])
"""Function for parsing macro arg_strings using a relaxed xml style"""
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
| apache-2.0 | 5,605,196,396,034,681,000 | 30.2 | 78 | 0.650399 | false |
arjoly/scikit-learn | sklearn/utils/testing.py | 6 | 26573 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# Giorgio Patrini
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
from numpy.testing import assert_approx_equal
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
from sklearn.cluster import DBSCAN
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal",
"assert_approx_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
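# Illustrative use of assert_warns (the warning-raising function is made up):
#
#   def noisy():
#       warnings.warn("spam", UserWarning)
#       return 42
#
#   assert assert_warns(UserWarning, noisy) == 42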
def assert_warns_message(warning_class, message, func, *args, **kw):
    """Test that a certain warning occurs and carries a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            return fn(*args, **kwargs)
    return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
        The expected exception class, or a tuple of acceptable classes
func : callable
        Callable object expected to raise the error
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
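# Illustrative use (int() really does raise ValueError with this substring):
#
#   assert_raise_message(ValueError, "invalid literal", int, "not-a-number")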
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering).
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
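# A minimal sketch of the intended flow (dataset name and arrays are made up):
#
#   install_mldata_mock({'some-data': {'data': np.zeros((2, 3)),
#                                      'label': np.arange(2)}})
#   ...  # code under test calls fetch_mldata('some-data')
#   uninstall_mldata_mock()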
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV",
"SelectFromModel"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
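# Illustrative use (the exact classifier list depends on the sklearn version):
#
#   classifiers = all_estimators(type_filter='classifier')
#   # -> [('AdaBoostClassifier', <class ...>), ('BaggingClassifier', ...), ...]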
def set_random_state(estimator, random_state=0):
"""Set random state of an estimator if it has the `random_state` param.
Classes for whom random_state is deprecated are ignored. Currently DBSCAN
is one such class.
"""
if isinstance(estimator, DBSCAN):
return
if "random_state" in estimator.get_params():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
" in 0.19: use the safer and more generic"
" if_safe_multiprocessing_with_blas instead",
DeprecationWarning)
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing
Under Python < 3.4 and POSIX (e.g. Linux or OSX), using multiprocessing in
conjunction with some implementation of BLAS (or other libraries that
manage an internal posix thread pool) can cause a crash or a freeze of the
Python process.
Under Python 3.4 and later, joblib uses the forkserver mode of
multiprocessing which does not trigger this problem.
In practice all known packaged distributions (from Linux distros or
    Anaconda) of BLAS under Linux seem to be safe, so this problem seems to
only impact OSX users.
This wrapper makes it possible to skip tests that can possibly cause
    this crash under OSX.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin' and sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independance)"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
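# Illustrative use (X and y stand for whatever arrays the test needs):
#
#   with TempMemmap(X) as X_readonly:
#       estimator.fit(X_readonly, y)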
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause | 3,945,235,352,036,388,400 | 32.722081 | 81 | 0.613141 | false |
luzpaz/QGIS | tests/src/python/test_qgslayoutpagecollection.py | 11 | 40077 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLayoutPageCollection
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '18/07/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.PyQt import sip
from qgis.core import (QgsUnitTypes,
QgsLayout,
QgsLayoutItemPage,
QgsLayoutSize,
QgsLayoutPoint,
QgsLayoutObject,
QgsProject,
QgsMargins,
QgsProperty,
QgsLayoutGuide,
QgsLayoutMeasurement,
QgsLayoutPageCollection,
QgsSimpleFillSymbolLayer,
QgsLayoutItemShape,
QgsFillSymbol,
QgsReadWriteContext)
from qgis.PyQt.QtCore import Qt, QCoreApplication, QEvent, QPointF, QRectF
from qgis.PyQt.QtTest import QSignalSpy
from qgis.PyQt.QtXml import QDomDocument
from qgis.testing import start_app, unittest
start_app()
class TestQgsLayoutPageCollection(unittest.TestCase):
def testLayout(self):
# test that layouts have a collection
p = QgsProject()
l = QgsLayout(p)
self.assertTrue(l.pageCollection())
self.assertEqual(l.pageCollection().layout(), l)
def testSymbol(self):
"""
Test setting a page symbol for the collection
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
self.assertTrue(collection.pageStyleSymbol())
fill = QgsSimpleFillSymbolLayer()
fill_symbol = QgsFillSymbol()
fill_symbol.changeSymbolLayer(0, fill)
fill.setColor(Qt.green)
fill.setStrokeColor(Qt.red)
fill.setStrokeWidth(6)
collection.setPageStyleSymbol(fill_symbol)
self.assertEqual(collection.pageStyleSymbol().symbolLayer(0).color().name(), '#00ff00')
self.assertEqual(collection.pageStyleSymbol().symbolLayer(0).strokeColor().name(), '#ff0000')
def testPages(self):
"""
Test adding/retrieving/deleting pages from the collection
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
self.assertEqual(collection.pageCount(), 0)
self.assertFalse(collection.pages())
self.assertFalse(collection.page(-1))
self.assertFalse(collection.page(0))
self.assertFalse(collection.page(1))
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
self.assertEqual(collection.pageNumber(page), -1)
collection.addPage(page)
self.assertTrue(page in l.items())
self.assertEqual(collection.pageCount(), 1)
self.assertEqual(collection.pages(), [page])
self.assertFalse(collection.page(-1))
self.assertEqual(collection.page(0), page)
self.assertFalse(collection.page(1))
self.assertEqual(collection.pageNumber(page), 0)
# add a second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
self.assertEqual(collection.pageCount(), 2)
self.assertEqual(collection.pages(), [page, page2])
self.assertFalse(collection.page(-1))
self.assertEqual(collection.page(0), page)
self.assertEqual(collection.page(1), page2)
self.assertEqual(collection.pageNumber(page2), 1)
# insert a page
page3 = QgsLayoutItemPage(l)
page3.setPageSize('A3')
collection.insertPage(page3, 1)
self.assertTrue(page3 in l.items())
self.assertEqual(collection.pageCount(), 3)
self.assertEqual(collection.pages(), [page, page3, page2])
self.assertEqual(collection.page(0), page)
self.assertEqual(collection.page(1), page3)
self.assertEqual(collection.page(2), page2)
self.assertEqual(collection.pageNumber(page3), 1)
# delete page
collection.deletePage(-1)
self.assertEqual(collection.pageCount(), 3)
self.assertEqual(collection.pages(), [page, page3, page2])
collection.deletePage(100)
self.assertEqual(collection.pageCount(), 3)
self.assertEqual(collection.pages(), [page, page3, page2])
collection.deletePage(1)
self.assertEqual(collection.pageCount(), 2)
self.assertEqual(collection.pages(), [page, page2])
# make sure page was deleted
QCoreApplication.sendPostedEvents(None, QEvent.DeferredDelete)
self.assertTrue(sip.isdeleted(page3))
del l
QCoreApplication.sendPostedEvents(None, QEvent.DeferredDelete)
self.assertTrue(sip.isdeleted(page))
self.assertTrue(sip.isdeleted(page2))
def testDeletePages(self):
"""
Test deleting pages from the collection
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
# add a second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
page_about_to_be_removed_spy = QSignalSpy(collection.pageAboutToBeRemoved)
# delete page
collection.deletePage(None)
self.assertEqual(collection.pageCount(), 2)
self.assertEqual(len(page_about_to_be_removed_spy), 0)
page3 = QgsLayoutItemPage(l)
# try deleting a page not in collection
collection.deletePage(page3)
QCoreApplication.sendPostedEvents(None, QEvent.DeferredDelete)
self.assertFalse(sip.isdeleted(page3))
self.assertEqual(collection.pageCount(), 2)
self.assertEqual(len(page_about_to_be_removed_spy), 0)
self.assertEqual(l.layoutBounds(ignorePages=False), QRectF(0.0, 0.0, 210.0, 517.0))
collection.deletePage(page)
self.assertEqual(collection.pageCount(), 1)
self.assertEqual(l.layoutBounds(ignorePages=False), QRectF(0.0, 0.0, 148.0, 210.0))
self.assertFalse(page in collection.pages())
QCoreApplication.sendPostedEvents(None, QEvent.DeferredDelete)
self.assertTrue(sip.isdeleted(page))
self.assertEqual(len(page_about_to_be_removed_spy), 1)
self.assertEqual(page_about_to_be_removed_spy[-1][0], 0)
collection.deletePage(page2)
self.assertEqual(collection.pageCount(), 0)
self.assertFalse(collection.pages())
self.assertEqual(l.layoutBounds(ignorePages=False), QRectF())
QCoreApplication.sendPostedEvents(None, QEvent.DeferredDelete)
self.assertTrue(sip.isdeleted(page2))
self.assertEqual(len(page_about_to_be_removed_spy), 2)
self.assertEqual(page_about_to_be_removed_spy[-1][0], 0)
def testClear(self):
"""
Test clearing the collection
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
collection.clear()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
# add a second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
page_about_to_be_removed_spy = QSignalSpy(collection.pageAboutToBeRemoved)
# clear
collection.clear()
self.assertEqual(collection.pageCount(), 0)
self.assertEqual(len(page_about_to_be_removed_spy), 2)
QCoreApplication.sendPostedEvents(None, QEvent.DeferredDelete)
self.assertTrue(sip.isdeleted(page))
self.assertTrue(sip.isdeleted(page2))
def testExtendByNewPage(self):
"""
Test extend by adding new page
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# no existing page to extend
self.assertIsNone(collection.extendByNewPage())
self.assertEqual(collection.pageCount(), 0)
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize(QgsLayoutSize(10, 10))
collection.addPage(page)
self.assertEqual(collection.pageCount(), 1)
new_page = collection.extendByNewPage()
self.assertIsNotNone(new_page)
self.assertEqual(collection.pageCount(), 2)
self.assertEqual(new_page.sizeWithUnits(), page.sizeWithUnits())
new_page.setPageSize(QgsLayoutSize(20, 20))
new_page2 = collection.extendByNewPage()
self.assertIsNotNone(new_page2)
self.assertEqual(collection.pageCount(), 3)
self.assertEqual(new_page2.sizeWithUnits(), new_page.sizeWithUnits())
def testMaxPageWidthAndSize(self):
"""
Test calculating maximum page width and size
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
self.assertEqual(collection.maximumPageWidth(), 210.0)
self.assertEqual(collection.maximumPageSize().width(), 210.0)
self.assertEqual(collection.maximumPageSize().height(), 297.0)
# add a second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A3')
collection.addPage(page2)
self.assertEqual(collection.maximumPageWidth(), 297.0)
self.assertEqual(collection.maximumPageSize().width(), 297.0)
self.assertEqual(collection.maximumPageSize().height(), 420.0)
# add a page with other units
page3 = QgsLayoutItemPage(l)
page3.setPageSize(QgsLayoutSize(100, 100, QgsUnitTypes.LayoutMeters))
collection.addPage(page3)
self.assertEqual(collection.maximumPageWidth(), 100000.0)
self.assertEqual(collection.maximumPageSize().width(), 100000.0)
self.assertEqual(collection.maximumPageSize().height(), 100000.0)
def testUniformPageSizes(self):
"""
Test detection of uniform page sizes
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
self.assertTrue(collection.hasUniformPageSizes())
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
self.assertTrue(collection.hasUniformPageSizes())
# add a second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize(QgsLayoutSize(21.0, 29.7, QgsUnitTypes.LayoutCentimeters))
collection.addPage(page2)
self.assertTrue(collection.hasUniformPageSizes())
# add a page with other units
page3 = QgsLayoutItemPage(l)
page3.setPageSize('A5')
collection.addPage(page3)
self.assertFalse(collection.hasUniformPageSizes())
def testReflow(self):
"""
Test reflowing pages
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
# should be positioned at origin
self.assertEqual(page.pos().x(), 0)
self.assertEqual(page.pos().y(), 0)
# second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
self.assertEqual(page.pos().x(), 0)
self.assertEqual(page.pos().y(), 0)
self.assertEqual(page2.pos().x(), 0)
self.assertEqual(page2.pos().y(), 307)
# third page, slotted in middle
page3 = QgsLayoutItemPage(l)
page3.setPageSize('A3')
collection.insertPage(page3, 1)
self.assertEqual(page.pos().x(), 0)
self.assertEqual(page.pos().y(), 0)
self.assertEqual(page2.pos().x(), 0)
self.assertEqual(page2.pos().y(), 737)
self.assertEqual(page3.pos().x(), 0)
self.assertEqual(page3.pos().y(), 307)
page.setPageSize(QgsLayoutSize(100, 120))
# no update until reflow is called
self.assertEqual(page.pos().x(), 0)
self.assertEqual(page.pos().y(), 0)
self.assertEqual(page2.pos().x(), 0)
self.assertEqual(page2.pos().y(), 737)
self.assertEqual(page3.pos().x(), 0)
self.assertEqual(page3.pos().y(), 307)
collection.reflow()
self.assertEqual(page.pos().x(), 0)
self.assertEqual(page.pos().y(), 0)
self.assertEqual(page2.pos().x(), 0)
self.assertEqual(page2.pos().y(), 560)
self.assertEqual(page3.pos().x(), 0)
self.assertEqual(page3.pos().y(), 130)
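    # Editor's note (not in the original test): the expected offsets assume the
    # page collection's default 10 mm gap between pages. With an A4 first page
    # (297 mm tall) the next page starts at 297 + 10 = 307 mm; slotting an A3
    # page (420 mm) in the middle pushes the last page to 307 + 420 + 10 = 737 mm,
    # and after resizing page 1 to 120 mm tall the reflowed offsets become
    # 120 + 10 = 130 mm and 130 + 420 + 10 = 560 mm, as asserted above.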
def testInsertPageWithItems(self):
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
# item on pages
shape1 = QgsLayoutItemShape(l)
shape1.attemptResize(QgsLayoutSize(90, 50))
shape1.attemptMove(QgsLayoutPoint(90, 50), page=0)
l.addLayoutItem(shape1)
shape2 = QgsLayoutItemShape(l)
shape2.attemptResize(QgsLayoutSize(110, 50))
shape2.attemptMove(QgsLayoutPoint(100, 150), page=1)
l.addLayoutItem(shape2)
self.assertEqual(shape1.page(), 0)
self.assertEqual(shape2.page(), 1)
# third page, slotted in middle
page3 = QgsLayoutItemPage(l)
page3.setPageSize('A3')
collection.insertPage(page3, 0)
# check item position
self.assertEqual(shape1.page(), 1)
self.assertEqual(shape1.pagePositionWithUnits(), QgsLayoutPoint(90, 50))
self.assertEqual(shape2.page(), 2)
self.assertEqual(shape2.pagePositionWithUnits(), QgsLayoutPoint(100, 150))
def testDeletePageWithItems(self):
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A4')
collection.addPage(page2)
page3 = QgsLayoutItemPage(l)
page3.setPageSize('A4')
collection.addPage(page3)
# item on pages
shape1 = QgsLayoutItemShape(l)
shape1.attemptResize(QgsLayoutSize(90, 50))
shape1.attemptMove(QgsLayoutPoint(90, 50), page=0)
l.addLayoutItem(shape1)
shape2 = QgsLayoutItemShape(l)
shape2.attemptResize(QgsLayoutSize(110, 50))
shape2.attemptMove(QgsLayoutPoint(100, 150), page=2)
l.addLayoutItem(shape2)
self.assertEqual(shape1.page(), 0)
self.assertEqual(shape2.page(), 2)
collection.deletePage(1)
# check item position
self.assertEqual(shape1.page(), 0)
self.assertEqual(shape1.pagePositionWithUnits(), QgsLayoutPoint(90, 50))
self.assertEqual(shape2.page(), 1)
self.assertEqual(shape2.pagePositionWithUnits(), QgsLayoutPoint(100, 150))
def testDeletePageWithItems2(self):
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A4')
collection.addPage(page2)
page3 = QgsLayoutItemPage(l)
page3.setPageSize('A4')
collection.addPage(page3)
# item on pages
shape1 = QgsLayoutItemShape(l)
shape1.attemptResize(QgsLayoutSize(90, 50))
shape1.attemptMove(QgsLayoutPoint(90, 50), page=0)
l.addLayoutItem(shape1)
shape2 = QgsLayoutItemShape(l)
shape2.attemptResize(QgsLayoutSize(110, 50))
shape2.attemptMove(QgsLayoutPoint(100, 150), page=2)
l.addLayoutItem(shape2)
self.assertEqual(shape1.page(), 0)
self.assertEqual(shape2.page(), 2)
collection.deletePage(page2)
# check item position
self.assertEqual(shape1.page(), 0)
self.assertEqual(shape1.pagePositionWithUnits(), QgsLayoutPoint(90, 50))
self.assertEqual(shape2.page(), 1)
self.assertEqual(shape2.pagePositionWithUnits(), QgsLayoutPoint(100, 150))
def testDataDefinedSize(self):
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add some pages
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
page3 = QgsLayoutItemPage(l)
page3.setPageSize('A5')
collection.addPage(page3)
self.assertEqual(page.pos().x(), 0)
self.assertEqual(page.pos().y(), 0)
self.assertEqual(page2.pos().x(), 0)
self.assertEqual(page2.pos().y(), 307)
self.assertEqual(page3.pos().x(), 0)
self.assertEqual(page3.pos().y(), 527)
page.dataDefinedProperties().setProperty(QgsLayoutObject.ItemHeight, QgsProperty.fromExpression('50*3'))
page.refresh()
collection.reflow()
self.assertEqual(page.pos().x(), 0)
self.assertEqual(page.pos().y(), 0)
self.assertEqual(page2.pos().x(), 0)
self.assertEqual(page2.pos().y(), 160)
self.assertEqual(page3.pos().x(), 0)
self.assertEqual(page3.pos().y(), 380)
page2.dataDefinedProperties().setProperty(QgsLayoutObject.ItemHeight, QgsProperty.fromExpression('50-20'))
page2.refresh()
collection.reflow()
self.assertEqual(page.pos().x(), 0)
self.assertEqual(page.pos().y(), 0)
self.assertEqual(page2.pos().x(), 0)
self.assertEqual(page2.pos().y(), 160)
self.assertEqual(page3.pos().x(), 0)
self.assertEqual(page3.pos().y(), 200)
def testPositionOnPage(self):
"""
Test pageNumberForPoint and positionOnPage
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, -100)), 0)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, -1)), 0)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, 1)), 0)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, 270)), 0)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, 1270)), 0)
self.assertEqual(collection.positionOnPage(QPointF(-100, -100)), QPointF(-100, -100))
self.assertEqual(collection.positionOnPage(QPointF(-100, -1)), QPointF(-100, -1))
self.assertEqual(collection.positionOnPage(QPointF(-100, 1)), QPointF(-100, 1))
self.assertEqual(collection.positionOnPage(QPointF(-100, 270)), QPointF(-100, 270))
self.assertEqual(collection.positionOnPage(QPointF(-100, 1270)), QPointF(-100, 973))
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, -100)), 0)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, -1)), 0)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, 1)), 0)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, 270)), 0)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, 370)), 1)
self.assertEqual(collection.pageNumberForPoint(QPointF(-100, 1270)), 1)
self.assertEqual(collection.positionOnPage(QPointF(-100, -100)), QPointF(-100, -100))
self.assertEqual(collection.positionOnPage(QPointF(-100, -1)), QPointF(-100, -1))
self.assertEqual(collection.positionOnPage(QPointF(-100, 1)), QPointF(-100, 1))
self.assertEqual(collection.positionOnPage(QPointF(-100, 270)), QPointF(-100, 270))
self.assertEqual(collection.positionOnPage(QPointF(-100, 370)), QPointF(-100, 63))
self.assertEqual(collection.positionOnPage(QPointF(-100, 1270)), QPointF(-100, 753))
def testPredictionPageNumberForPoint(self):
"""
Test predictPageNumberForPoint
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# no crash if no pages
self.assertEqual(collection.predictPageNumberForPoint(QPointF(1, 1)), 0)
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize(QgsLayoutSize(100, 100))
collection.addPage(page)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, -100)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, -1)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 20)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 120)), 1)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 230)), 2)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 350)), 3)
page2 = QgsLayoutItemPage(l)
page2.setPageSize(QgsLayoutSize(100, 50))
collection.addPage(page2)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, -100)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, -1)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 20)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 120)), 1)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 230)), 2)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 280)), 3)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 340)), 4)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 370)), 5)
page3 = QgsLayoutItemPage(l)
page3.setPageSize(QgsLayoutSize(100, 200))
collection.addPage(page3)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, -100)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, -1)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 20)), 0)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 120)), 1)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 230)), 2)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 280)), 2)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 340)), 2)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 370)), 2)
self.assertEqual(collection.predictPageNumberForPoint(QPointF(-100, 470)), 3)
def testPageAtPoint(self):
"""
Test pageAtPoint
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
self.assertFalse(collection.pageAtPoint(QPointF(0, 0)))
self.assertFalse(collection.pageAtPoint(QPointF(10, 10)))
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
self.assertFalse(collection.pageAtPoint(QPointF(10, -1)))
self.assertEqual(collection.pageAtPoint(QPointF(1, 1)), page)
self.assertEqual(collection.pageAtPoint(QPointF(10, 10)), page)
self.assertFalse(collection.pageAtPoint(QPointF(-10, 10)))
self.assertFalse(collection.pageAtPoint(QPointF(1000, 10)))
self.assertFalse(collection.pageAtPoint(QPointF(10, -10)))
self.assertFalse(collection.pageAtPoint(QPointF(10, 1000)))
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
self.assertEqual(collection.pageAtPoint(QPointF(1, 1)), page)
self.assertEqual(collection.pageAtPoint(QPointF(10, 10)), page)
self.assertFalse(collection.pageAtPoint(QPointF(-10, 10)))
self.assertFalse(collection.pageAtPoint(QPointF(1000, 10)))
self.assertFalse(collection.pageAtPoint(QPointF(10, -10)))
self.assertEqual(collection.pageAtPoint(QPointF(10, 330)), page2)
self.assertEqual(collection.pageAtPoint(QPointF(10, 500)), page2)
self.assertFalse(collection.pageAtPoint(QPointF(10, 600)))
def testPagePositionToLayout(self):
"""
Test pagePositionToLayoutPosition
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# invalid pages
self.assertEqual(collection.pagePositionToLayoutPosition(-1, QgsLayoutPoint(1, 1)), QPointF(1, 1))
self.assertEqual(collection.pagePositionToLayoutPosition(0, QgsLayoutPoint(1, 1)), QPointF(1, 1))
self.assertEqual(collection.pagePositionToLayoutPosition(100, QgsLayoutPoint(1, 1)), QPointF(1, 1))
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
        # invalid pages
self.assertEqual(collection.pagePositionToLayoutPosition(-1, QgsLayoutPoint(1, 1)), QPointF(1, 1))
self.assertEqual(collection.pagePositionToLayoutPosition(1, QgsLayoutPoint(1, 1)), QPointF(1, 1))
        # valid page
self.assertEqual(collection.pagePositionToLayoutPosition(0, QgsLayoutPoint(1, 1)), QPointF(1, 1))
self.assertEqual(collection.pagePositionToLayoutPosition(0, QgsLayoutPoint(5, 6)), QPointF(5, 6))
self.assertEqual(collection.pagePositionToLayoutPosition(0, QgsLayoutPoint(5, 6, QgsUnitTypes.LayoutCentimeters)), QPointF(50, 60))
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
        # invalid pages
self.assertEqual(collection.pagePositionToLayoutPosition(-1, QgsLayoutPoint(1, 1)), QPointF(1, 1))
self.assertEqual(collection.pagePositionToLayoutPosition(3, QgsLayoutPoint(1, 1)), QPointF(1, 1))
        # valid pages
self.assertEqual(collection.pagePositionToLayoutPosition(0, QgsLayoutPoint(1, 1)), QPointF(1, 1))
self.assertEqual(collection.pagePositionToLayoutPosition(0, QgsLayoutPoint(5, 6)), QPointF(5, 6))
self.assertEqual(collection.pagePositionToLayoutPosition(0, QgsLayoutPoint(5, 6, QgsUnitTypes.LayoutCentimeters)), QPointF(50, 60))
self.assertEqual(collection.pagePositionToLayoutPosition(1, QgsLayoutPoint(1, 1)), QPointF(1, 308.0))
self.assertEqual(collection.pagePositionToLayoutPosition(1, QgsLayoutPoint(5, 6)), QPointF(5, 313.0))
self.assertEqual(collection.pagePositionToLayoutPosition(1, QgsLayoutPoint(0.5, 0.6, QgsUnitTypes.LayoutCentimeters)), QPointF(5, 313.0))
def testPagePositionToAbsolute(self):
"""
Test pagePositionToAbsolute
"""
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# invalid pages
self.assertEqual(collection.pagePositionToAbsolute(-1, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
self.assertEqual(collection.pagePositionToAbsolute(0, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
self.assertEqual(collection.pagePositionToAbsolute(100, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
        # invalid pages
self.assertEqual(collection.pagePositionToAbsolute(-1, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
self.assertEqual(collection.pagePositionToAbsolute(1, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
        # valid page
self.assertEqual(collection.pagePositionToAbsolute(0, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
self.assertEqual(collection.pagePositionToAbsolute(0, QgsLayoutPoint(5, 6)), QgsLayoutPoint(5, 6))
self.assertEqual(collection.pagePositionToAbsolute(0, QgsLayoutPoint(5, 6, QgsUnitTypes.LayoutCentimeters)), QgsLayoutPoint(5, 6, QgsUnitTypes.LayoutCentimeters))
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
        # invalid pages
self.assertEqual(collection.pagePositionToAbsolute(-1, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
self.assertEqual(collection.pagePositionToAbsolute(3, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
        # valid pages
self.assertEqual(collection.pagePositionToAbsolute(0, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 1))
self.assertEqual(collection.pagePositionToAbsolute(0, QgsLayoutPoint(5, 6)), QgsLayoutPoint(5, 6))
self.assertEqual(collection.pagePositionToAbsolute(0, QgsLayoutPoint(5, 6, QgsUnitTypes.LayoutCentimeters)), QgsLayoutPoint(5, 6, QgsUnitTypes.LayoutCentimeters))
self.assertEqual(collection.pagePositionToAbsolute(1, QgsLayoutPoint(1, 1)), QgsLayoutPoint(1, 308.0))
self.assertEqual(collection.pagePositionToAbsolute(1, QgsLayoutPoint(5, 6)), QgsLayoutPoint(5, 313.0))
self.assertEqual(collection.pagePositionToAbsolute(1, QgsLayoutPoint(0.5, 0.6, QgsUnitTypes.LayoutCentimeters)), QgsLayoutPoint(0.5, 31.3, QgsUnitTypes.LayoutCentimeters))
def testVisiblePages(self):
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
self.assertFalse(collection.visiblePages(QRectF(0, 0, 10, 10)))
self.assertFalse(collection.visiblePageNumbers(QRectF(0, 0, 10, 10)))
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
self.assertFalse(collection.visiblePages(QRectF(-10, -10, 5, 5)))
self.assertFalse(collection.visiblePageNumbers(QRectF(-10, -10, 5, 5)))
self.assertEqual(collection.visiblePages(QRectF(-10, -10, 15, 15)), [page])
self.assertEqual(collection.visiblePageNumbers(QRectF(-10, -10, 15, 15)), [0])
self.assertEqual(collection.visiblePages(QRectF(200, 200, 115, 115)), [page])
self.assertEqual(collection.visiblePageNumbers(QRectF(200, 200, 115, 115)), [0])
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
self.assertFalse(collection.visiblePages(QRectF(-10, -10, 5, 5)))
self.assertFalse(collection.visiblePageNumbers(QRectF(-10, -10, 5, 5)))
self.assertEqual(collection.visiblePages(QRectF(-10, -10, 15, 15)), [page])
self.assertEqual(collection.visiblePageNumbers(QRectF(-10, -10, 15, 15)), [0])
self.assertEqual(collection.visiblePages(QRectF(200, 200, 115, 115)), [page])
self.assertEqual(collection.visiblePageNumbers(QRectF(200, 200, 115, 115)), [0])
self.assertEqual(collection.visiblePages(QRectF(200, 200, 115, 615)), [page])
self.assertEqual(collection.visiblePageNumbers(QRectF(200, 200, 115, 115)), [0])
self.assertEqual(collection.visiblePages(QRectF(100, 200, 115, 615)), [page, page2])
self.assertEqual(collection.visiblePageNumbers(QRectF(100, 200, 115, 115)), [0, 1])
self.assertEqual(collection.visiblePages(QRectF(100, 310, 115, 615)), [page2])
self.assertEqual(collection.visiblePageNumbers(QRectF(100, 310, 115, 115)), [1])
def testTakePage(self):
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add some pages
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
self.assertEqual(collection.pageCount(), 2)
self.assertFalse(collection.takePage(None))
self.assertEqual(collection.takePage(page), page)
self.assertFalse(sip.isdeleted(page))
self.assertEqual(collection.pageCount(), 1)
self.assertEqual(collection.pages(), [page2])
self.assertEqual(collection.page(0), page2)
self.assertEqual(collection.takePage(page2), page2)
self.assertFalse(sip.isdeleted(page2))
self.assertEqual(collection.pageCount(), 0)
self.assertEqual(collection.pages(), [])
self.assertFalse(collection.page(0))
def testReadWriteXml(self):
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
fill = QgsSimpleFillSymbolLayer()
fill_symbol = QgsFillSymbol()
fill_symbol.changeSymbolLayer(0, fill)
fill.setColor(Qt.green)
fill.setStrokeColor(Qt.red)
fill.setStrokeWidth(6)
collection.setPageStyleSymbol(fill_symbol)
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
self.assertEqual(collection.pageNumber(page), -1)
collection.addPage(page)
# add a second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
doc = QDomDocument("testdoc")
elem = doc.createElement("test")
self.assertTrue(collection.writeXml(elem, doc, QgsReadWriteContext()))
l2 = QgsLayout(p)
collection2 = l2.pageCollection()
self.assertTrue(collection2.readXml(elem.firstChildElement(), doc, QgsReadWriteContext()))
self.assertEqual(collection2.pageCount(), 2)
self.assertEqual(collection2.page(0).pageSize().width(), 210)
self.assertEqual(collection2.page(0).pageSize().height(), 297)
self.assertEqual(collection2.page(1).pageSize().width(), 148)
self.assertEqual(collection2.page(1).pageSize().height(), 210)
self.assertEqual(collection2.pageStyleSymbol().symbolLayer(0).color().name(), '#00ff00')
self.assertEqual(collection2.pageStyleSymbol().symbolLayer(0).strokeColor().name(), '#ff0000')
def testUndoRedo(self):
p = QgsProject()
l = QgsLayout(p)
collection = l.pageCollection()
# add a page
page = QgsLayoutItemPage(l)
page.setPageSize('A4')
collection.addPage(page)
self.assertEqual(collection.pageCount(), 1)
l.undoStack().stack().undo()
self.assertEqual(collection.pageCount(), 0)
l.undoStack().stack().redo()
self.assertEqual(collection.pageCount(), 1)
# make sure page is accessible
self.assertEqual(collection.page(0).pageSize().width(), 210)
# add a second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize('A5')
collection.addPage(page2)
# delete page
collection.deletePage(collection.page(0))
self.assertEqual(collection.pageCount(), 1)
l.undoStack().stack().undo()
self.assertEqual(collection.pageCount(), 2)
# make sure pages are accessible
self.assertEqual(collection.page(0).pageSize().width(), 210)
self.assertEqual(collection.page(1).pageSize().width(), 148)
l.undoStack().stack().undo()
self.assertEqual(collection.pageCount(), 1)
l.undoStack().stack().undo()
self.assertEqual(collection.pageCount(), 0)
l.undoStack().stack().redo()
self.assertEqual(collection.pageCount(), 1)
self.assertEqual(collection.page(0).pageSize().width(), 210)
l.undoStack().stack().redo()
self.assertEqual(collection.pageCount(), 2)
self.assertEqual(collection.page(0).pageSize().width(), 210)
self.assertEqual(collection.page(1).pageSize().width(), 148)
l.undoStack().stack().redo()
self.assertEqual(collection.pageCount(), 1)
self.assertEqual(collection.page(0).pageSize().width(), 148)
def testResizeToContents(self):
p = QgsProject()
l = QgsLayout(p)
# no items -- no crash!
l.pageCollection().resizeToContents(QgsMargins(1, 2, 3, 4), QgsUnitTypes.LayoutCentimeters)
page = QgsLayoutItemPage(l)
page.setPageSize("A5", QgsLayoutItemPage.Landscape)
l.pageCollection().addPage(page)
# no items, no change
l.pageCollection().resizeToContents(QgsMargins(1, 2, 3, 4), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(l.pageCollection().pageCount(), 1)
self.assertAlmostEqual(l.pageCollection().page(0).sizeWithUnits().width(), 210.0, 2)
self.assertAlmostEqual(l.pageCollection().page(0).sizeWithUnits().height(), 148.0, 2)
p = QgsProject()
l = QgsLayout(p)
shape1 = QgsLayoutItemShape(l)
shape1.attemptResize(QgsLayoutSize(90, 50))
shape1.attemptMove(QgsLayoutPoint(90, 50))
shape1.setItemRotation(45, False)
l.addLayoutItem(shape1)
shape2 = QgsLayoutItemShape(l)
shape2.attemptResize(QgsLayoutSize(110, 50))
shape2.attemptMove(QgsLayoutPoint(100, 150), True, False, 0)
l.addLayoutItem(shape2)
shape3 = QgsLayoutItemShape(l)
l.addLayoutItem(shape3)
shape3.attemptResize(QgsLayoutSize(50, 100))
shape3.attemptMove(QgsLayoutPoint(210, 250), True, False, 0)
shape4 = QgsLayoutItemShape(l)
l.addLayoutItem(shape4)
shape4.attemptResize(QgsLayoutSize(50, 30))
shape4.attemptMove(QgsLayoutPoint(10, 340), True, False, 0)
shape4.setVisibility(False)
# resize with no existing pages
l.pageCollection().resizeToContents(QgsMargins(1, 2, 3, 4), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(l.pageCollection().pageCount(), 1)
self.assertAlmostEqual(l.pageCollection().page(0).sizeWithUnits().width(), 290.3, 2)
self.assertAlmostEqual(l.pageCollection().page(0).sizeWithUnits().height(), 380.36, 2)
self.assertAlmostEqual(l.pageCollection().page(0).sizeWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertAlmostEqual(shape1.positionWithUnits().x(), 90.15, 2)
self.assertAlmostEqual(shape1.positionWithUnits().y(), 20.21, 2)
self.assertAlmostEqual(shape2.positionWithUnits().x(), 100.15, 2)
self.assertAlmostEqual(shape2.positionWithUnits().y(), 120.21, 2)
self.assertAlmostEqual(shape3.positionWithUnits().x(), 210.15, 2)
self.assertAlmostEqual(shape3.positionWithUnits().y(), 220.21, 2)
self.assertAlmostEqual(shape4.positionWithUnits().x(), 10.15, 2)
self.assertAlmostEqual(shape4.positionWithUnits().y(), 310.21, 2)
# add a second page
page2 = QgsLayoutItemPage(l)
page2.setPageSize("A4", QgsLayoutItemPage.Landscape)
l.pageCollection().addPage(page2)
# add some guides
g1 = QgsLayoutGuide(Qt.Horizontal, QgsLayoutMeasurement(2.5, QgsUnitTypes.LayoutCentimeters), l.pageCollection().page(0))
l.guides().addGuide(g1)
g2 = QgsLayoutGuide(Qt.Vertical, QgsLayoutMeasurement(4.5, QgsUnitTypes.LayoutCentimeters), l.pageCollection().page(0))
l.guides().addGuide(g2)
# second page should be removed
l.pageCollection().resizeToContents(QgsMargins(0, 0, 0, 0), QgsUnitTypes.LayoutCentimeters)
self.assertEqual(l.pageCollection().pageCount(), 1)
self.assertAlmostEqual(l.pageCollection().page(0).sizeWithUnits().width(), 250.3, 2)
self.assertAlmostEqual(l.pageCollection().page(0).sizeWithUnits().height(), 320.36, 2)
self.assertAlmostEqual(l.pageCollection().page(0).sizeWithUnits().units(), QgsUnitTypes.LayoutMillimeters)
self.assertAlmostEqual(g1.position().length(), 0.5, 2)
self.assertAlmostEqual(g2.position().length(), 3.5, 2)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 8,580,895,409,850,832,000 | 40.020471 | 179 | 0.652594 | false |
wkritzinger/asuswrt-merlin | release/src/router/samba-3.5.8/source4/heimdal/lib/wind/gen-errorlist.py | 22 | 3655 | #!/usr/local/bin/python
# -*- coding: iso-8859-1 -*-
# $Id: gen-errorlist.py,v 1.1.1.1 2011/06/10 09:34:43 andrew Exp $
# Copyright (c) 2004 Kungliga Tekniska Högskolan
# (Royal Institute of Technology, Stockholm, Sweden).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import re
import string
import sys
import generate
import rfc3454
import rfc4518
import stringprep
if len(sys.argv) != 3:
print "usage: %s rfc3454.txt out-dir" % sys.argv[0]
sys.exit(1)
tables = rfc3454.read(sys.argv[1])
t2 = rfc4518.read()
for x in t2.iterkeys():
tables[x] = t2[x]
error_list = stringprep.get_errorlist()
errorlist_h = generate.Header('%s/errorlist_table.h' % sys.argv[2])
errorlist_c = generate.Implementation('%s/errorlist_table.c' % sys.argv[2])
errorlist_h.file.write(
'''
#include "windlocl.h"
struct error_entry {
uint32_t start;
unsigned len;
wind_profile_flags flags;
};
extern const struct error_entry _wind_errorlist_table[];
extern const size_t _wind_errorlist_table_size;
''')
errorlist_c.file.write(
'''
#include "errorlist_table.h"
const struct error_entry _wind_errorlist_table[] = {
''')
trans=[]
for t in error_list.iterkeys():
for l in tables[t]:
m = re.search('^ *([0-9A-F]+)-([0-9A-F]+); *(.*) *$', l)
if m:
start = int(m.group(1), 0x10)
end = int(m.group(2), 0x10)
desc = m.group(3)
trans.append([start, end - start + 1, desc, [t]])
else:
m = re.search('^ *([0-9A-F]+); *(.*) *$', l)
if m:
trans.append([int(m.group(1), 0x10), 1, m.group(2), [t]])
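# Editor's note (illustrative, not in the original script): the two regexes
# above match RFC 3454 table rows of the forms
#     0080-009F; <description of a range>
#     0340; <description of a single code point>
# Hex bounds are parsed with int(x, 0x10) (base 16) and collected as
# [start, length, description, tables] entries before merging below.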
trans = stringprep.sort_merge_trans(trans)
for x in trans:
(start, length, description, tables) = x
symbols = stringprep.symbols(error_list, tables)
if len(symbols) == 0:
print "no symbol for %s" % description
sys.exit(1)
errorlist_c.file.write(" {0x%x, 0x%x, %s}, /* %s: %s */\n"
% (start, length, symbols, ",".join(tables), description))
errorlist_c.file.write(
'''};
''')
errorlist_c.file.write(
"const size_t _wind_errorlist_table_size = %u;\n" % len(trans))
errorlist_h.close()
errorlist_c.close()
| gpl-2.0 | -8,359,673,033,513,507,000 | 29.45 | 77 | 0.671866 | false |
wkeeling/ansible | lib/ansible/cli/vault.py | 4 | 6699 | # (c) 2014, James Tanner <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# http://docs.ansible.com/playbooks_vault.html for more details.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.vault import VaultEditor
from ansible.cli import CLI
from ansible.module_utils._text import to_text
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class VaultCLI(CLI):
""" Vault command line class """
VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view")
def __init__(self, args):
self.vault_pass = None
self.new_vault_pass = None
super(VaultCLI, self).__init__(args)
def parse(self):
self.parser = CLI.base_parser(
vault_opts=True,
usage = "usage: %%prog [%s] [--help] [options] vaultfile.yml" % "|".join(self.VALID_ACTIONS),
epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
self.set_action()
# options specific to self.actions
if self.action == "create":
self.parser.set_usage("usage: %prog create [options] file_name")
elif self.action == "decrypt":
self.parser.set_usage("usage: %prog decrypt [options] file_name")
elif self.action == "edit":
self.parser.set_usage("usage: %prog edit [options] file_name")
elif self.action == "view":
self.parser.set_usage("usage: %prog view [options] file_name")
elif self.action == "encrypt":
self.parser.set_usage("usage: %prog encrypt [options] file_name")
elif self.action == "rekey":
self.parser.set_usage("usage: %prog rekey [options] file_name")
self.options, self.args = self.parser.parse_args(self.args[1:])
display.verbosity = self.options.verbosity
can_output = ['encrypt', 'decrypt']
if self.action not in can_output:
if self.options.output_file:
raise AnsibleOptionsError("The --output option can be used only with ansible-vault %s" % '/'.join(can_output))
if len(self.args) == 0:
raise AnsibleOptionsError("Vault requires at least one filename as a parameter")
else:
# This restriction should remain in place until it's possible to
# load multiple YAML records from a single file, or it's too easy
# to create an encrypted file that can't be read back in. But in
# the meanwhile, "cat a b c|ansible-vault encrypt --output x" is
# a workaround.
if self.options.output_file and len(self.args) > 1:
raise AnsibleOptionsError("At most one input file may be used with the --output option")
def run(self):
super(VaultCLI, self).run()
loader = DataLoader()
# set default restrictive umask
old_umask = os.umask(0o077)
if self.options.vault_password_file:
# read vault_pass from a file
self.vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader)
else:
newpass = False
rekey = False
if not self.options.new_vault_password_file:
newpass = (self.action in ['create', 'rekey', 'encrypt'])
rekey = (self.action == 'rekey')
self.vault_pass, self.new_vault_pass = self.ask_vault_passwords(ask_new_vault_pass=newpass, rekey=rekey)
if self.options.new_vault_password_file:
# for rekey only
self.new_vault_pass = CLI.read_vault_password_file(self.options.new_vault_password_file, loader)
if not self.vault_pass:
raise AnsibleOptionsError("A password is required to use Ansible's Vault")
self.editor = VaultEditor(self.vault_pass)
self.execute()
# and restore umask
os.umask(old_umask)
def execute_encrypt(self):
if len(self.args) == 0 and sys.stdin.isatty():
display.display("Reading plaintext input from stdin", stderr=True)
for f in self.args or ['-']:
self.editor.encrypt_file(f, output_file=self.options.output_file)
if sys.stdout.isatty():
display.display("Encryption successful", stderr=True)
def execute_decrypt(self):
if len(self.args) == 0 and sys.stdin.isatty():
display.display("Reading ciphertext input from stdin", stderr=True)
for f in self.args or ['-']:
self.editor.decrypt_file(f, output_file=self.options.output_file)
if sys.stdout.isatty():
display.display("Decryption successful", stderr=True)
def execute_create(self):
if len(self.args) > 1:
raise AnsibleOptionsError("ansible-vault create can take only one filename argument")
self.editor.create_file(self.args[0])
def execute_edit(self):
for f in self.args:
self.editor.edit_file(f)
def execute_view(self):
for f in self.args:
# Note: vault should return byte strings because it could encrypt
# and decrypt binary files. We are responsible for changing it to
# unicode here because we are displaying it and therefore can make
# the decision that the display doesn't have to be precisely what
# the input was (leave that to decrypt instead)
self.pager(to_text(self.editor.plaintext(f)))
def execute_rekey(self):
for f in self.args:
if not (os.path.isfile(f)):
raise AnsibleError(f + " does not exist")
for f in self.args:
self.editor.rekey_file(f, self.new_vault_pass)
display.display("Rekey successful", stderr=True)
| gpl-3.0 | -7,298,703,271,782,055,000 | 37.0625 | 130 | 0.63293 | false |
Jeremy123W/Optimizing-Employee-Shuttle-Stops | Scripts/sector4.py | 1 | 3275 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 1 20:31:46 2017
@author: jeremy
"""
import math
import pandas as pd
import gmplot
import itertools
import datetime
emp_addresses = pd.read_csv('employees_with_geocode.csv')
bus_stops = pd.read_csv('bus_stops_with_geocode.csv')
# 37.78 < latitude < 37.793
# -122.402 < longitude < -122.392
class Point(object):
'''
A point in n dimensional space
'''
def __init__(self, coords):
'''
coords - A list of values, one per dimension
'''
self.coords = coords
self.n = len(coords)
def __repr__(self):
return str(self.coords)
def getDistance(a, b):
'''
Euclidean distance between two n-dimensional points.
https://en.wikipedia.org/wiki/Euclidean_distance#n_dimensions
Note: This can be very slow and does not scale well
'''
if a.n != b.n:
raise Exception("ERROR: non comparable points")
accumulatedDifference = 0.0
for i in range(a.n):
squareDifference = pow((a.coords[i]-b.coords[i]), 2)
accumulatedDifference += squareDifference
distance = math.sqrt(accumulatedDifference)
return distance
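# --- Editor's sketch, not part of the original script ---
# The pure-Python getDistance above is O(n) per call inside nested loops; the
# same planar Euclidean distance can be vectorised with NumPy. Here `points`
# is a hypothetical (m, 2) array of [lat, lon] rows and `stop` a length-2
# sequence. Like getDistance, this treats degrees as planar coordinates; a
# haversine formula would be needed for true ground distance.
import numpy as np

def get_distances_vectorized(points, stop):
    # distance from every row of `points` to `stop` in one expression
    points = np.asarray(points, dtype=float)
    stop = np.asarray(stop, dtype=float)
    return np.sqrt(((points - stop) ** 2).sum(axis=1))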
gmap = gmplot.GoogleMapPlotter(37.75, -122.427325, 13)
location = []
for index,row in emp_addresses.iterrows():
if row['longitudes']< -122.392 and row['longitudes']>-122.402 and row['lattitudes']<37.793 and row['lattitudes']>37.78:
tmp=[]
tmp.append(row['lattitudes'])
tmp.append(row['longitudes'])
location.append(tmp)
gmap.marker(row['lattitudes'],row['longitudes'],title=row['address']+' employee_id '+str(row['employee_id']))
bus_stop=[]
for index,row in bus_stops.iterrows():
if row['longitudes']< -122.392 and row['longitudes']>-122.402 and row['lattitudes']<37.793 and row['lattitudes']>37.78:
tmp=[]
tmp.append(row['lattitudes'])
tmp.append(row['longitudes'])
bus_stop.append(tmp)
#gmap.marker(row['lattitudes'],row['longitudes'],c='blue',title=str(row['lattitudes'])+' '+str(row['longitudes']))
location_objects = []
for each in location:
location_objects.append(Point(each))
bus_stop_objects = []
for each in bus_stop:
bus_stop_objects.append(Point(each))
start_time = datetime.datetime.now()
print("Start time: ",start_time)
total_distances=[]
bus_combinations=[]
for combination in itertools.combinations(bus_stop,2):
distance=0
for l_obj in location_objects:
tmp=[]
for each in combination:
bus_object = Point(each)
tmp.append(getDistance(l_obj,bus_object))
distance += min(tmp)
total_distances.append(distance)
bus_combinations.append(combination)
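# Editor's sketch (not in the original): the exhaustive search above costs
# C(k, 2) * m distance evaluations for k candidate stops and m employees.
# A hypothetical generalisation to p stops, reusing the helpers defined above:
def best_stop_combination(stops, employees, p=2):
    # brute force over all C(k, p) stop subsets; only feasible for small k
    return min(
        itertools.combinations(stops, p),
        key=lambda combo: sum(
            min(getDistance(emp, Point(s)) for s in combo) for emp in employees
        ),
    )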
index_min = min(range(len(total_distances)), key=total_distances.__getitem__)
print('overall distance: ', total_distances[index_min])
print('bus stop locations: ', bus_combinations[index_min])
print("TIME UNTIL END: ", datetime.datetime.now() - start_time)
for each in bus_combinations[index_min]:
gmap.marker(each[0],each[1],c='green',title="centroid")
# color='#FF0000', c=None, title="no implementation"
gmap.draw("section4_2_stops.html") | gpl-3.0 | -5,817,398,604,057,692,000 | 27.486957 | 127 | 0.634504 | false |
Eyepea/aiohttp | tests/test_run_app.py | 1 | 16585 | import asyncio
import contextlib
import os
import platform
import signal
import socket
import ssl
import subprocess
import sys
from unittest import mock
from uuid import uuid4
import pytest
from aiohttp import web
from aiohttp.test_utils import loop_context
# Test for features of OS' socket support
_has_unix_domain_socks = hasattr(socket, 'AF_UNIX')
if _has_unix_domain_socks:
_abstract_path_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
_abstract_path_sock.bind(b"\x00" + uuid4().hex.encode('ascii'))
except FileNotFoundError:
_abstract_path_failed = True
else:
_abstract_path_failed = False
finally:
_abstract_path_sock.close()
del _abstract_path_sock
else:
_abstract_path_failed = True
skip_if_no_abstract_paths = pytest.mark.skipif(
_abstract_path_failed,
reason="Linux-style abstract paths are not supported."
)
skip_if_no_unix_socks = pytest.mark.skipif(
not _has_unix_domain_socks,
reason="Unix domain sockets are not supported"
)
del _has_unix_domain_socks, _abstract_path_failed
# tokio event loop does not allow overriding attributes
def skip_if_no_dict(loop):
if not hasattr(loop, '__dict__'):
pytest.skip("can not override loop attributes")
def skip_if_on_windows():
if platform.system() == "Windows":
pytest.skip("the test is not valid for Windows")
def stopper(loop):
def f(*args):
loop.call_later(0.001, loop.stop)
return f
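# Editor's note (not in the original): web.run_app() invokes its `print`
# argument once the server is listening, so passing stopper(loop) as the
# printer, as in web.run_app(app, loop=loop, print=stopper(loop)) below,
# lets each test schedule loop.stop() right after startup completes.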
def test_run_app_http(loop, mocker):
skip_if_no_dict(loop)
mocker.spy(loop, 'create_server')
app = web.Application()
mocker.spy(app, 'startup')
web.run_app(app, loop=loop, print=stopper(loop))
assert not loop.is_closed()
loop.create_server.assert_called_with(mock.ANY, '0.0.0.0', 8080,
ssl=None, backlog=128)
app.startup.assert_called_once_with()
def test_run_app_close_loop(loop, mocker):
skip_if_no_dict(loop)
mocker.spy(loop, 'create_server')
asyncio.set_event_loop(loop)
app = web.Application()
mocker.spy(app, 'startup')
web.run_app(app, print=stopper(loop))
assert loop.is_closed()
loop.create_server.assert_called_with(mock.ANY, '0.0.0.0', 8080,
ssl=None, backlog=128)
app.startup.assert_called_once_with()
asyncio.set_event_loop(None)
mock_unix_server_single = [
mock.call(mock.ANY, '/tmp/testsock1.sock', ssl=None, backlog=128),
]
mock_unix_server_multi = [
mock.call(mock.ANY, '/tmp/testsock1.sock', ssl=None, backlog=128),
mock.call(mock.ANY, '/tmp/testsock2.sock', ssl=None, backlog=128),
]
mock_server_single = [
mock.call(mock.ANY, '127.0.0.1', 8080, ssl=None, backlog=128),
]
mock_server_multi = [
mock.call(mock.ANY, ('127.0.0.1', '192.168.1.1'), 8080, ssl=None,
backlog=128),
]
mock_server_default_8989 = [
mock.call(mock.ANY, '0.0.0.0', 8989, ssl=None, backlog=128)
]
mock_socket = mock.Mock(getsockname=lambda: ('mock-socket', 123))
mixed_bindings_tests = (
(
"Nothing Specified",
{},
[mock.call(mock.ANY, '0.0.0.0', 8080, ssl=None, backlog=128)],
[]
),
(
"Port Only",
{'port': 8989},
mock_server_default_8989,
[]
),
(
"Multiple Hosts",
{'host': ('127.0.0.1', '192.168.1.1')},
mock_server_multi,
[]
),
(
"Multiple Paths",
{'path': ('/tmp/testsock1.sock', '/tmp/testsock2.sock')},
[],
mock_unix_server_multi
),
(
"Multiple Paths, Port",
{'path': ('/tmp/testsock1.sock', '/tmp/testsock2.sock'),
'port': 8989},
mock_server_default_8989,
mock_unix_server_multi,
),
(
"Multiple Paths, Single Host",
{'path': ('/tmp/testsock1.sock', '/tmp/testsock2.sock'),
'host': '127.0.0.1'},
mock_server_single,
mock_unix_server_multi
),
(
"Single Path, Single Host",
{'path': '/tmp/testsock1.sock', 'host': '127.0.0.1'},
mock_server_single,
mock_unix_server_single
),
(
"Single Path, Multiple Hosts",
{'path': '/tmp/testsock1.sock', 'host': ('127.0.0.1', '192.168.1.1')},
mock_server_multi,
mock_unix_server_single
),
(
"Single Path, Port",
{'path': '/tmp/testsock1.sock', 'port': 8989},
mock_server_default_8989,
mock_unix_server_single
),
(
"Multiple Paths, Multiple Hosts, Port",
{'path': ('/tmp/testsock1.sock', '/tmp/testsock2.sock'),
'host': ('127.0.0.1', '192.168.1.1'), 'port': 8000},
[mock.call(mock.ANY, ('127.0.0.1', '192.168.1.1'), 8000, ssl=None,
backlog=128)],
mock_unix_server_multi
),
(
"Only socket",
{"sock": [mock_socket]},
[mock.call(mock.ANY, ssl=None, sock=mock_socket, backlog=128)],
[],
),
(
"Socket, port",
{"sock": [mock_socket], "port": 8765},
[mock.call(mock.ANY, '0.0.0.0', 8765, ssl=None, backlog=128),
mock.call(mock.ANY, sock=mock_socket, ssl=None, backlog=128)],
[],
),
(
"Socket, Host, No port",
{"sock": [mock_socket], "host": 'localhost'},
[mock.call(mock.ANY, 'localhost', 8080, ssl=None, backlog=128),
mock.call(mock.ANY, sock=mock_socket, ssl=None, backlog=128)],
[],
),
)
mixed_bindings_test_ids = [test[0] for test in mixed_bindings_tests]
mixed_bindings_test_params = [test[1:] for test in mixed_bindings_tests]
@pytest.mark.parametrize(
'run_app_kwargs, expected_server_calls, expected_unix_server_calls',
mixed_bindings_test_params,
ids=mixed_bindings_test_ids
)
def test_run_app_mixed_bindings(mocker, run_app_kwargs, expected_server_calls,
expected_unix_server_calls):
app = mocker.MagicMock()
loop = mocker.MagicMock()
mocker.patch('asyncio.gather')
web.run_app(app, loop=loop, print=lambda *args: None, **run_app_kwargs)
assert loop.create_unix_server.mock_calls == expected_unix_server_calls
assert loop.create_server.mock_calls == expected_server_calls
def test_run_app_http_access_format(loop, mocker):
skip_if_no_dict(loop)
mocker.spy(loop, 'create_server')
app = web.Application()
mocker.spy(app, 'startup')
web.run_app(app, loop=loop,
print=stopper(loop), access_log_format='%a')
assert not loop.is_closed()
cs = loop.create_server
cs.assert_called_with(mock.ANY, '0.0.0.0', 8080, ssl=None, backlog=128)
assert cs.call_args[0][0]._kwargs['access_log_format'] == '%a'
app.startup.assert_called_once_with()
def test_run_app_https(loop, mocker):
skip_if_no_dict(loop)
mocker.spy(loop, 'create_server')
app = web.Application()
mocker.spy(app, 'startup')
ssl_context = ssl.create_default_context()
web.run_app(app, loop=loop,
ssl_context=ssl_context, print=stopper(loop))
assert not loop.is_closed()
loop.create_server.assert_called_with(mock.ANY, '0.0.0.0', 8443,
ssl=ssl_context, backlog=128)
app.startup.assert_called_once_with()
def test_run_app_nondefault_host_port(loop, unused_port, mocker):
skip_if_no_dict(loop)
port = unused_port()
host = '127.0.0.1'
mocker.spy(loop, 'create_server')
app = web.Application()
mocker.spy(app, 'startup')
web.run_app(app, loop=loop,
host=host, port=port, print=stopper(loop))
assert not loop.is_closed()
loop.create_server.assert_called_with(mock.ANY, host, port,
ssl=None, backlog=128)
app.startup.assert_called_once_with()
def test_run_app_custom_backlog(loop, mocker):
skip_if_no_dict(loop)
mocker.spy(loop, 'create_server')
app = web.Application()
mocker.spy(app, 'startup')
web.run_app(app, loop=loop, backlog=10, print=stopper(loop))
assert not loop.is_closed()
loop.create_server.assert_called_with(mock.ANY, '0.0.0.0', 8080,
ssl=None, backlog=10)
app.startup.assert_called_once_with()
@skip_if_no_unix_socks
def test_run_app_http_unix_socket(loop, mocker, shorttmpdir):
skip_if_no_dict(loop)
mocker.spy(loop, 'create_unix_server')
app = web.Application()
mocker.spy(app, 'startup')
sock_path = str(shorttmpdir.join('socket.sock'))
printer = mock.Mock(wraps=stopper(loop))
web.run_app(app, loop=loop, path=sock_path,
print=printer)
assert not loop.is_closed()
loop.create_unix_server.assert_called_with(mock.ANY, sock_path,
ssl=None, backlog=128)
app.startup.assert_called_once_with()
assert "http://unix:{}:".format(sock_path) in printer.call_args[0][0]
@skip_if_no_unix_socks
def test_run_app_https_unix_socket(loop, mocker, shorttmpdir):
skip_if_no_dict(loop)
mocker.spy(loop, 'create_unix_server')
app = web.Application()
mocker.spy(app, 'startup')
sock_path = str(shorttmpdir.join('socket.sock'))
ssl_context = ssl.create_default_context()
printer = mock.Mock(wraps=stopper(loop))
web.run_app(app, loop=loop, path=sock_path, ssl_context=ssl_context,
print=printer)
assert not loop.is_closed()
loop.create_unix_server.assert_called_with(mock.ANY, sock_path,
ssl=ssl_context, backlog=128)
app.startup.assert_called_once_with()
assert "https://unix:{}:".format(sock_path) in printer.call_args[0][0]
@skip_if_no_unix_socks
def test_run_app_stale_unix_socket(loop, mocker, shorttmpdir):
"""Older asyncio event loop implementations are known to halt server
creation when a socket path from a previous server bind still exists.
"""
skip_if_no_dict(loop)
app = web.Application()
sock_path = shorttmpdir.join('socket.sock')
sock_path_string = str(sock_path)
web.run_app(app, loop=loop,
path=sock_path_string, print=stopper(loop))
assert not loop.is_closed()
if sock_path.check():
# New app run using same socket path
with loop_context() as loop:
mocker.spy(loop, 'create_unix_server')
app = web.Application()
mocker.spy(app, 'startup')
mocker.spy(os, 'remove')
printer = mock.Mock(wraps=stopper(loop))
web.run_app(app, loop=loop,
path=sock_path_string, print=printer)
os.remove.assert_called_with(sock_path_string)
loop.create_unix_server.assert_called_with(
mock.ANY,
sock_path_string,
ssl=None,
backlog=128
)
app.startup.assert_called_once_with()
assert ("http://unix:{}:".format(sock_path)
in printer.call_args[0][0])
@skip_if_no_unix_socks
@skip_if_no_abstract_paths
def test_run_app_abstract_linux_socket(loop, mocker):
sock_path = b"\x00" + uuid4().hex.encode('ascii')
app = web.Application()
web.run_app(
app, path=sock_path.decode('ascii', 'ignore'), loop=loop,
print=stopper(loop))
# New app run using same socket path
with loop_context() as loop:
mocker.spy(loop, 'create_unix_server')
app = web.Application()
mocker.spy(app, 'startup')
mocker.spy(os, 'remove')
web.run_app(app, path=sock_path, print=stopper(loop), loop=loop)
# Abstract paths don't exist on the file system, so no attempt should
# be made to remove.
assert mock.call([sock_path]) not in os.remove.mock_calls
loop.create_unix_server.assert_called_with(
mock.ANY,
sock_path,
ssl=None,
backlog=128
)
app.startup.assert_called_once_with()
@skip_if_no_unix_socks
def test_run_app_existing_file_conflict(loop, mocker, shorttmpdir):
app = web.Application()
sock_path = shorttmpdir.join('socket.sock')
sock_path.ensure()
sock_path_str = str(sock_path)
mocker.spy(os, 'remove')
with pytest.raises(OSError):
web.run_app(app, loop=loop,
path=sock_path_str, print=mock.Mock())
# No attempt should be made to remove a non-socket file
assert mock.call([sock_path_str]) not in os.remove.mock_calls
def test_run_app_preexisting_inet_socket(loop, mocker):
skip_if_no_dict(loop)
mocker.spy(loop, 'create_server')
app = web.Application()
mocker.spy(app, 'startup')
sock = socket.socket()
with contextlib.closing(sock):
sock.bind(('0.0.0.0', 0))
_, port = sock.getsockname()
printer = mock.Mock(wraps=stopper(loop))
web.run_app(app, loop=loop, sock=sock, print=printer)
assert not loop.is_closed()
loop.create_server.assert_called_with(
mock.ANY, sock=sock, backlog=128, ssl=None
)
app.startup.assert_called_once_with()
assert "http://0.0.0.0:{}".format(port) in printer.call_args[0][0]
@skip_if_no_unix_socks
def test_run_app_preexisting_unix_socket(loop, mocker):
skip_if_no_dict(loop)
mocker.spy(loop, 'create_server')
app = web.Application()
mocker.spy(app, 'startup')
sock_path = '/tmp/test_preexisting_sock1'
sock = socket.socket(socket.AF_UNIX)
with contextlib.closing(sock):
sock.bind(sock_path)
os.unlink(sock_path)
printer = mock.Mock(wraps=stopper(loop))
web.run_app(app, loop=loop, sock=sock, print=printer)
assert not loop.is_closed()
loop.create_server.assert_called_with(
mock.ANY, sock=sock, backlog=128, ssl=None
)
app.startup.assert_called_once_with()
assert "http://unix:{}:".format(sock_path) in printer.call_args[0][0]
def test_run_app_multiple_preexisting_sockets(loop, mocker):
skip_if_no_dict(loop)
mocker.spy(loop, 'create_server')
app = web.Application()
mocker.spy(app, 'startup')
sock1 = socket.socket()
sock2 = socket.socket()
with contextlib.closing(sock1), contextlib.closing(sock2):
sock1.bind(('0.0.0.0', 0))
_, port1 = sock1.getsockname()
sock2.bind(('0.0.0.0', 0))
_, port2 = sock2.getsockname()
printer = mock.Mock(wraps=stopper(loop))
web.run_app(app, loop=loop, sock=(sock1, sock2), print=printer)
loop.create_server.assert_has_calls([
mock.call(mock.ANY, sock=sock1, backlog=128, ssl=None),
mock.call(mock.ANY, sock=sock2, backlog=128, ssl=None)
])
app.startup.assert_called_once_with()
assert "http://0.0.0.0:{}".format(port1) in printer.call_args[0][0]
assert "http://0.0.0.0:{}".format(port2) in printer.call_args[0][0]
_script_test_signal = """
from aiohttp import web
app = web.Application()
web.run_app(app, host=())
"""
def test_sigint(loop, mocker):
skip_if_on_windows()
proc = subprocess.Popen([sys.executable, "-u", "-c", _script_test_signal],
stdout=subprocess.PIPE)
for line in proc.stdout:
if line.startswith(b"======== Running on"):
break
proc.send_signal(signal.SIGINT)
assert proc.wait() == 0
def test_sigterm(loop, mocker):
skip_if_on_windows()
proc = subprocess.Popen([sys.executable, "-u", "-c", _script_test_signal],
stdout=subprocess.PIPE)
for line in proc.stdout:
if line.startswith(b"======== Running on"):
break
proc.terminate()
assert proc.wait() == 0
def test_startup_cleanup_signals(loop, mocker):
skip_if_no_dict(loop)
mocker.spy(loop, 'create_server')
app = web.Application()
mocker.spy(app, 'startup')
mocker.spy(app, 'cleanup')
web.run_app(app, loop=loop, host=(), print=stopper(loop))
app.startup.assert_called_once_with()
app.cleanup.assert_called_once_with()
def test_startup_cleanup_signals_even_on_failure(loop, mocker):
skip_if_no_dict(loop)
setattr(loop, 'create_server', mock.Mock(side_effect=RuntimeError()))
app = web.Application()
mocker.spy(app, 'startup')
mocker.spy(app, 'cleanup')
with pytest.raises(RuntimeError):
web.run_app(app, loop=loop, print=stopper(loop))
app.startup.assert_called_once_with()
app.cleanup.assert_called_once_with()
| apache-2.0 | 6,413,558,800,703,907,000 | 28.198944 | 78 | 0.60404 | false |
Yelp/paasta | tests/deployd/test_queue.py | 1 | 3654 | import time
from queue import Empty
from pytest import fixture
from pytest import raises
from zake.fake_client import FakeClient
from paasta_tools.deployd.common import ServiceInstance
from paasta_tools.deployd.queue import ZKDelayDeadlineQueue
def make_si(wait_until, bounce_by):
"""Just using mock.Mock(wait_until=wait_until, bounce_by=bounce_by) mostly works, but our PriorityQueues
occasionally will compare two ServiceInstances directly, and Mocks aren't comparable unless you define an __eq__."""
return ServiceInstance(
service="service",
instance="instance",
bounce_by=bounce_by,
wait_until=wait_until,
watcher="watcher",
failures=0,
processed_count=0,
bounce_start_time=1.0,
enqueue_time=2.0,
)
class TestZKDelayDeadlineQueue:
@fixture
def queue(self):
client = FakeClient()
client.start()
yield ZKDelayDeadlineQueue(client, "/")
@fixture
def multiple_queues(self):
client = FakeClient()
client.start()
yield [ZKDelayDeadlineQueue(client, "/") for _ in range(5)]
def test_put_then_get_single_threaded(self, queue):
si = make_si(wait_until=time.time() - 0.01, bounce_by=time.time())
queue.put(si)
        # block=False or a really short timeout would fail here, as we have to wait for queue's watchers to be notified
# by ZK that something has changed.
with queue.get(timeout=1.0) as result:
assert result == si
# Non-blocking get should return results immediately if we force _update_local_state.
queue.put(si)
queue._update_local_state(None)
with queue.get(block=False) as result:
assert result == si
def test_put_then_get_different_instances(self, multiple_queues):
queue1 = multiple_queues[0]
queue2 = multiple_queues[1]
si = make_si(wait_until=time.time() - 0.01, bounce_by=time.time())
queue1.put(si)
        # block=False or a really short timeout would fail here, as we have to wait for queue2's watchers to be notified
# by ZK that something has changed.
with queue2.get(timeout=1.0) as result:
assert result == si
# Non-blocking get should return results immediately if we force _update_local_state.
queue1.put(si)
queue2._update_local_state(None)
with queue2.get(block=False) as result:
assert result == si
def test_dont_block_indefinitely_when_wait_until_is_in_future(self, queue):
"""Regression test for a specific bug in the first implementation of DelayDeadlineQueue"""
# First, put an item with a distant wait_until
queue.put(make_si(wait_until=time.time() + 100, bounce_by=time.time() + 100))
# an immediate get should fail.
with raises(Empty):
with queue.get(block=False) as result:
print(f"Should have raised, got {result}")
# a get with a short timeout should fail.
with raises(Empty):
with queue.get(timeout=0.0001) as result:
print(f"Should have raised, got {result}")
wait_until = time.time() + 0.01
queue.put(make_si(wait_until=wait_until, bounce_by=wait_until))
# but if we wait a short while it should return.
with queue.get(
timeout=1.0
) as result: # This timeout is only there so that if this test fails it doesn't take forever.
pass
assert (
time.time() + 0.001 > wait_until
) # queue rounds to millisecond, so we might be slightly under.
| apache-2.0 | -867,561,398,820,208,500 | 37.463158 | 120 | 0.637931 | false |
arnaudsj/pybrain | pybrain/tools/plotting/ciaoplot.py | 6 | 1584 | __author__ = 'Tom Schaul, [email protected]'
from scipy import zeros, array, amin, amax, sqrt
from colormaps import ColorMap
class CiaoPlot(ColorMap):
""" CIAO plot of coevolution performance with respect to the best
individuals from previous generations (Hall of Fame).
Requires 2 populations. """
@staticmethod
def generateData(evaluator, hof1, hof2, symmetric=True):
assert len(hof1) == len(hof2)
gens = len(hof1)
res = zeros((gens, gens))
for g1, ind1 in enumerate(hof1):
for g2, ind2 in enumerate(hof2[:g1 + 1]):
res[g1, g2] = evaluator(ind1, ind2)
if symmetric:
res[g2, g1] = res[g1, g2]
elif g1 == g2:
                    # TODO: check this!
res[g1, g2] += evaluator(ind2, ind1)
else:
res[g2, g1] = evaluator(ind2, ind1)
return res
def __init__(self, evaluator, hof1, hof2, **args):
if 'symmetric' in args:
M = CiaoPlot.generateData(evaluator, hof1, hof2, symmetric=args['symmetric'])
del args['symmetric']
else:
M = CiaoPlot.generateData(evaluator, hof1, hof2)
M *= 1 / (amin(M) - amax(M))
M -= amin(M)
self.relData = M
ColorMap.__init__(self, M, minvalue=0, maxvalue=1, **args)
if __name__ == '__main__':
x = array(range(100))
h1 = x * 4
h2 = x + 20 * sqrt(x)
def evo(x, y):
return x - y
from pylab import cm
p = CiaoPlot(evo, h1, h2, cmap=cm.hot).show()
| bsd-3-clause | 1,752,950,691,779,675,600 | 31.326531 | 89 | 0.535354 | false |
WesleyAC/lemonscript-transpiler | tests/test_transpiler.py | 1 | 4113 | import os
import subprocess
import shutil
import transpiler
class TestTranspiler:
def clean_auto_funcs(self):
try:
shutil.rmtree("/tmp/auto_files")
os.remove("/tmp/auto_functions.cpp")
os.remove("/tmp/auto_functions.h")
os.remove("auto_functions.cpp")
os.remove("auto_functions.h")
except OSError:
pass
def test_transpiler_creates_files_without_format(self):
self.clean_auto_funcs()
transpiler.main(["--output-cpp", "/tmp/auto_functions.cpp", "--output-header", "/tmp/auto_functions.h"])
assert os.path.isfile("/tmp/auto_functions.cpp")
assert os.path.isfile("/tmp/auto_functions.h")
def test_transpiler_creates_files_with_format(self):
self.clean_auto_funcs()
transpiler.main(["--format", "--output-cpp", "/tmp/auto_functions.cpp", "--output-header", "/tmp/auto_functions.h"])
assert os.path.isfile("/tmp/auto_functions.cpp")
assert os.path.isfile("/tmp/auto_functions.h")
def test_transpiler_uses_input_files(self):
self.clean_auto_funcs()
transpiler.main(["--format", "--output-cpp", "/tmp/auto_functions.cpp", "--output-header", "/tmp/auto_functions.h", "--input-files", "tests/files/transpiler/auto_functions/wait.func"])
assert os.path.isfile("/tmp/auto_functions.cpp")
assert os.path.isfile("/tmp/auto_functions.h")
assert "Wait" in open("/tmp/auto_functions.h").read()
assert "Wait" in open("/tmp/auto_functions.cpp").read()
def test_transpiler_works_from_other_dir(self):
self.clean_auto_funcs()
old_dir = os.getcwd()
os.chdir("/tmp")
transpiler.main(["--format", "--input-files", old_dir + "/tests/files/transpiler/auto_functions/wait.func"])
assert os.path.isfile("/tmp/auto_functions.cpp")
assert os.path.isfile("/tmp/auto_functions.h")
assert "Wait" in open("/tmp/auto_functions.h").read()
assert "Wait" in open("/tmp/auto_functions.cpp").read()
os.chdir(old_dir)
def test_transpiler_handles_whitespace(self):
self.clean_auto_funcs()
transpiler.main(["--input-files", "tests/files/transpiler/auto_functions/with_whitespace.func", "--output-cpp", "/tmp/auto_functions.cpp", "--output-header", "/tmp/auto_functions.h"])
assert os.path.isfile("/tmp/auto_functions.cpp")
assert os.path.isfile("/tmp/auto_functions.h")
assert "Whitespace" in open("/tmp/auto_functions.h").read()
assert "Whitespace" in open("/tmp/auto_functions.cpp").read()
for x in range(1,6):
assert "//PASS" + str(x) in open("/tmp/auto_functions.cpp").read()
def test_run_transpiler_as_process(self):
self.clean_auto_funcs()
subprocess.check_call(["./transpiler.py", "--format", "--input-files", "tests/files/transpiler/auto_functions/wait.func", "--output-cpp", "/tmp/auto_functions.cpp", "--output-header", "/tmp/auto_functions.h"])
assert os.path.isfile("/tmp/auto_functions.cpp")
assert os.path.isfile("/tmp/auto_functions.h")
assert "Wait" in open("/tmp/auto_functions.h").read()
assert "Wait" in open("/tmp/auto_functions.cpp").read()
def test_transpiler_silent_flag(self):
self.clean_auto_funcs()
assert subprocess.check_output(["./transpiler.py", "-qqq", "--output-cpp", "/tmp/auto_functions.cpp", "--output-header", "/tmp/auto_functions.h"]).decode("utf-8") == ""
    def test_transpiler_deterministic_output(self):
self.clean_auto_funcs()
os.mkdir("/tmp/auto_files")
for n in range(1,10):
os.mkdir("/tmp/auto_files/{}".format(n))
transpiler.main(["--format", "--input-files", "tests/files/transpiler/auto_functions_compile/mock.func", "--output-cpp", "/tmp/auto_files/{}.cpp".format(n), "--output-header", "/tmp/auto_files/{}.h".format(n)])
for n in range(1,9):
assert subprocess.check_output(["diff", "/tmp/auto_files/{}".format(n), "/tmp/auto_files/{}".format(n+1)]).decode("utf-8") == ""
| mit | -1,578,675,538,517,193,700 | 40.969388 | 222 | 0.621201 | false |
jigarmistry/sofi | sofi/ui/label.py | 2 | 1145 | from .element import Element
from .span import Span
class Label(Element):
"""Implements a Bootstrap Label <span class="label"> tag"""
SEVERITIES = { 'danger': 'label-danger',
'success': 'label-success',
'info': 'label-info',
'warning': 'label-warning',
'primary': 'label-primary',
'default': 'label-default'
}
def __init__(self, text=None, severity=None, cl=None, ident=None, style=None, attrs=None):
super().__init__(cl=cl, ident=ident, style=style, attrs=attrs)
self.text = text
self.severity = severity
def __repr__(self):
return "<Label(text='" + self.text + "')>"
def __str__(self):
classes = [ "label" ]
if self.severity:
classes.append(Label.SEVERITIES[self.severity])
else:
classes.append(Label.SEVERITIES['default'])
if self.cl:
classes.append(self.cl)
return str(Span(text=self.text, cl=" ".join(classes), ident=self.ident,
style=self.style, attrs=self.attrs))
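# Illustrative usage sketch (not part of the original module). The exact
# markup depends on how Span renders, but something like the following would
# be produced:
#
#     str(Label(text="New", severity="info"))
#     # -> roughly '<span class="label label-info">New</span>'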
| mit | 3,120,153,539,774,151,700 | 29.945946 | 94 | 0.534498 | false |
malaterre/ITK | Examples/DataRepresentation/Image/ImageToArray.py | 2 | 1238 | #==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
from InsightToolkit import *
from numarray import *
from sys import argv
reader = itkImageFileReaderUC2_New()
connector = itkPyBufferUC2_New()
reader.SetFileName( argv[1] )
reader.Update()
print "ready to convert image into array"
buffer = connector.GetArrayFromImage( reader.GetOutput() )
writer = itkImageFileWriterUC2_New()
writer.SetFileName( argv[2] )
print "ready to convert array into image"
writer.SetInput( connector.GetImageFromArray( buffer ) )
writer.Update()
| apache-2.0 | -5,761,217,888,762,148,000 | 26.511111 | 77 | 0.652666 | false |
werbk/task-7.20 | generator/contact.py | 4 | 2518 |
from fixture.TestBase import random_string
from tests_contract.contact_helper import Contact
import os.path
import json
import getopt
import sys
import jsonpickle
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number=", "file="])
except getopt.GetoptError as err:
    # getopt has no usage() helper; report the parsing error instead
    print(err)
    sys.exit(2)
n = 5
f = 'data/contacts.json'
for o, a in opts:
if o == "-n":
        n = int(a)
elif o == "-f":
f = str(a)
test_data = [
Contact(first_name='', middle_name='', last_name='', nickname='', title='',
company_name='', address_name='', home='', mobile='', work='', fax='',
email1='', email2='', email3='', homepage='', address='', phone='', notes='',
contact_name='')]+[Contact(first_name=random_string('first_name', 3),
middle_name=random_string('middle_name', 3),
last_name=random_string('last_name', 3),
nickname=random_string('nickname', 3),
title=random_string('title', 3),
company_name=random_string('company_name', 3),
address_name=random_string('address_name', 3),
home=random_string('home', 3),
mobile=random_string('mobile', 3),
work=random_string('work', 3),
fax=random_string('fax', 3),
email1=random_string('email1', 3),
email2=random_string('email2', 3),
email3=random_string('email3', 3),
homepage=random_string('homepage', 3),
address=random_string('address', 3),
phone=random_string('phone', 3),
notes=random_string('notes', 3),
contact_name=random_string('contact_name', 3)) for i in range(n)]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, 'w') as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(test_data)) | apache-2.0 | -1,664,942,867,452,397,800 | 45.648148 | 111 | 0.437252 | false |
PyCQA/pydocstyle | src/pydocstyle/checker.py | 3 | 42125 | """Parsed source code checkers for docstring violations."""
import ast
import string
import tokenize as tk
from collections import namedtuple
from itertools import chain, takewhile
from re import compile as re
from . import violations
from .config import IllegalConfiguration
from .parser import (
AllError,
Class,
Definition,
Function,
Method,
Module,
NestedClass,
NestedFunction,
Package,
ParseError,
Parser,
StringIO,
)
from .utils import (
common_prefix_length,
is_blank,
log,
pairwise,
strip_non_alphanumeric,
)
from .wordlists import IMPERATIVE_BLACKLIST, IMPERATIVE_VERBS, stem
__all__ = ('check',)
def check_for(kind, terminal=False):
def decorator(f):
f._check_for = kind
f._terminal = terminal
return f
return decorator
class ConventionChecker:
"""Checker for PEP 257, NumPy and Google conventions.
D10x: Missing docstrings
D20x: Whitespace issues
D30x: Docstring formatting
D40x: Docstring content issues
"""
NUMPY_SECTION_NAMES = (
'Short Summary',
'Extended Summary',
'Parameters',
'Returns',
'Yields',
'Other Parameters',
'Raises',
'See Also',
'Notes',
'References',
'Examples',
'Attributes',
'Methods',
)
GOOGLE_SECTION_NAMES = (
'Args',
'Arguments',
'Attention',
'Attributes',
'Caution',
'Danger',
'Error',
'Example',
'Examples',
'Hint',
'Important',
'Keyword Args',
'Keyword Arguments',
'Methods',
'Note',
'Notes',
'Return',
'Returns',
'Raises',
'References',
'See Also',
'Tip',
'Todo',
'Warning',
'Warnings',
'Warns',
'Yield',
'Yields',
)
# Examples that will be matched -
# " random: Test" where random will be captured as the param
# " random : test" where random will be captured as the param
# " random_t (Test) : test " where random_t will be captured as the param
# Matches anything that fulfills all the following conditions:
GOOGLE_ARGS_REGEX = re(
# Begins with 0 or more whitespace characters
r"^\s*"
# Followed by 1 or more unicode chars, numbers or underscores
        # The above is captured as the first group as this is the parameter name.
r"(\w+)"
# Followed by 0 or more whitespace characters
r"\s*"
# Matches patterns contained within round brackets.
# The `.*?`matches any sequence of characters in a non-greedy
# way (denoted by the `*?`)
r"(\(.*?\))?"
# Followed by 0 or more whitespace chars
r"\s*"
# Followed by a colon
r":"
# Followed by 1 or more characters - which is the docstring for the parameter
".+"
)
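    # Illustrative sketch (hypothetical docstring line, not taken from any
    # real source): group(1) of a successful match is the parameter name.
    #
    #     m = ConventionChecker.GOOGLE_ARGS_REGEX.match(
    #         "    port (int): The TCP port to bind.")
    #     m.group(1)  # -> "port"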
def check_source(
self,
source,
filename,
ignore_decorators=None,
ignore_inline_noqa=False,
):
module = parse(StringIO(source), filename)
for definition in module:
for this_check in self.checks:
terminate = False
if isinstance(definition, this_check._check_for):
skipping_all = definition.skipped_error_codes == 'all'
decorator_skip = ignore_decorators is not None and any(
len(ignore_decorators.findall(dec.name)) > 0
for dec in definition.decorators
)
if (
ignore_inline_noqa or not skipping_all
) and not decorator_skip:
error = this_check(
self, definition, definition.docstring
)
else:
error = None
errors = error if hasattr(error, '__iter__') else [error]
for error in errors:
if error is not None and (
ignore_inline_noqa
or error.code not in definition.skipped_error_codes
):
partition = this_check.__doc__.partition('.\n')
message, _, explanation = partition
error.set_context(
explanation=explanation, definition=definition
)
yield error
if this_check._terminal:
terminate = True
break
if terminate:
break
@property
def checks(self):
all = [
this_check
for this_check in vars(type(self)).values()
if hasattr(this_check, '_check_for')
]
return sorted(all, key=lambda this_check: not this_check._terminal)
@check_for(Definition, terminal=True)
def check_docstring_missing(self, definition, docstring):
"""D10{0,1,2,3}: Public definitions should have docstrings.
All modules should normally have docstrings. [...] all functions and
classes exported by a module should also have docstrings. Public
methods (including the __init__ constructor) should also have
docstrings.
Note: Public (exported) definitions are either those with names listed
in __all__ variable (if present), or those that do not start
with a single underscore.
"""
if (
not docstring
and definition.is_public
or docstring
and is_blank(ast.literal_eval(docstring))
):
codes = {
Module: violations.D100,
Class: violations.D101,
NestedClass: violations.D106,
Method: lambda: violations.D105()
if definition.is_magic
else (
violations.D107()
if definition.is_init
else (
violations.D102()
if not definition.is_overload
else None
)
),
NestedFunction: violations.D103,
Function: (
lambda: violations.D103()
if not definition.is_overload
else None
),
Package: violations.D104,
}
return codes[type(definition)]()
@check_for(Definition)
def check_one_liners(self, definition, docstring):
"""D200: One-liner docstrings should fit on one line with quotes.
The closing quotes are on the same line as the opening quotes.
This looks better for one-liners.
"""
if docstring:
lines = ast.literal_eval(docstring).split('\n')
if len(lines) > 1:
non_empty_lines = sum(1 for l in lines if not is_blank(l))
if non_empty_lines == 1:
return violations.D200(len(lines))
@check_for(Function)
def check_no_blank_before(self, function, docstring): # def
"""D20{1,2}: No blank lines allowed around function/method docstring.
There's no blank line either before or after the docstring unless directly
followed by an inner function or class.
"""
if docstring:
before, _, after = function.source.partition(docstring)
blanks_before = list(map(is_blank, before.split('\n')[:-1]))
blanks_after = list(map(is_blank, after.split('\n')[1:]))
blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
blanks_after_count = sum(takewhile(bool, blanks_after))
if blanks_before_count != 0:
yield violations.D201(blanks_before_count)
if not all(blanks_after) and blanks_after_count != 0:
# Report a D202 violation if the docstring is followed by a blank line
# and the blank line is not itself followed by an inner function or
# class.
if not (
blanks_after_count == 1
and re(r"\s+(?:(?:class|def|async def)\s|@)").match(after)
):
yield violations.D202(blanks_after_count)
@check_for(Class)
def check_blank_before_after_class(self, class_, docstring):
"""D20{3,4}: Class docstring should have 1 blank line around them.
Insert a blank line before and after all docstrings (one-line or
multi-line) that document a class -- generally speaking, the class's
methods are separated from each other by a single blank line, and the
docstring needs to be offset from the first method by a blank line;
for symmetry, put a blank line between the class header and the
docstring.
"""
# NOTE: this gives false-positive in this case
# class Foo:
#
# """Docstring."""
#
#
# # comment here
# def foo(): pass
if docstring:
before, _, after = class_.source.partition(docstring)
blanks_before = list(map(is_blank, before.split('\n')[:-1]))
blanks_after = list(map(is_blank, after.split('\n')[1:]))
blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
blanks_after_count = sum(takewhile(bool, blanks_after))
if blanks_before_count != 0:
yield violations.D211(blanks_before_count)
if blanks_before_count != 1:
yield violations.D203(blanks_before_count)
if not all(blanks_after) and blanks_after_count != 1:
yield violations.D204(blanks_after_count)
@check_for(Definition)
def check_blank_after_summary(self, definition, docstring):
"""D205: Put one blank line between summary line and description.
Multi-line docstrings consist of a summary line just like a one-line
docstring, followed by a blank line, followed by a more elaborate
description. The summary line may be used by automatic indexing tools;
it is important that it fits on one line and is separated from the
rest of the docstring by a blank line.
"""
if docstring:
lines = ast.literal_eval(docstring).strip().split('\n')
if len(lines) > 1:
post_summary_blanks = list(map(is_blank, lines[1:]))
blanks_count = sum(takewhile(bool, post_summary_blanks))
if blanks_count != 1:
return violations.D205(blanks_count)
@staticmethod
def _get_docstring_indent(definition, docstring):
"""Return the indentation of the docstring's opening quotes."""
before_docstring, _, _ = definition.source.partition(docstring)
_, _, indent = before_docstring.rpartition('\n')
return indent
@check_for(Definition)
def check_indent(self, definition, docstring):
"""D20{6,7,8}: The entire docstring should be indented same as code.
The entire docstring is indented the same as the quotes at its
first line.
"""
if docstring:
indent = self._get_docstring_indent(definition, docstring)
lines = docstring.split('\n')
if len(lines) > 1:
# First line and line continuations need no indent.
lines = [
line
for i, line in enumerate(lines)
if i and not lines[i - 1].endswith('\\')
]
indents = [leading_space(l) for l in lines if not is_blank(l)]
if set(' \t') == set(''.join(indents) + indent):
yield violations.D206()
if (len(indents) > 1 and min(indents[:-1]) > indent) or (
len(indents) > 0 and indents[-1] > indent
):
yield violations.D208()
if len(indents) > 0 and min(indents) < indent:
yield violations.D207()
@check_for(Definition)
def check_newline_after_last_paragraph(self, definition, docstring):
"""D209: Put multi-line docstring closing quotes on separate line.
Unless the entire docstring fits on a line, place the closing
quotes on a line by themselves.
"""
if docstring:
lines = [
l
for l in ast.literal_eval(docstring).split('\n')
if not is_blank(l)
]
if len(lines) > 1:
if docstring.split("\n")[-1].strip() not in ['"""', "'''"]:
return violations.D209()
@check_for(Definition)
def check_surrounding_whitespaces(self, definition, docstring):
"""D210: No whitespaces allowed surrounding docstring text."""
if docstring:
lines = ast.literal_eval(docstring).split('\n')
if (
lines[0].startswith(' ')
or len(lines) == 1
and lines[0].endswith(' ')
):
return violations.D210()
@check_for(Definition)
def check_multi_line_summary_start(self, definition, docstring):
"""D21{2,3}: Multi-line docstring summary style check.
A multi-line docstring summary should start either at the first,
or separately at the second line of a docstring.
"""
if docstring:
start_triple = [
'"""',
"'''",
'u"""',
"u'''",
'r"""',
"r'''",
'ur"""',
"ur'''",
]
lines = ast.literal_eval(docstring).split('\n')
if len(lines) > 1:
first = docstring.split("\n")[0].strip().lower()
if first in start_triple:
return violations.D212()
else:
return violations.D213()
@check_for(Definition)
def check_triple_double_quotes(self, definition, docstring):
r'''D300: Use """triple double quotes""".
For consistency, always use """triple double quotes""" around
docstrings. Use r"""raw triple double quotes""" if you use any
backslashes in your docstrings. For Unicode docstrings, use
u"""Unicode triple-quoted strings""".
Note: Exception to this is made if the docstring contains
""" quotes in its body.
'''
if docstring:
if '"""' in ast.literal_eval(docstring):
# Allow ''' quotes if docstring contains """, because
# otherwise """ quotes could not be expressed inside
# docstring. Not in PEP 257.
regex = re(r"[uU]?[rR]?'''[^'].*")
else:
regex = re(r'[uU]?[rR]?"""[^"].*')
if not regex.match(docstring):
illegal_matcher = re(r"""[uU]?[rR]?("+|'+).*""")
illegal_quotes = illegal_matcher.match(docstring).group(1)
return violations.D300(illegal_quotes)
@check_for(Definition)
def check_backslashes(self, definition, docstring):
r'''D301: Use r""" if any backslashes in a docstring.
Use r"""raw triple double quotes""" if you use any backslashes
(\) in your docstrings.
Exceptions are backslashes for line-continuation and unicode escape
sequences \N... and \u... These are considered intended unescaped
content in docstrings.
'''
# Just check that docstring is raw, check_triple_double_quotes
# ensures the correct quotes.
if (
docstring
and re(r'\\[^\nuN]').search(docstring)
and not docstring.startswith(('r', 'ur'))
):
return violations.D301()
@staticmethod
def _check_ends_with(docstring, chars, violation):
"""First line ends with one of `chars`.
First line of the docstring should end with one of the characters in `chars`.
`chars` supports either a `str` or an `Iterable[str]`. If the condition is
evaluated to be false, it raises `violation`.
"""
if docstring:
summary_line = ast.literal_eval(docstring).strip().split('\n')[0]
if not summary_line.endswith(chars):
return violation(summary_line[-1])
@check_for(Definition)
def check_ends_with_period(self, definition, docstring):
"""D400: First line should end with a period.
The [first line of a] docstring is a phrase ending in a period.
"""
return self._check_ends_with(docstring, '.', violations.D400)
@check_for(Definition)
def check_ends_with_punctuation(self, definition, docstring):
"""D415: should end with proper punctuation.
The [first line of a] docstring is a phrase ending in a period,
question mark, or exclamation point
"""
return self._check_ends_with(
docstring, ('.', '!', '?'), violations.D415
)
@check_for(Function)
def check_imperative_mood(self, function, docstring): # def context
"""D401: First line should be in imperative mood: 'Do', not 'Does'.
[Docstring] prescribes the function or method's effect as a command:
("Do this", "Return that"), not as a description; e.g. don't write
"Returns the pathname ...".
"""
if docstring and not function.is_test:
stripped = ast.literal_eval(docstring).strip()
if stripped:
first_word = strip_non_alphanumeric(stripped.split()[0])
check_word = first_word.lower()
if check_word in IMPERATIVE_BLACKLIST:
return violations.D401b(first_word)
correct_forms = IMPERATIVE_VERBS.get(stem(check_word))
if correct_forms and check_word not in correct_forms:
best = max(
correct_forms,
key=lambda f: common_prefix_length(check_word, f),
)
return violations.D401(best.capitalize(), first_word)
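    # Illustrative behaviour sketch (hypothetical docstrings; assumes the
    # bundled wordlists contain the usual verb forms):
    #
    #     """Returns the pathname."""  -> D401, suggesting "Return"
    #     """Return the pathname."""   -> no error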
@check_for(Function)
def check_no_signature(self, function, docstring): # def context
"""D402: First line should not be function's or method's "signature".
The one-line docstring should NOT be a "signature" reiterating the
function/method parameters (which can be obtained by introspection).
"""
if docstring:
first_line = ast.literal_eval(docstring).strip().split('\n')[0]
if function.name + '(' in first_line.replace(' ', ''):
return violations.D402()
@check_for(Function)
def check_capitalized(self, function, docstring):
"""D403: First word of the first line should be properly capitalized.
The [first line of a] docstring is a phrase ending in a period.
"""
if docstring:
first_word = ast.literal_eval(docstring).split()[0]
if first_word == first_word.upper():
return
for char in first_word:
if char not in string.ascii_letters and char != "'":
return
if first_word != first_word.capitalize():
return violations.D403(first_word.capitalize(), first_word)
@check_for(Function)
def check_if_needed(self, function, docstring):
"""D418: Function decorated with @overload shouldn't contain a docstring.
Functions that are decorated with @overload are definitions,
and are for the benefit of the type checker only,
since they will be overwritten by the non-@overload-decorated definition.
"""
if docstring and function.is_overload:
return violations.D418()
@check_for(Definition)
def check_starts_with_this(self, function, docstring):
"""D404: First word of the docstring should not be `This`.
Docstrings should use short, simple language. They should not begin
with "This class is [..]" or "This module contains [..]".
"""
if not docstring:
return
stripped = ast.literal_eval(docstring).strip()
if not stripped:
return
first_word = strip_non_alphanumeric(stripped.split()[0])
if first_word.lower() == 'this':
return violations.D404()
@staticmethod
def _is_docstring_section(context):
"""Check if the suspected context is really a section header.
Lets have a look at the following example docstring:
'''Title.
Some part of the docstring that specifies what the function
returns. <----- Not a real section name. It has a suffix and the
previous line is not empty and does not end with
a punctuation sign.
This is another line in the docstring. It describes stuff,
but we forgot to add a blank line between it and the section name.
Parameters <-- A real section name. The previous line ends with
---------- a period, therefore it is in a new
grammatical context.
param : int
examples : list <------- Not a section - previous line doesn't end
A list of examples. with punctuation.
notes : list <---------- Not a section - there's text after the
A list of notes. colon.
Notes: <--- Suspected as a context because there's a suffix to the
----- section, but it's a colon so it's probably a mistake.
Bla.
'''
To make sure this is really a section we check these conditions:
* There's no suffix to the section name or it's just a colon AND
* The previous line is empty OR it ends with punctuation.
If one of the conditions is true, we will consider the line as
a section name.
"""
section_name_suffix = (
context.line.strip().lstrip(context.section_name.strip()).strip()
)
section_suffix_is_only_colon = section_name_suffix == ':'
punctuation = [',', ';', '.', '-', '\\', '/', ']', '}', ')']
prev_line_ends_with_punctuation = any(
context.previous_line.strip().endswith(x) for x in punctuation
)
this_line_looks_like_a_section_name = (
is_blank(section_name_suffix) or section_suffix_is_only_colon
)
prev_line_looks_like_end_of_paragraph = (
prev_line_ends_with_punctuation or is_blank(context.previous_line)
)
return (
this_line_looks_like_a_section_name
and prev_line_looks_like_end_of_paragraph
)
@classmethod
def _check_blanks_and_section_underline(
cls, section_name, context, indentation
):
"""D4{07,08,09,12,14}, D215: Section underline checks.
Check for correct formatting for docstring sections. Checks that:
* The line that follows the section name contains
dashes (D40{7,8}).
* The amount of dashes is equal to the length of the section
name (D409).
* The section's content does not begin in the line that follows
the section header (D412).
* The section has no content (D414).
* The indentation of the dashed line is equal to the docstring's
indentation (D215).
"""
blank_lines_after_header = 0
for line in context.following_lines:
if not is_blank(line):
break
blank_lines_after_header += 1
else:
# There are only blank lines after the header.
yield violations.D407(section_name)
yield violations.D414(section_name)
return
non_empty_line = context.following_lines[blank_lines_after_header]
dash_line_found = ''.join(set(non_empty_line.strip())) == '-'
if not dash_line_found:
yield violations.D407(section_name)
if blank_lines_after_header > 0:
yield violations.D412(section_name)
else:
if blank_lines_after_header > 0:
yield violations.D408(section_name)
if non_empty_line.strip() != "-" * len(section_name):
yield violations.D409(
len(section_name),
section_name,
len(non_empty_line.strip()),
)
if leading_space(non_empty_line) > indentation:
yield violations.D215(section_name)
line_after_dashes_index = blank_lines_after_header + 1
# If the line index after the dashes is in range (perhaps we have
# a header + underline followed by another section header).
if line_after_dashes_index < len(context.following_lines):
line_after_dashes = context.following_lines[
line_after_dashes_index
]
if is_blank(line_after_dashes):
rest_of_lines = context.following_lines[
line_after_dashes_index:
]
if not is_blank(''.join(rest_of_lines)):
yield violations.D412(section_name)
else:
yield violations.D414(section_name)
else:
yield violations.D414(section_name)
@classmethod
def _check_common_section(
cls, docstring, definition, context, valid_section_names
):
"""D4{05,10,11,13}, D214: Section name checks.
Check for valid section names. Checks that:
* The section name is properly capitalized (D405).
* The section is not over-indented (D214).
* There's a blank line after the section (D410, D413).
* There's a blank line before the section (D411).
Also yields all the errors from `_check_blanks_and_section_underline`.
"""
indentation = cls._get_docstring_indent(definition, docstring)
capitalized_section = context.section_name.title()
if (
context.section_name not in valid_section_names
and capitalized_section in valid_section_names
):
yield violations.D405(capitalized_section, context.section_name)
if leading_space(context.line) > indentation:
yield violations.D214(capitalized_section)
if not context.following_lines or not is_blank(
context.following_lines[-1]
):
if context.is_last_section:
yield violations.D413(capitalized_section)
else:
yield violations.D410(capitalized_section)
if not is_blank(context.previous_line):
yield violations.D411(capitalized_section)
yield from cls._check_blanks_and_section_underline(
capitalized_section, context, indentation
)
@classmethod
def _check_numpy_section(cls, docstring, definition, context):
"""D406: NumPy-style section name checks.
Check for valid section names. Checks that:
* The section name has no superfluous suffix to it (D406).
Additionally, also yield all violations from `_check_common_section`
which are style-agnostic section checks.
"""
indentation = cls._get_docstring_indent(definition, docstring)
capitalized_section = context.section_name.title()
yield from cls._check_common_section(
docstring, definition, context, cls.NUMPY_SECTION_NAMES
)
suffix = context.line.strip().lstrip(context.section_name)
if suffix:
yield violations.D406(capitalized_section, context.line.strip())
if capitalized_section == "Parameters":
yield from cls._check_parameters_section(
docstring, definition, context
)
@staticmethod
def _check_parameters_section(docstring, definition, context):
"""D417: `Parameters` section check for numpy style.
Check for a valid `Parameters` section. Checks that:
* The section documents all function arguments (D417)
except `self` or `cls` if it is a method.
"""
docstring_args = set()
section_level_indent = leading_space(context.line)
# Join line continuations, then resplit by line.
content = (
'\n'.join(context.following_lines).replace('\\\n', '').split('\n')
)
for current_line, next_line in zip(content, content[1:]):
# All parameter definitions in the Numpy parameters
# section must be at the same indent level as the section
# name.
# Also, we ensure that the following line is indented,
# and has some string, to ensure that the parameter actually
# has a description.
# This means, this is a parameter doc with some description
if (
(leading_space(current_line) == section_level_indent)
and (
len(leading_space(next_line))
> len(leading_space(current_line))
)
and next_line.strip()
):
# In case the parameter has type definitions, it
# will have a colon
if ":" in current_line:
parameters, parameter_type = current_line.split(":", 1)
# Else, we simply have the list of parameters defined
# on the current line.
else:
parameters = current_line.strip()
# Numpy allows grouping of multiple parameters of same
# type in the same line. They are comma separated.
parameter_list = parameters.split(",")
for parameter in parameter_list:
docstring_args.add(parameter.strip())
yield from ConventionChecker._check_missing_args(
docstring_args, definition
)
@staticmethod
def _check_args_section(docstring, definition, context):
"""D417: `Args` section checks.
Check for a valid `Args` or `Argument` section. Checks that:
* The section documents all function arguments (D417)
except `self` or `cls` if it is a method.
"""
docstring_args = set()
for line in context.following_lines:
match = ConventionChecker.GOOGLE_ARGS_REGEX.match(line)
if match:
docstring_args.add(match.group(1))
yield from ConventionChecker._check_missing_args(
docstring_args, definition
)
@staticmethod
def _check_missing_args(docstring_args, definition):
"""D417: Yield error for missing arguments in docstring.
Given a list of arguments found in the docstring and the
callable definition, it checks if all the arguments of the
callable are present in the docstring, else it yields a
D417 with a list of missing arguments.
"""
if isinstance(definition, Function):
function_args = get_function_args(definition.source)
# If the method isn't static, then we skip the first
# positional argument as it is `cls` or `self`
if definition.kind == 'method' and not definition.is_static:
function_args = function_args[1:]
# Filtering out any arguments prefixed with `_` marking them
# as private.
function_args = [
arg_name
for arg_name in function_args
if not is_def_arg_private(arg_name)
]
missing_args = set(function_args) - docstring_args
if missing_args:
yield violations.D417(
", ".join(sorted(missing_args)), definition.name
)
@classmethod
def _check_google_section(cls, docstring, definition, context):
"""D416: Google-style section name checks.
Check for valid section names. Checks that:
* The section does not contain any blank line between its name
and content (D412).
* The section is not empty (D414).
* The section name has colon as a suffix (D416).
Additionally, also yield all violations from `_check_common_section`
which are style-agnostic section checks.
"""
capitalized_section = context.section_name.title()
yield from cls._check_common_section(
docstring, definition, context, cls.GOOGLE_SECTION_NAMES
)
suffix = context.line.strip().lstrip(context.section_name)
if suffix != ":":
yield violations.D416(
capitalized_section + ":", context.line.strip()
)
if capitalized_section in ("Args", "Arguments"):
yield from cls._check_args_section(docstring, definition, context)
@staticmethod
def _get_section_contexts(lines, valid_section_names):
"""Generate `SectionContext` objects for valid sections.
Given a list of `valid_section_names`, generate an
`Iterable[SectionContext]` which provides:
* Section Name
* String value of the previous line
* The section line
* Following lines till the next section
* Line index of the beginning of the section in the docstring
* Boolean indicating whether the section is the last section.
for each valid section.
"""
lower_section_names = [s.lower() for s in valid_section_names]
def _suspected_as_section(_line):
result = get_leading_words(_line.lower())
return result in lower_section_names
# Finding our suspects.
suspected_section_indices = [
i for i, line in enumerate(lines) if _suspected_as_section(line)
]
SectionContext = namedtuple(
'SectionContext',
(
'section_name',
'previous_line',
'line',
'following_lines',
'original_index',
'is_last_section',
),
)
# First - create a list of possible contexts. Note that the
# `following_lines` member is until the end of the docstring.
contexts = (
SectionContext(
get_leading_words(lines[i].strip()),
lines[i - 1],
lines[i],
lines[i + 1 :],
i,
False,
)
for i in suspected_section_indices
)
# Now that we have manageable objects - rule out false positives.
contexts = (
c for c in contexts if ConventionChecker._is_docstring_section(c)
)
# Now we shall trim the `following lines` field to only reach the
# next section name.
for a, b in pairwise(contexts, None):
end = -1 if b is None else b.original_index
yield SectionContext(
a.section_name,
a.previous_line,
a.line,
lines[a.original_index + 1 : end],
a.original_index,
b is None,
)
def _check_numpy_sections(self, lines, definition, docstring):
"""NumPy-style docstring sections checks.
Check the general format of a sectioned docstring:
'''This is my one-liner.
Short Summary
-------------
This is my summary.
Returns
-------
None.
'''
Section names appear in `NUMPY_SECTION_NAMES`.
Yields all violation from `_check_numpy_section` for each valid
Numpy-style section.
"""
found_any_numpy_section = False
for ctx in self._get_section_contexts(lines, self.NUMPY_SECTION_NAMES):
found_any_numpy_section = True
yield from self._check_numpy_section(docstring, definition, ctx)
return found_any_numpy_section
def _check_google_sections(self, lines, definition, docstring):
"""Google-style docstring section checks.
Check the general format of a sectioned docstring:
'''This is my one-liner.
Note:
This is my summary.
Returns:
None.
'''
Section names appear in `GOOGLE_SECTION_NAMES`.
Yields all violation from `_check_google_section` for each valid
Google-style section.
"""
for ctx in self._get_section_contexts(
lines, self.GOOGLE_SECTION_NAMES
):
yield from self._check_google_section(docstring, definition, ctx)
@check_for(Definition)
def check_docstring_sections(self, definition, docstring):
"""Check for docstring sections."""
if not docstring:
return
lines = docstring.split("\n")
if len(lines) < 2:
return
found_numpy = yield from self._check_numpy_sections(
lines, definition, docstring
)
if not found_numpy:
yield from self._check_google_sections(
lines, definition, docstring
)
parse = Parser()
def check(
filenames,
select=None,
ignore=None,
ignore_decorators=None,
ignore_inline_noqa=False,
):
"""Generate docstring errors that exist in `filenames` iterable.
By default, the PEP-257 convention is checked. To specifically define the
set of error codes to check for, supply either `select` or `ignore` (but
not both). In either case, the parameter should be a collection of error
code strings, e.g., {'D100', 'D404'}.
When supplying `select`, only specified error codes will be reported.
When supplying `ignore`, all error codes which were not specified will be
reported.
    Note that ignored error codes refer to the entire set of possible
    error codes, which is larger than just the PEP-257 convention. For your
convenience, you may use `pydocstyle.violations.conventions.pep257` as
a base set to add or remove errors from.
`ignore_inline_noqa` controls if `# noqa` comments are respected or not.
Examples
    --------
>>> check(['pydocstyle.py'])
<generator object check at 0x...>
>>> check(['pydocstyle.py'], select=['D100'])
<generator object check at 0x...>
>>> check(['pydocstyle.py'], ignore=conventions.pep257 - {'D100'})
<generator object check at 0x...>
"""
if select is not None and ignore is not None:
raise IllegalConfiguration(
'Cannot pass both select and ignore. '
'They are mutually exclusive.'
)
elif select is not None:
checked_codes = select
elif ignore is not None:
checked_codes = list(
set(violations.ErrorRegistry.get_error_codes()) - set(ignore)
)
else:
checked_codes = violations.conventions.pep257
for filename in filenames:
log.info('Checking file %s.', filename)
try:
with tk.open(filename) as file:
source = file.read()
for error in ConventionChecker().check_source(
source, filename, ignore_decorators, ignore_inline_noqa
):
code = getattr(error, 'code', None)
if code in checked_codes:
yield error
except (OSError, AllError, ParseError) as error:
log.warning('Error in file %s: %s', filename, error)
yield error
except tk.TokenError:
yield SyntaxError('invalid syntax in file %s' % filename)
def is_ascii(string):
"""Return a boolean indicating if `string` only has ascii characters."""
return all(ord(char) < 128 for char in string)
def leading_space(string):
"""Return any leading space from `string`."""
return re(r'\s*').match(string).group()
def get_leading_words(line):
"""Return any leading set of words from `line`.
For example, if `line` is " Hello world!!!", returns "Hello world".
"""
result = re(r"[\w ]+").match(line.strip())
if result is not None:
return result.group()
def is_def_arg_private(arg_name):
"""Return a boolean indicating if the argument name is private."""
return arg_name.startswith("_")
def get_function_args(function_source):
"""Return the function arguments given the source-code string."""
# We are stripping the whitespace from the left of the
# function source.
# This is so that if the docstring has incorrectly
# indented lines, which are at a lower indent than the
# function source, we still dedent the source correctly
# and the AST parser doesn't throw an error.
try:
function_arg_node = ast.parse(function_source.lstrip()).body[0].args
except SyntaxError:
    # If we still get a syntax error, we don't want the
    # checker to crash. Instead we just return a blank list.
return []
arg_nodes = function_arg_node.args
kwonly_arg_nodes = function_arg_node.kwonlyargs
return [arg_node.arg for arg_node in chain(arg_nodes, kwonly_arg_nodes)]
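# Illustrative sketch (not part of the original module):
#
#     get_function_args("def f(a, b, *args, c=1, **kwargs): pass")
#     # -> ['a', 'b', 'c']; *args and **kwargs are not reported, since only
#     #    plain positional and keyword-only argument nodes are collected.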
| mit | -6,593,910,018,758,575,000 | 35.694251 | 86 | 0.562777 | false |
FX31337/FX-BT-Scripts | mt_modify.py | 1 | 5633 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import sys
import struct
from copy import copy
from bstruct import BStruct
from bstruct_defs import SymbolsRaw
#
# Exceptions for internal use
#
class WrongStructFormat(Exception):
pass
class NoSuchField(Exception):
pass
class InvalidDataFormat(Exception):
pass
class InvalidArgument(Exception):
pass
#
def modify_field(ss, field_name, value):
"""
Modify the field named 'field_name' in the BStruct 'ss'.
'value' is the new value in string form.
"""
if not isinstance(ss, BStruct):
raise WrongStructFormat()
# Format string that's feed into pack
fmts = None
for (fname, ffmt, *_) in ss._fields:
if fname == field_name:
fmts = ffmt
break
if fmts is None:
raise NoSuchField()
# Try to perform the correct cast.
if fmts[-1] == "c":
raise InvalidArgument("c fields aren't supported yet")
elif fmts[-1] == "s":
value = value.encode("utf-8")
elif fmts[-1] in ["f", "d"]:
value = float(value)
else:
value = int(value, 0)
# Validate the data first
try:
struct.pack(fmts, value)
    except struct.error:
        raise InvalidDataFormat("Invalid data format for field {}".format(field_name))
setattr(ss, field_name, value)
def parse_file(name, strucc):
try:
fp = open(name, "rb")
except OSError as e:
print("Cannot open file '{}' for reading".format(name))
sys.exit(1)
content = []
while True:
buf = fp.read(strucc._size)
if len(buf) != strucc._size:
break
content.append(strucc(buf))
fp.close()
return content
def write_file(name, content):
try:
fp = open(name, "wb")
except OSError as e:
print("Cannot open file '{}' for writing".format(name))
sys.exit(1)
for r in content:
fp.write(r.repack())
fp.close()
def find_in_content(content, field_name, value):
struct_type = type(content[0])
# Make sure the field exists and is a string
ex = [x[0] for x in struct_type._fields if x[0] == field_name and x[1][-1] == "s"]
if len(ex) == 0:
# The field isn't available in this BStruct
raise InvalidArgument(field_name)
for r in content:
v = getattr(r, ex[0])
# Sanitize the value before checking the value
if v.decode("utf-8").rstrip("\0") == value:
return r
raise InvalidArgument(value)
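# Illustrative usage sketch (hypothetical data): with `content` parsed from a
# symbols file, this returns the record whose zero-padded `name` field decodes
# to "EURUSD", or raises InvalidArgument if no such record exists.
#
#     record = find_in_content(content, "name", "EURUSD")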
#
# Filetype specific options
#
class SymbolsRawBundle:
name_field = "name"
sort_field = "name"
need_sort = True
if __name__ == "__main__":
# Parse the arguments
argumentParser = argparse.ArgumentParser(add_help=False)
argumentParser.add_argument(
"-i",
"--input-file",
action="store",
dest="inputFile",
help="input file",
required=True,
)
argumentParser.add_argument(
"-t",
"--input-type",
action="store",
dest="inputType",
help="input type",
required=True,
)
argumentParser.add_argument(
"-k",
"--key-group",
action="store",
dest="keyGroup",
help="group key",
required=True,
)
argumentParser.add_argument(
"-d",
"--delete",
action="store_true",
dest="doDelete",
help="Delete this record",
)
argumentParser.add_argument(
"-a",
"--add",
action="store",
dest="doAdd",
help="Add a new record",
default=None,
)
argumentParser.add_argument(
"-m",
"--modify",
action="append",
dest="doModify",
help="Modify the record data",
)
argumentParser.add_argument(
"-h", "--help", action="help", help="Show this help message and exit"
)
args = argumentParser.parse_args()
if args.inputType != "symbolsraw":
print("Invalid input type")
sys.exit(1)
# A bundle keeps track of various options that are filetype-specific
bundle = SymbolsRawBundle
cont = parse_file(args.inputFile, SymbolsRaw)
# Find the key group first
try:
key_group = find_in_content(cont, bundle.name_field, args.keyGroup)
except InvalidArgument as e:
print("Could not find the -k group '{}'".format(args.keyGroup))
sys.exit(1)
    if args.doAdd is not None:
        # We can't have two symbols with the same name
        try:
            find_in_content(cont, bundle.name_field, args.doAdd)
        except InvalidArgument:
            pass
        else:
            # The lookup succeeded, so the symbol already exists
            print("The symbol {} is already in the file, cannot overwrite it".format(args.doAdd))
            sys.exit(1)
# Clone the old object and modify its name
new_group = copy(key_group)
modify_field(new_group, bundle.name_field, args.doAdd)
cont.append(new_group)
    elif args.doModify is not None:
for opt in args.doModify:
# Options are in the 'name=value' format
val = opt.split("=")
val_name = val[0].strip()
val_value = val[1].strip()
# Perform the modification in place
modify_field(key_group, val_name, val_value)
    elif args.doDelete:
cont.remove(key_group)
# Sort the file content if needed
if bundle.need_sort:
cont.sort(key=lambda x: getattr(x, bundle.sort_field))
# Serialize the file
write_file(args.inputFile, cont)
| mit | 2,481,857,460,083,460,600 | 22.668067 | 88 | 0.578022 | false |
motion2015/edx-platform | lms/djangoapps/instructor/tests/test_enrollment.py | 25 | 23157 | # -*- coding: utf-8 -*-
"""
Unit tests for instructor.enrollment methods.
"""
import json
import mock
from abc import ABCMeta
from courseware.models import StudentModule
from django.conf import settings
from django.test import TestCase
from django.utils.translation import get_language
from django.utils.translation import override as override_language
from nose.plugins.attrib import attr
from student.tests.factories import UserFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from instructor.enrollment import (
EmailEnrollmentState,
enroll_email,
get_email_params,
reset_student_attempts,
send_beta_role_email,
unenroll_email,
render_message_to_string,
)
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from submissions import api as sub_api
from student.models import anonymous_id_for_user
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
@attr('shard_1')
class TestSettableEnrollmentState(TestCase):
""" Test the basis class for enrollment tests. """
def setUp(self):
super(TestSettableEnrollmentState, self).setUp()
self.course_key = SlashSeparatedCourseKey('Robot', 'fAKE', 'C-%-se-%-ID')
def test_mes_create(self):
"""
Test SettableEnrollmentState creation of user.
"""
mes = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False
)
# enrollment objects
eobjs = mes.create_user(self.course_key)
ees = EmailEnrollmentState(self.course_key, eobjs.email)
self.assertEqual(mes, ees)
class TestEnrollmentChangeBase(TestCase):
"""
Test instructor enrollment administration against database effects.
Test methods in derived classes follow a strict format.
`action` is a function which is run
the test will pass if `action` mutates state from `before_ideal` to `after_ideal`
"""
__metaclass__ = ABCMeta
def setUp(self):
super(TestEnrollmentChangeBase, self).setUp()
self.course_key = SlashSeparatedCourseKey('Robot', 'fAKE', 'C-%-se-%-ID')
def _run_state_change_test(self, before_ideal, after_ideal, action):
"""
Runs a state change test.
`before_ideal` and `after_ideal` are SettableEnrollmentState's
`action` is a function which will be run in the middle.
`action` should transition the world from before_ideal to after_ideal
`action` will be supplied the following arguments (None-able arguments)
`email` is an email string
"""
# initialize & check before
print "checking initialization..."
eobjs = before_ideal.create_user(self.course_key)
before = EmailEnrollmentState(self.course_key, eobjs.email)
self.assertEqual(before, before_ideal)
# do action
print "running action..."
action(eobjs.email)
# check after
print "checking effects..."
after = EmailEnrollmentState(self.course_key, eobjs.email)
self.assertEqual(after, after_ideal)
@attr('shard_1')
class TestInstructorEnrollDB(TestEnrollmentChangeBase):
""" Test instructor.enrollment.enroll_email """
def test_enroll(self):
before_ideal = SettableEnrollmentState(
user=True,
enrollment=False,
allowed=False,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False
)
action = lambda email: enroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_again(self):
before_ideal = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False,
)
after_ideal = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False,
)
action = lambda email: enroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_nouser(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False,
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=False,
)
action = lambda email: enroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_nouser_again(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=False,
)
action = lambda email: enroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_nouser_autoenroll(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False,
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=True,
)
action = lambda email: enroll_email(self.course_key, email, auto_enroll=True)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_nouser_change_autoenroll(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=True,
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=False,
)
action = lambda email: enroll_email(self.course_key, email, auto_enroll=False)
return self._run_state_change_test(before_ideal, after_ideal, action)
@attr('shard_1')
class TestInstructorUnenrollDB(TestEnrollmentChangeBase):
""" Test instructor.enrollment.unenroll_email """
def test_unenroll(self):
before_ideal = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=True,
enrollment=False,
allowed=False,
auto_enroll=False
)
action = lambda email: unenroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_unenroll_notenrolled(self):
before_ideal = SettableEnrollmentState(
user=True,
enrollment=False,
allowed=False,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=True,
enrollment=False,
allowed=False,
auto_enroll=False
)
action = lambda email: unenroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_unenroll_disallow(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=True
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False
)
action = lambda email: unenroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_unenroll_norecord(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False
)
action = lambda email: unenroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
@attr('shard_1')
class TestInstructorEnrollmentStudentModule(ModuleStoreTestCase):
""" Test student module manipulations. """
def setUp(self):
super(TestInstructorEnrollmentStudentModule, self).setUp()
store = modulestore()
self.user = UserFactory()
self.course = CourseFactory(
name='fake',
org='course',
run='id',
)
# pylint: disable=no-member
self.course_key = self.course.location.course_key
self.parent = ItemFactory(
category="library_content",
# pylint: disable=no-member
user_id=self.user.id,
parent=self.course,
publish_item=True,
modulestore=store,
)
self.child = ItemFactory(
category="html",
# pylint: disable=no-member
user_id=self.user.id,
parent=self.parent,
publish_item=True,
modulestore=store,
)
self.unrelated = ItemFactory(
category="html",
# pylint: disable=no-member
user_id=self.user.id,
parent=self.course,
publish_item=True,
modulestore=store,
)
parent_state = json.dumps({'attempts': 32, 'otherstuff': 'alsorobots'})
child_state = json.dumps({'attempts': 10, 'whatever': 'things'})
unrelated_state = json.dumps({'attempts': 12, 'brains': 'zombie'})
StudentModule.objects.create(
student=self.user,
course_id=self.course_key,
module_state_key=self.parent.location,
state=parent_state,
)
StudentModule.objects.create(
student=self.user,
course_id=self.course_key,
module_state_key=self.child.location,
state=child_state,
)
StudentModule.objects.create(
student=self.user,
course_id=self.course_key,
module_state_key=self.unrelated.location,
state=unrelated_state,
)
def test_reset_student_attempts(self):
msk = self.course_key.make_usage_key('dummy', 'module')
original_state = json.dumps({'attempts': 32, 'otherstuff': 'alsorobots'})
StudentModule.objects.create(
student=self.user,
course_id=self.course_key,
module_state_key=msk,
state=original_state
)
# lambda to reload the module state from the database
module = lambda: StudentModule.objects.get(student=self.user, course_id=self.course_key, module_state_key=msk)
self.assertEqual(json.loads(module().state)['attempts'], 32)
reset_student_attempts(self.course_key, self.user, msk)
self.assertEqual(json.loads(module().state)['attempts'], 0)
def test_delete_student_attempts(self):
msk = self.course_key.make_usage_key('dummy', 'module')
original_state = json.dumps({'attempts': 32, 'otherstuff': 'alsorobots'})
StudentModule.objects.create(
student=self.user,
course_id=self.course_key,
module_state_key=msk,
state=original_state
)
self.assertEqual(
StudentModule.objects.filter(
student=self.user,
course_id=self.course_key,
module_state_key=msk
).count(), 1)
reset_student_attempts(self.course_key, self.user, msk, delete_module=True)
self.assertEqual(
StudentModule.objects.filter(
student=self.user,
course_id=self.course_key,
module_state_key=msk
).count(), 0)
def test_delete_submission_scores(self):
user = UserFactory()
problem_location = self.course_key.make_usage_key('dummy', 'module')
# Create a student module for the user
StudentModule.objects.create(
student=user,
course_id=self.course_key,
module_state_key=problem_location,
state=json.dumps({})
)
# Create a submission and score for the student using the submissions API
student_item = {
'student_id': anonymous_id_for_user(user, self.course_key),
'course_id': self.course_key.to_deprecated_string(),
'item_id': problem_location.to_deprecated_string(),
'item_type': 'openassessment'
}
submission = sub_api.create_submission(student_item, 'test answer')
sub_api.set_score(submission['uuid'], 1, 2)
# Delete student state using the instructor dash
reset_student_attempts(
self.course_key, user, problem_location,
delete_module=True
)
# Verify that the student's scores have been reset in the submissions API
score = sub_api.get_score(student_item)
self.assertIs(score, None)
def get_state(self, location):
"""Reload and grab the module state from the database"""
return StudentModule.objects.get(
student=self.user, course_id=self.course_key, module_state_key=location
).state
def test_reset_student_attempts_children(self):
parent_state = json.loads(self.get_state(self.parent.location))
self.assertEqual(parent_state['attempts'], 32)
self.assertEqual(parent_state['otherstuff'], 'alsorobots')
child_state = json.loads(self.get_state(self.child.location))
self.assertEqual(child_state['attempts'], 10)
self.assertEqual(child_state['whatever'], 'things')
unrelated_state = json.loads(self.get_state(self.unrelated.location))
self.assertEqual(unrelated_state['attempts'], 12)
self.assertEqual(unrelated_state['brains'], 'zombie')
reset_student_attempts(self.course_key, self.user, self.parent.location)
parent_state = json.loads(self.get_state(self.parent.location))
self.assertEqual(json.loads(self.get_state(self.parent.location))['attempts'], 0)
self.assertEqual(parent_state['otherstuff'], 'alsorobots')
child_state = json.loads(self.get_state(self.child.location))
self.assertEqual(child_state['attempts'], 0)
self.assertEqual(child_state['whatever'], 'things')
unrelated_state = json.loads(self.get_state(self.unrelated.location))
self.assertEqual(unrelated_state['attempts'], 12)
self.assertEqual(unrelated_state['brains'], 'zombie')
def test_delete_submission_scores_attempts_children(self):
parent_state = json.loads(self.get_state(self.parent.location))
self.assertEqual(parent_state['attempts'], 32)
self.assertEqual(parent_state['otherstuff'], 'alsorobots')
child_state = json.loads(self.get_state(self.child.location))
self.assertEqual(child_state['attempts'], 10)
self.assertEqual(child_state['whatever'], 'things')
unrelated_state = json.loads(self.get_state(self.unrelated.location))
self.assertEqual(unrelated_state['attempts'], 12)
self.assertEqual(unrelated_state['brains'], 'zombie')
reset_student_attempts(self.course_key, self.user, self.parent.location, delete_module=True)
self.assertRaises(StudentModule.DoesNotExist, self.get_state, self.parent.location)
self.assertRaises(StudentModule.DoesNotExist, self.get_state, self.child.location)
unrelated_state = json.loads(self.get_state(self.unrelated.location))
self.assertEqual(unrelated_state['attempts'], 12)
self.assertEqual(unrelated_state['brains'], 'zombie')
class EnrollmentObjects(object):
"""
Container for enrollment objects.
`email` - student email
`user` - student User object
`cenr` - CourseEnrollment object
`cea` - CourseEnrollmentAllowed object
Any of the objects except email can be None.
"""
def __init__(self, email, user, cenr, cea):
self.email = email
self.user = user
self.cenr = cenr
self.cea = cea
class SettableEnrollmentState(EmailEnrollmentState):
"""
Settable enrollment state.
Used for testing state changes.
SettableEnrollmentState can be constructed and then
a call to create_user will make objects which
correspond to the state represented in the SettableEnrollmentState.
"""
def __init__(self, user=False, enrollment=False, allowed=False, auto_enroll=False): # pylint: disable=super-init-not-called
self.user = user
self.enrollment = enrollment
self.allowed = allowed
self.auto_enroll = auto_enroll
def __eq__(self, other):
return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        return not self == other
def create_user(self, course_id=None):
"""
Utility method to possibly create and possibly enroll a user.
Creates a state matching the SettableEnrollmentState properties.
Returns a tuple of (
email,
User, (optionally None)
CourseEnrollment, (optionally None)
CourseEnrollmentAllowed, (optionally None)
)
"""
        # If self.user is False, this will only be used to generate an email.
email = "[email protected]"
if self.user:
user = UserFactory()
email = user.email
if self.enrollment:
cenr = CourseEnrollment.enroll(user, course_id)
return EnrollmentObjects(email, user, cenr, None)
else:
return EnrollmentObjects(email, user, None, None)
elif self.allowed:
cea = CourseEnrollmentAllowed.objects.create(
email=email,
course_id=course_id,
auto_enroll=self.auto_enroll,
)
return EnrollmentObjects(email, None, None, cea)
else:
return EnrollmentObjects(email, None, None, None)
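def _example_state_change_flow(course_key):
    """Illustrative sketch, not part of the original suite: how a
    before/after ideal pair drives a state-change test. Assumes the
    enroll_email and EmailEnrollmentState names already imported by
    this module."""
    before_ideal = SettableEnrollmentState(user=True, enrollment=False)
    after_ideal = SettableEnrollmentState(user=True, enrollment=True)
    objs = before_ideal.create_user(course_key)  # materialize the "before" state
    enroll_email(course_key, objs.email)  # the action under test
    # EmailEnrollmentState reads the resulting DB state; equality compares to_dict().
    assert EmailEnrollmentState(course_key, objs.email) == after_ideal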
@attr('shard_1')
class TestSendBetaRoleEmail(TestCase):
"""
Test edge cases for `send_beta_role_email`
"""
def setUp(self):
super(TestSendBetaRoleEmail, self).setUp()
self.user = UserFactory.create()
self.email_params = {'course': 'Robot Super Course'}
def test_bad_action(self):
bad_action = 'beta_tester'
error_msg = "Unexpected action received '{}' - expected 'add' or 'remove'".format(bad_action)
with self.assertRaisesRegexp(ValueError, error_msg):
send_beta_role_email(bad_action, self.user, self.email_params)
@attr('shard_1')
class TestGetEmailParams(ModuleStoreTestCase):
"""
Test what URLs the function get_email_params returns under different
production-like conditions.
"""
def setUp(self):
super(TestGetEmailParams, self).setUp()
self.course = CourseFactory.create()
# Explicitly construct what we expect the course URLs to be
site = settings.SITE_NAME
self.course_url = u'https://{}/courses/{}/'.format(
site,
self.course.id.to_deprecated_string()
)
self.course_about_url = self.course_url + 'about'
self.registration_url = u'https://{}/register'.format(
site,
)
def test_normal_params(self):
# For a normal site, what do we expect to get for the URLs?
# Also make sure `auto_enroll` is properly passed through.
result = get_email_params(self.course, False)
self.assertEqual(result['auto_enroll'], False)
self.assertEqual(result['course_about_url'], self.course_about_url)
self.assertEqual(result['registration_url'], self.registration_url)
self.assertEqual(result['course_url'], self.course_url)
def test_marketing_params(self):
# For a site with a marketing front end, what do we expect to get for the URLs?
# Also make sure `auto_enroll` is properly passed through.
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
result = get_email_params(self.course, True)
self.assertEqual(result['auto_enroll'], True)
# We should *not* get a course about url (LMS doesn't know what the marketing site URLs are)
self.assertEqual(result['course_about_url'], None)
self.assertEqual(result['registration_url'], self.registration_url)
self.assertEqual(result['course_url'], self.course_url)
@attr('shard_1')
class TestRenderMessageToString(ModuleStoreTestCase):
"""
Test that email templates can be rendered in a language chosen manually.
"""
def setUp(self):
super(TestRenderMessageToString, self).setUp()
self.subject_template = 'emails/enroll_email_allowedsubject.txt'
self.message_template = 'emails/enroll_email_allowedmessage.txt'
self.course = CourseFactory.create()
def get_email_params(self):
"""
Returns a dictionary of parameters used to render an email.
"""
email_params = get_email_params(self.course, True)
email_params["email_address"] = "[email protected]"
email_params["full_name"] = "Jean Reno"
return email_params
def get_subject_and_message(self, language):
"""
Returns the subject and message rendered in the specified language.
"""
return render_message_to_string(
self.subject_template,
self.message_template,
self.get_email_params(),
language=language
)
def test_subject_and_message_translation(self):
subject, message = self.get_subject_and_message('fr')
language_after_rendering = get_language()
you_have_been_invited_in_french = u"Vous avez été invité"
self.assertIn(you_have_been_invited_in_french, subject)
self.assertIn(you_have_been_invited_in_french, message)
self.assertEqual(settings.LANGUAGE_CODE, language_after_rendering)
def test_platform_language_is_used_for_logged_in_user(self):
with override_language('zh_CN'): # simulate a user login
subject, message = self.get_subject_and_message(None)
self.assertIn("You have been", subject)
self.assertIn("You have been", message)
| agpl-3.0 | 6,489,823,800,440,749,000 | 33.923077 | 128 | 0.621145 | false |
solashirai/edx-platform | openedx/core/djangoapps/credit/models.py | 3 | 25622 | # -*- coding: utf-8 -*-
"""
Models for Credit Eligibility for courses.
Credit courses allow students to receive university credit for
successful completion of a course on EdX
"""
from collections import defaultdict
import datetime
import logging
from config_models.models import ConfigurationModel
from django.conf import settings
from django.core.cache import cache
from django.core.validators import RegexValidator
from django.db import models, transaction, IntegrityError
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy, ugettext as _
from jsonfield.fields import JSONField
from model_utils.models import TimeStampedModel
import pytz
from simple_history.models import HistoricalRecords
from xmodule_django.models import CourseKeyField
CREDIT_PROVIDER_ID_REGEX = r"[a-z,A-Z,0-9,\-]+"
log = logging.getLogger(__name__)
class CreditProvider(TimeStampedModel):
"""
This model represents an institution that can grant credit for a course.
    Each provider is identified by a unique ID (e.g., 'ASU'). CreditProvider also
    includes a `url` where the student will be sent when they try to
    get credit for the course. The eligibility duration is used to set how long
    the credit-eligible message appears on the dashboard.
"""
provider_id = models.CharField(
max_length=255,
unique=True,
validators=[
RegexValidator(
regex=CREDIT_PROVIDER_ID_REGEX,
message="Only alphanumeric characters and hyphens (-) are allowed",
code="invalid_provider_id",
)
],
help_text=ugettext_lazy(
"Unique identifier for this credit provider. "
"Only alphanumeric characters and hyphens (-) are allowed. "
"The identifier is case-sensitive."
)
)
active = models.BooleanField(
default=True,
help_text=ugettext_lazy("Whether the credit provider is currently enabled.")
)
display_name = models.CharField(
max_length=255,
help_text=ugettext_lazy("Name of the credit provider displayed to users")
)
enable_integration = models.BooleanField(
default=False,
help_text=ugettext_lazy(
"When true, automatically notify the credit provider "
"when a user requests credit. "
"In order for this to work, a shared secret key MUST be configured "
"for the credit provider in secure auth settings."
)
)
provider_url = models.URLField(
default="",
help_text=ugettext_lazy(
"URL of the credit provider. If automatic integration is "
"enabled, this will the the end-point that we POST to "
"to notify the provider of a credit request. Otherwise, the "
"user will be shown a link to this URL, so the user can "
"request credit from the provider directly."
)
)
provider_status_url = models.URLField(
default="",
help_text=ugettext_lazy(
"URL from the credit provider where the user can check the status "
"of his or her request for credit. This is displayed to students "
"*after* they have requested credit."
)
)
provider_description = models.TextField(
default="",
help_text=ugettext_lazy(
"Description for the credit provider displayed to users."
)
)
fulfillment_instructions = models.TextField(
null=True,
blank=True,
help_text=ugettext_lazy(
"Plain text or html content for displaying further steps on "
"receipt page *after* paying for the credit to get credit for a "
"credit course against a credit provider."
)
)
eligibility_email_message = models.TextField(
default="",
help_text=ugettext_lazy(
"Plain text or html content for displaying custom message inside "
"credit eligibility email content which is sent when user has met "
"all credit eligibility requirements."
)
)
receipt_email_message = models.TextField(
default="",
help_text=ugettext_lazy(
"Plain text or html content for displaying custom message inside "
"credit receipt email content which is sent *after* paying to get "
"credit for a credit course."
)
)
thumbnail_url = models.URLField(
default="",
max_length=255,
help_text=ugettext_lazy(
"Thumbnail image url of the credit provider."
)
)
CREDIT_PROVIDERS_CACHE_KEY = "credit.providers.list"
@classmethod
def get_credit_providers(cls, providers_list=None):
"""
Retrieve a list of all credit providers or filter on providers_list, represented
as dictionaries.
Arguments:
            providers_list (list of strings or None): list of ids to filter the
                results by, or None for all providers.
Returns:
list of providers represented as dictionaries.
"""
# Attempt to retrieve the credit provider list from the cache if provider_list is None
# The cache key is invalidated when the provider list is updated
# (a post-save signal handler on the CreditProvider model)
# This doesn't happen very often, so we would expect a *very* high
# cache hit rate.
credit_providers = cache.get(cls.CREDIT_PROVIDERS_CACHE_KEY)
if credit_providers is None:
# Cache miss: construct the provider list and save it in the cache
credit_providers = CreditProvider.objects.filter(active=True)
credit_providers = [
{
"id": provider.provider_id,
"display_name": provider.display_name,
"url": provider.provider_url,
"status_url": provider.provider_status_url,
"description": provider.provider_description,
"enable_integration": provider.enable_integration,
"fulfillment_instructions": provider.fulfillment_instructions,
"thumbnail_url": provider.thumbnail_url,
}
for provider in credit_providers
]
cache.set(cls.CREDIT_PROVIDERS_CACHE_KEY, credit_providers)
if providers_list:
credit_providers = [provider for provider in credit_providers if provider['id'] in providers_list]
return credit_providers
@classmethod
def get_credit_provider(cls, provider_id):
"""
Retrieve a credit provider with provided 'provider_id'.
"""
try:
return CreditProvider.objects.get(active=True, provider_id=provider_id)
except cls.DoesNotExist:
return None
def __unicode__(self):
"""Unicode representation of the credit provider. """
return self.provider_id
@receiver(models.signals.post_save, sender=CreditProvider)
@receiver(models.signals.post_delete, sender=CreditProvider)
def invalidate_provider_cache(sender, **kwargs): # pylint: disable=unused-argument
"""Invalidate the cache of credit providers. """
cache.delete(CreditProvider.CREDIT_PROVIDERS_CACHE_KEY)
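def _example_provider_cache_roundtrip():
    """Illustrative sketch, not part of the original module: exercising the
    read-through cache above. Assumes a configured Django cache backend and
    database."""
    first = CreditProvider.get_credit_providers()  # cache miss: hits the DB
    second = CreditProvider.get_credit_providers()  # cache hit: no DB query
    assert first == second
    # Saving a provider fires post_save, which clears the cached list, so the
    # next call repopulates it from the database.
    CreditProvider.objects.create(provider_id="demo", display_name="Demo")
    return CreditProvider.get_credit_providers()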
class CreditCourse(models.Model):
"""
Model for tracking a credit course.
"""
course_key = CourseKeyField(max_length=255, db_index=True, unique=True)
enabled = models.BooleanField(default=False)
CREDIT_COURSES_CACHE_KEY = "credit.courses.set"
@classmethod
def is_credit_course(cls, course_key):
"""
Check whether the course has been configured for credit.
Args:
course_key (CourseKey): Identifier of the course.
Returns:
bool: True iff this is a credit course.
"""
credit_courses = cache.get(cls.CREDIT_COURSES_CACHE_KEY)
if credit_courses is None:
credit_courses = set(
unicode(course.course_key)
for course in cls.objects.filter(enabled=True)
)
cache.set(cls.CREDIT_COURSES_CACHE_KEY, credit_courses)
return unicode(course_key) in credit_courses
@classmethod
def get_credit_course(cls, course_key):
"""
Get the credit course if exists for the given 'course_key'.
Args:
course_key(CourseKey): The course identifier
Raises:
DoesNotExist if no CreditCourse exists for the given course key.
Returns:
CreditCourse if one exists for the given course key.
"""
return cls.objects.get(course_key=course_key, enabled=True)
def __unicode__(self):
"""Unicode representation of the credit course. """
return unicode(self.course_key)
@receiver(models.signals.post_save, sender=CreditCourse)
@receiver(models.signals.post_delete, sender=CreditCourse)
def invalidate_credit_courses_cache(sender, **kwargs): # pylint: disable=unused-argument
"""Invalidate the cache of credit courses. """
cache.delete(CreditCourse.CREDIT_COURSES_CACHE_KEY)
class CreditRequirement(TimeStampedModel):
"""
This model represents a credit requirement.
Each requirement is uniquely identified by its 'namespace' and
'name' fields.
The 'name' field stores the unique name or location (in case of XBlock)
for a requirement, which serves as the unique identifier for that
requirement.
The 'display_name' field stores the display name of the requirement.
    The 'criteria' field dictionary provides additional information that clients
    may need to determine whether a user has satisfied the requirement.
"""
course = models.ForeignKey(CreditCourse, related_name="credit_requirements")
namespace = models.CharField(max_length=255)
name = models.CharField(max_length=255)
display_name = models.CharField(max_length=255, default="")
order = models.PositiveIntegerField(default=0)
criteria = JSONField()
active = models.BooleanField(default=True)
class Meta(object):
unique_together = ('namespace', 'name', 'course')
ordering = ["order"]
@classmethod
def add_or_update_course_requirement(cls, credit_course, requirement, order):
"""
Add requirement to a given course.
Args:
            credit_course(CreditCourse): The identifier for credit course
            requirement(dict): Requirement dict to be added
            order(int): Position of the requirement in the course ordering
Returns:
(CreditRequirement, created) tuple
"""
credit_requirement, created = cls.objects.get_or_create(
course=credit_course,
namespace=requirement["namespace"],
name=requirement["name"],
defaults={
"display_name": requirement["display_name"],
"criteria": requirement["criteria"],
"order": order,
"active": True
}
)
if not created:
credit_requirement.criteria = requirement["criteria"]
credit_requirement.active = True
credit_requirement.order = order
credit_requirement.display_name = requirement["display_name"]
credit_requirement.save()
return credit_requirement, created
@classmethod
def get_course_requirements(cls, course_key, namespace=None, name=None):
"""
Get credit requirements of a given course.
Args:
course_key (CourseKey): The identifier for a course
Keyword Arguments
namespace (str): Optionally filter credit requirements by namespace.
name (str): Optionally filter credit requirements by name.
Returns:
QuerySet of CreditRequirement model
"""
# order credit requirements according to their appearance in courseware
requirements = CreditRequirement.objects.filter(course__course_key=course_key, active=True)
if namespace is not None:
requirements = requirements.filter(namespace=namespace)
if name is not None:
requirements = requirements.filter(name=name)
return requirements
@classmethod
def disable_credit_requirements(cls, requirement_ids):
"""
Mark the given requirements inactive.
Args:
requirement_ids(list): List of ids
Returns:
None
"""
cls.objects.filter(id__in=requirement_ids).update(active=False)
@classmethod
def get_course_requirement(cls, course_key, namespace, name):
"""
Get credit requirement of a given course.
Args:
course_key(CourseKey): The identifier for a course
namespace(str): Namespace of credit course requirements
name(str): Name of credit course requirement
Returns:
CreditRequirement object if exists
"""
try:
return cls.objects.get(
course__course_key=course_key, active=True, namespace=namespace, name=name
)
except cls.DoesNotExist:
return None
class CreditRequirementStatus(TimeStampedModel):
"""
This model represents the status of each requirement.
For a particular credit requirement, a user can either:
1) Have satisfied the requirement (example: approved in-course reverification)
2) Have failed the requirement (example: denied in-course reverification)
3) Neither satisfied nor failed (example: the user hasn't yet attempted in-course reverification).
Cases (1) and (2) are represented by having a CreditRequirementStatus with
the status set to "satisfied" or "failed", respectively.
In case (3), no CreditRequirementStatus record will exist for the requirement and user.
"""
REQUIREMENT_STATUS_CHOICES = (
("satisfied", "satisfied"),
("failed", "failed"),
("declined", "declined"),
)
username = models.CharField(max_length=255, db_index=True)
requirement = models.ForeignKey(CreditRequirement, related_name="statuses")
status = models.CharField(max_length=32, choices=REQUIREMENT_STATUS_CHOICES)
# Include additional information about why the user satisfied or failed
# the requirement. This is specific to the type of requirement.
# For example, the minimum grade requirement might record the user's
# final grade when the user completes the course. This allows us to display
# the grade to users later and to send the information to credit providers.
reason = JSONField(default={})
# Maintain a history of requirement status updates for auditing purposes
history = HistoricalRecords()
class Meta(object):
unique_together = ('username', 'requirement')
@classmethod
def get_statuses(cls, requirements, username):
"""
        Get credit requirement statuses for the given requirements and username
        Args:
            requirements(QuerySet): Queryset of 'CreditRequirement' objects to check
username(str): username of the user
Returns:
Queryset 'CreditRequirementStatus' objects
"""
return cls.objects.filter(requirement__in=requirements, username=username)
@classmethod
@transaction.atomic
def add_or_update_requirement_status(cls, username, requirement, status="satisfied", reason=None):
"""
Add credit requirement status for given username.
Args:
username(str): Username of the user
requirement(CreditRequirement): 'CreditRequirement' object
status(str): Status of the requirement
reason(dict): Reason of the status
"""
requirement_status, created = cls.objects.get_or_create(
username=username,
requirement=requirement,
defaults={"reason": reason, "status": status}
)
if not created:
requirement_status.status = status
requirement_status.reason = reason if reason else {}
requirement_status.save()
@classmethod
@transaction.atomic
def remove_requirement_status(cls, username, requirement):
"""
Remove credit requirement status for given username.
Args:
username(str): Username of the user
requirement(CreditRequirement): 'CreditRequirement' object
"""
try:
requirement_status = cls.objects.get(username=username, requirement=requirement)
requirement_status.delete()
except cls.DoesNotExist:
log_msg = (
u'The requirement status {requirement} does not exist for username {username}.'.format(
requirement=requirement,
username=username
)
)
log.error(log_msg)
return
def default_deadline_for_credit_eligibility(): # pylint: disable=invalid-name
""" The default deadline to use when creating a new CreditEligibility model. """
return datetime.datetime.now(pytz.UTC) + datetime.timedelta(
days=getattr(settings, "CREDIT_ELIGIBILITY_EXPIRATION_DAYS", 365)
)
class CreditEligibility(TimeStampedModel):
""" A record of a user's eligibility for credit for a specific course. """
username = models.CharField(max_length=255, db_index=True)
course = models.ForeignKey(CreditCourse, related_name="eligibilities")
# Deadline for when credit eligibility will expire.
# Once eligibility expires, users will no longer be able to purchase
# or request credit.
# We save the deadline as a database field just in case
# we need to override the deadline for particular students.
deadline = models.DateTimeField(
default=default_deadline_for_credit_eligibility,
help_text=ugettext_lazy("Deadline for purchasing and requesting credit.")
)
class Meta(object):
unique_together = ('username', 'course')
verbose_name_plural = "Credit eligibilities"
@classmethod
def update_eligibility(cls, requirements, username, course_key):
"""
Update the user's credit eligibility for a course.
A user is eligible for credit when the user has satisfied
all requirements for credit in the course.
Arguments:
requirements (Queryset): Queryset of `CreditRequirement`s to check.
username (str): Identifier of the user being updated.
course_key (CourseKey): Identifier of the course.
        Returns:
            tuple: (is_eligible (bool), created (bool)), where `created` is True
            only when a new CreditEligibility record was saved.
"""
# Check all requirements for the course to determine if the user
# is eligible. We need to check all the *requirements*
# (not just the *statuses*) in case the user doesn't yet have
# a status for a particular requirement.
status_by_req = defaultdict(lambda: False)
for status in CreditRequirementStatus.get_statuses(requirements, username):
status_by_req[status.requirement.id] = status.status
is_eligible = all(status_by_req[req.id] == "satisfied" for req in requirements)
# If we're eligible, then mark the user as being eligible for credit.
if is_eligible:
try:
CreditEligibility.objects.create(
username=username,
course=CreditCourse.objects.get(course_key=course_key),
)
return is_eligible, True
except IntegrityError:
return is_eligible, False
else:
return is_eligible, False
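    # Illustrative note, not part of the original module: with two
    # requirements where one is "satisfied" and the other has no status row
    # yet, status_by_req defaults the missing one to False, so is_eligible
    # stays False until every requirement is explicitly "satisfied".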
@classmethod
def get_user_eligibilities(cls, username):
"""
Returns the eligibilities of given user.
Args:
username(str): Username of the user
Returns:
CreditEligibility queryset for the user
"""
return cls.objects.filter(
username=username,
course__enabled=True,
deadline__gt=datetime.datetime.now(pytz.UTC)
).select_related('course')
@classmethod
def is_user_eligible_for_credit(cls, course_key, username):
"""
Check if the given user is eligible for the provided credit course
Args:
course_key(CourseKey): The course identifier
username(str): The username of the user
Returns:
            bool: True if the user is eligible for the credit course, else False
"""
return cls.objects.filter(
course__course_key=course_key,
course__enabled=True,
username=username,
deadline__gt=datetime.datetime.now(pytz.UTC),
).exists()
def __unicode__(self):
"""Unicode representation of the credit eligibility. """
return u"{user}, {course}".format(
user=self.username,
course=self.course.course_key,
)
class CreditRequest(TimeStampedModel):
"""
A request for credit from a particular credit provider.
When a user initiates a request for credit, a CreditRequest record will be created.
Each CreditRequest is assigned a unique identifier so we can find it when the request
is approved by the provider. The CreditRequest record stores the parameters to be sent
at the time the request is made. If the user re-issues the request
(perhaps because the user did not finish filling in forms on the credit provider's site),
the request record will be updated, but the UUID will remain the same.
"""
uuid = models.CharField(max_length=32, unique=True, db_index=True)
username = models.CharField(max_length=255, db_index=True)
course = models.ForeignKey(CreditCourse, related_name="credit_requests")
provider = models.ForeignKey(CreditProvider, related_name="credit_requests")
parameters = JSONField()
REQUEST_STATUS_PENDING = "pending"
REQUEST_STATUS_APPROVED = "approved"
REQUEST_STATUS_REJECTED = "rejected"
REQUEST_STATUS_CHOICES = (
(REQUEST_STATUS_PENDING, "Pending"),
(REQUEST_STATUS_APPROVED, "Approved"),
(REQUEST_STATUS_REJECTED, "Rejected"),
)
status = models.CharField(
max_length=255,
choices=REQUEST_STATUS_CHOICES,
default=REQUEST_STATUS_PENDING
)
history = HistoricalRecords()
class Meta(object):
# Enforce the constraint that each user can have exactly one outstanding
# request to a given provider. Multiple requests use the same UUID.
unique_together = ('username', 'course', 'provider')
get_latest_by = 'created'
@classmethod
def credit_requests_for_user(cls, username):
"""
Retrieve all credit requests for a user.
Arguments:
username (unicode): The username of the user.
Returns: list
Example Usage:
>>> CreditRequest.credit_requests_for_user("bob")
[
{
"uuid": "557168d0f7664fe59097106c67c3f847",
"timestamp": 1434631630,
"course_key": "course-v1:HogwartsX+Potions101+1T2015",
"provider": {
"id": "HogwartsX",
"display_name": "Hogwarts School of Witchcraft and Wizardry",
},
"status": "pending" # or "approved" or "rejected"
}
]
"""
return [
{
"uuid": request.uuid,
"timestamp": request.parameters.get("timestamp"),
"course_key": request.course.course_key,
"provider": {
"id": request.provider.provider_id,
"display_name": request.provider.display_name
},
"status": request.status
}
for request in cls.objects.select_related('course', 'provider').filter(username=username)
]
@classmethod
def get_user_request_status(cls, username, course_key):
"""
Returns the latest credit request of user against the given course.
Args:
username(str): The username of requesting user
course_key(CourseKey): The course identifier
Returns:
CreditRequest if any otherwise None
"""
try:
return cls.objects.filter(
username=username, course__course_key=course_key
).select_related('course', 'provider').latest()
except cls.DoesNotExist:
return None
def __unicode__(self):
"""Unicode representation of a credit request."""
return u"{course}, {provider}, {status}".format(
course=self.course.course_key,
provider=self.provider.provider_id,
status=self.status,
)
class CreditConfig(ConfigurationModel):
""" Manage credit configuration """
CACHE_KEY = 'credit.providers.api.data'
cache_ttl = models.PositiveIntegerField(
verbose_name=_("Cache Time To Live"),
default=0,
help_text=_(
"Specified in seconds. Enable caching by setting this to a value greater than 0."
)
)
@property
def is_cache_enabled(self):
"""Whether responses from the commerce API will be cached."""
return self.enabled and self.cache_ttl > 0
def __unicode__(self):
"""Unicode representation of the config. """
return 'Credit Configuration'
| agpl-3.0 | -6,041,291,443,425,161,000 | 34.002732 | 110 | 0.630864 | false |
spidercensus/py-junos-eznc | lib/jnpr/junos/facts/swver.py | 1 | 3314 | import re
class version_info(object):
def __init__(self, verstr):
"""verstr - version string"""
m1 = re.match('(.*?)([RBIXSF-])(.*)', verstr)
self.type = m1.group(2)
        self.major = tuple(map(int, m1.group(1).split('.')))  # creates a tuple of ints
after_type = m1.group(3).split('.')
self.minor = after_type[0]
if 'X' == self.type:
# assumes form similar to "45-D10", so extract the bits from this
            xm = re.match(r"(\d+)-(\w)(\d+)", self.minor)  # raw string avoids invalid escapes
if xm is not None:
self.minor = tuple(
[int(xm.group(1)), xm.group(2), int(xm.group(3))])
if len(after_type) < 2:
self.build = None
else:
self.build = int(after_type[1])
# X type not hyphen format, perhaps "11.4X12.1", just extract
# build rev or set None
else:
if len(after_type) < 2:
self.build = None
else:
self.build = int(after_type[1])
elif ('I' == self.type) or ('-' == self.type):
self.type = 'I'
try:
# assumes that we have a build/spin, but not numeric
self.build = after_type[1]
            except IndexError:
                self.build = None
else:
try:
self.build = int(after_type[1]) # assumes numeric build/spin
            except (IndexError, ValueError):
                self.build = after_type[0]  # missing or non-numeric build/spin
self.as_tuple = self.major + tuple([self.type, self.minor, self.build])
self.v_dict = {'major': self.major, 'type': self.type,
'minor': self.minor, 'build': self.build}
def __iter__(self):
for key in self.v_dict:
yield key, self.v_dict[key]
def __repr__(self):
retstr = "junos.version_info(major={major}, type={type}," \
" minor={minor}, build={build})".format(
major=self.major,
type=self.type,
minor=self.minor,
build=self.build
)
return retstr
def _cmp_tuple(self, other):
length = len(self) if len(self) < len(other) else len(other)
return self.as_tuple[0:length]
def __len__(self):
length = 0
for component in self.as_tuple:
if component is None:
return length
else:
length += 1
return length
def __lt__(self, other):
return self._cmp_tuple(other) < other
def __le__(self, other):
return self._cmp_tuple(other) <= other
def __gt__(self, other):
return self._cmp_tuple(other) > other
def __ge__(self, other):
return self._cmp_tuple(other) >= other
def __eq__(self, other):
return self._cmp_tuple(other) == other
def __ne__(self, other):
return self._cmp_tuple(other) != other
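# Illustrative sketch, not part of the original module: how version_info
# decomposes a couple of common Junos version strings.
#
#     >>> version_info('12.1X46-D10.2').as_tuple
#     (12, 1, 'X', (46, 'D', 10), 2)
#     >>> version_info('15.1R2.9') >= (15, 1)
#     True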
def version_yaml_representer(dumper, version):
return dumper.represent_mapping(u'tag:yaml.org,2002:map', version.v_dict)
def provides_facts():
"""
Doesn't really provide any facts.
"""
return {}
def get_facts(device):
"""
Doesn't get any facts.
"""
return {}
| apache-2.0 | -4,367,478,845,355,745,300 | 29.127273 | 79 | 0.490042 | false |
kaerdsar/website | website_analytics_piwik/__openerp__.py | 14 | 1454 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Therp BV <http://therp.nl>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Piwik analytics",
"version": "8.0.1.0.0",
"author": "Therp BV,Odoo Community Association (OCA)",
"license": "AGPL-3",
"category": "Website",
"summary": "Track website users using piwik",
"depends": [
'website',
],
"data": [
"views/website_config_settings.xml",
"views/website.xml",
'views/templates.xml',
],
"auto_install": False,
"installable": True,
"application": False,
}
| agpl-3.0 | 3,634,952,265,974,780,000 | 36.282051 | 78 | 0.583219 | false |
DinoCow/airflow | airflow/operators/dagrun_operator.py | 5 | 1152 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.operators.trigger_dagrun`."""
import warnings
# pylint: disable=unused-import
from airflow.operators.trigger_dagrun import TriggerDagRunLink, TriggerDagRunOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.operators.trigger_dagrun`.",
DeprecationWarning,
stacklevel=2,
)
| apache-2.0 | -5,327,493,859,159,075,000 | 38.724138 | 93 | 0.770833 | false |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Scripts/EditWithIdleX.py | 5 | 5567 | """
IdleX Windows Configuration
Adds/removes "Edit with IdleX" to the right-click context menu
for .py files.
Author: Roger D. Serwy
Date: 2011-11-28
2012-04-13 modified to work with setup.py
License: See LICENSE.txt
"""
import os
import sys
import shlex
WINREG = True
if sys.version < '3':
import Tkinter as tk
import tkMessageBox as mb
try:
import _winreg as W
except:
WINREG = False
else:
import tkinter as tk
import tkinter.messagebox as mb
try:
import winreg as W
except:
WINREG = False
if 'linux' in sys.platform:
# For Testing on Non-Windows platforms
class dummy_winreg:
def dummy(*args, **kwargs):
pass
def CreateKey(self, *args, **kwargs):
raise Exception('This is not Windows')
DeleteKey = CreateKey
def QueryValue(self, *args, **kwargs):
return r'C:\\Python32\\python.exe'
def __getattr__(self, *args, **kwargs):
return self.dummy
__file__ = 'LINUX'
W = dummy_winreg()
WINREG = True
if WINREG == False:
mb.showerror(title='Edit with IdleX',
message='Unable to import winreg')
sys.exit(1)
def get_python_executable():
""" Get path to python.exe """
reg = W.ConnectRegistry(None, W.HKEY_CLASSES_ROOT)
p = W.OpenKey(reg, r'Python.File\shell\Edit with IDLE\command')
v = W.QueryValue(p, None)
path_to_python = shlex.split(v)[0]
return path_to_python
def get_idlex_module():
try:
mod = __import__('idlexlib')
except:
return None
return mod
def get_idlex_path():
# This assumes EditWithIdleX.py is in same directory as idlex.py
mod = get_idlex_module()
if mod:
head, tail = os.path.split(os.path.abspath(mod.__file__))
path_to_idlex = os.path.join(head, 'launch.py')
else:
head, tail = os.path.split(os.path.abspath(__file__))
head, tail = os.path.split(head)
path_to_idlex = os.path.join(head, 'idlex.py')
if os.path.exists(path_to_idlex):
return path_to_idlex
else:
return 'NOT FOUND. Make sure idlex.py is in same directory as EditWithIdleX.py.'
def build_registry_value():
""" Build the value for "Edit with IdleX" """
path_to_python = get_python_executable()
path_to_idlex = get_idlex_path()
if not os.path.exists(path_to_idlex):
raise Exception('Path to IdleX is not valid.')
regval = '"%(python)s" "%(idlex)s" -e "%%1"' % {'python':path_to_python,
'idlex':path_to_idlex}
return regval
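# Illustrative note, not part of the original script: the value built above
# expands to something like
#     "C:\Python27\python.exe" "C:\idlex\idlexlib\launch.py" -e "%1"
# (paths are examples only); Explorer substitutes %1 with the path of the
# right-clicked file.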
def create_registry_key():
""" Create the "Edit with IdleX" registry key. """
regval = build_registry_value()
_create_registry_key_helper(regval)
def _create_registry_key_helper(regval):
reg = W.ConnectRegistry(None, W.HKEY_CURRENT_USER)
p = W.OpenKey(reg, r'Software\Classes', 0, W.KEY_SET_VALUE)
p2 = W.CreateKey(p, 'Python.File\shell\Edit with IdleX\command')
W.SetValue(p2, '', W.REG_SZ, regval)
W.CloseKey(p2)
W.CloseKey(p)
W.CloseKey(reg)
def delete_registry_key():
""" Delete the "Edit with IdleX" registry key """
reg = W.ConnectRegistry(None, W.HKEY_CURRENT_USER)
p = W.OpenKey(reg, r'Software\Classes\Python.File\shell', 0, W.KEY_ALL_ACCESS)
    p2 = W.DeleteKey(p, r'Edit with IdleX\command')
p3 = W.DeleteKey(p, 'Edit with IdleX')
W.CloseKey(p)
W.CloseKey(reg)
def errorbox(err):
mb.showerror(title='Error occurred',
message=err)
def successbox(op=''):
mb.showinfo(title='Success',
message='Operation Successful. %s' % op)
def add_menu_item():
try:
create_registry_key()
successbox("'Edit with IdleX' added.")
except Exception as err:
errorbox(err)
def delete_menu_item():
try:
delete_registry_key()
successbox("'Edit with IdleX' removed.")
except Exception as err:
errorbox(err)
def quitprog():
root.destroy()
def build_gui():
f1 = tk.Frame(root)
f1.config(borderwidth=2, relief=tk.GROOVE)
f1.pack(side=tk.TOP, fill=tk.BOTH, expand=1, padx=5, pady=5)
msg = ["This will configure the right-click context menu",
"item 'Edit with IdleX'. This will sit alongside the",
"'Edit with IDLE' menu item.",
"",
"Python found at: %r" % get_python_executable(),
"IdleX found at: %r" % get_idlex_path(),
"",
"If you change the location of IdleX, re-run this script.",
"Otherwise, no action will occur if you click 'Edit with IdleX'.",
"",
"This program creates a registry key here:",
r"HKEY_CURRENT_USER\Software\Classes\Python.File\shell\Edit with IdleX\command",
]
L = tk.Label(f1, text='\n'.join(msg),
wraplength=300, justify=tk.LEFT)
b1 = tk.Button(f1, text="Add 'Edit with IdleX' to context menu",
command=add_menu_item)
b2 = tk.Button(f1, text="Remove 'Edit with IdleX' from context menu",
command=delete_menu_item)
b3 = tk.Button(f1, text='Exit this program',
command=quitprog)
TOP = tk.TOP
L.pack(side=TOP, fill=tk.X, expand=True)
b1.pack(side=TOP, fill=tk.X, expand=True)
b2.pack(side=TOP, fill=tk.X, expand=True)
b3.pack(side=TOP, fill=tk.X, expand=True)
if __name__ == '__main__':
root = tk.Tk()
root.title('Edit with IdleX')
build_gui()
root.mainloop()
| bsd-3-clause | -853,398,582,292,607,700 | 27.548718 | 91 | 0.599425 | false |
ehopsolidaires/ehop-solidaires.fr | ehop/ehopSolidaire_providers_register/forms.py | 1 | 10906 | # -*- coding: utf-8 -*-
# @copyright (C) 2014-2015
# Developers: 'BARDOU AUGUSTIN - BREZILLON ANTOINE - EUZEN DAVID - FRANCOIS SEBASTIEN - JOUNEAU NICOLAS - KIBEYA AISHA - LE CONG SEBASTIEN -
# MAGREZ VALENTIN - NGASSAM NOUMI PAOLA JOVANY - OUHAMMOUCH SALMA - RIAND MORGAN - TREIMOLEIRO ALEX - TRULLA AURELIEN '
# @license https://www.gnu.org/licenses/gpl-3.0.html GPL version 3
from models import *
from django.contrib.auth.models import User as django_User
from datetime import datetime
from django import forms
from django.contrib.gis.geos import Point
class LoginForm(forms.ModelForm):
class Meta:
model = User
widgets = {
'mail': forms.EmailInput(attrs={'aria-invalid': 'true', 'pattern': 'email', 'required': 'required'}),
}
exclude = ['name', 'firstname', 'sex', 'city', 'zipCode', 'phone', 'idHomeAddress', 'idWorkAddress']
class EmailAuthBackend(object):
    def authenticate(self, username=None, password=None):
try:
user = django_User.objects.get(email=username)
if user and check_password(password, user.password):
return user
except django_User.DoesNotExist:
return None
    def authenticate2(self, username=None, password=None):
        try:
            user = Provider.objects.filter(idUser__mail__contains=username).first()
            if user and check_password(password, user.password):
                return user
except User.DoesNotExist:
return None
def auth_email(self, username=None):
try:
user = Provider.objects.filter(idUser__mail__contains=username).first()
if user:
return user
except User.DoesNotExist:
return None
def auth_email2(self, username=None):
try:
user = django_User.objects.get(email=username)
if user:
return user
except User.DoesNotExist:
return None
class ContactForm(forms.Form):
firstname = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'required': 'required'}))
lastname = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'required': 'required'}))
phone = forms.CharField(widget=forms.TextInput(
attrs={'maxlength': '10', 'aria-invalid': 'true', 'pattern': 'phone', 'required': 'required'}))
sender = forms.EmailField(widget=forms.EmailInput(attrs={'aria-invalid': 'false', 'pattern': 'email'}), required=False)
subjectCHOICES = (('Demandeur','Je cherche un trajet'),('Offreur','Je souhaite proposer un trajet'),
('Infos','Informations diverses'),('Autre','Autre'))
subject = forms.ChoiceField(choices=subjectCHOICES)
goalOfApplicationCHOICES = [('', '')] + list(MenusSettings.objects.filter(type="goalOfApplication").values_list('string', 'string'))
goalOfApplication = forms.ChoiceField(widget=forms.Select(attrs={'required':'required'}), choices=goalOfApplicationCHOICES, required=False)
yearOfBirthCHOICES = (tuple((str(n), str(n)) for n in range(1900, datetime.now().year - 15))+(('',''),))[::-1]
yearOfBirth = forms.ChoiceField(widget=forms.Select(attrs={'required':'required'}), choices=yearOfBirthCHOICES, required=False)
message = forms.CharField(widget=forms.Textarea(attrs={'required': 'required'}))
def __init__(self, *args, **kwargs):
super(ContactForm, self).__init__(*args, **kwargs)
self.fields['goalOfApplication'].choices = get_menus_settings('goalOfApplication')
def get_menus_settings(type, required=True):
if required:
return [('', '')] + list(MenusSettings.objects.filter(type=type).values_list('string', 'string'))
else:
return list(MenusSettings.objects.filter(type=type).values_list('string', 'string'))
class UserRegisterForm(forms.ModelForm):
class Meta:
model = User
widgets = {
'name': forms.TextInput(attrs={'required': 'required'}),
'firstname': forms.TextInput(attrs={'required': 'required'}),
'sex': forms.RadioSelect(attrs={'required': 'required'}),
'city': forms.TextInput(attrs={'required': 'required'}),
'zipCode': forms.TextInput(attrs={'maxlength': '5', 'aria-invalid': 'true', 'pattern': 'zipCode',
'required': 'required'}),
'mail': forms.EmailInput(attrs={'aria-invalid': 'true', 'pattern': 'email', 'required': 'required'}),
'phone': forms.TextInput(attrs={'maxlength': '10', 'aria-invalid': 'true',
'pattern': 'phone', 'required': 'required'}),
}
exclude = ['idHomeAddress', 'idWorkAddress']
class ProviderRegisterForm(forms.ModelForm):
class Meta:
model = Provider
howKnowledgeCHOICES = get_menus_settings('howKnowledge')
widgets = {
'password': forms.PasswordInput(attrs={'id': 'password', 'required': 'required'}),
'company': forms.TextInput(attrs={'list':'datalistCompany', 'autocomplete':'off'}),
'howKnowledge': forms.Select(attrs={'required':'required'}, choices=howKnowledgeCHOICES)
}
exclude = ['idUser', 'is_active', 'last_login']
def __init__(self, *args, **kwargs):
super(ProviderRegisterForm, self).__init__(*args, **kwargs)
self.fields['howKnowledge'].choices = get_menus_settings('howKnowledge')
class ProviderForm2(forms.ModelForm):
class Meta:
model = Provider
howKnowledgeCHOICES = [('','')] + list(MenusSettings.objects.filter(type="howKnowledge").values_list('string', 'string'))
widgets = {
'company': forms.TextInput(attrs={'list': 'datalistCompany', 'autocomplete': 'off'}),
'howKnowledge': forms.Select(attrs={'required': 'required'}, choices=howKnowledgeCHOICES)
}
exclude = ['idUser', 'is_active', 'last_login', 'password']
def __init__(self, *args, **kwargs):
super(ProviderForm2, self).__init__(*args, **kwargs)
self.fields['howKnowledge'].choices = get_menus_settings('howKnowledge')
class AddressRegisterForm(forms.ModelForm):
latlng = forms.CharField(widget=forms.HiddenInput(), required=False,)
cityHide = forms.CharField(widget=forms.HiddenInput(), required=False,)
zipCodeHide = forms.CharField(widget=forms.HiddenInput(), required=False,)
class Meta:
model = Address
widgets = {
'street':forms.TextInput(attrs={'class': 'field', 'placeholder': 'Indiquez un lieu',
'autocomplete': 'on', 'required': 'required'}),
}
exclude = ['idAddress', 'point', 'city', 'zipCode']
def clean(self):
cleaned_data = super(AddressRegisterForm, self).clean()
coord = cleaned_data['latlng'].replace('(', '')
city = cleaned_data['cityHide']
zipcode = cleaned_data['zipCodeHide']
if city == "":
city = "undefined"
if zipcode == "undefined" or zipcode == "":
zipcode = 0
if coord == "" or coord == "undefined":
raise forms.ValidationError("Bad address")
coord = coord.replace(')', '')
coordTab = coord.split(',')
cleaned_data['point'] = 'POINT(%f %f)' % (float(coordTab[0]), float(coordTab[1]))
cleaned_data['city'] = city
cleaned_data['zipCode'] = zipcode
return cleaned_data
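# Illustrative note, not part of the original module: a latlng of
# "(48.8566, 2.3522)" becomes the WKT string "POINT(48.856600 2.352200)",
# which is what gets stored in the Address model's point field.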
class AddressRegisterFormWork(forms.ModelForm):
latlng = forms.CharField(widget=forms.HiddenInput(), required=False,)
cityHide = forms.CharField(widget=forms.HiddenInput(), required=False,)
zipCodeHide = forms.CharField(widget=forms.HiddenInput(), required=False,)
class Meta:
model = Address
widgets = {
'street': forms.TextInput(attrs={'class': 'field', 'placeholder': 'Indiquez un lieu', 'autocomplete': 'on',
'required': 'required'}),
}
exclude = ['idAddress', 'point', 'city', 'zipCode']
def clean(self):
cleaned_data = super(AddressRegisterFormWork, self).clean()
coord = cleaned_data['latlng'].replace('(', '')
city = cleaned_data['cityHide']
zipcode = cleaned_data['zipCodeHide']
if city == "":
city = "undefined"
if zipcode == "undefined" or zipcode == "":
zipcode = 0
if coord == "" or coord == "undefined":
raise forms.ValidationError("Bad address")
coord = coord.replace(')', '')
coordtab = coord.split(',')
cleaned_data['point'] = 'POINT(%f %f)' % (float(coordtab[0]), float(coordtab[1]))
cleaned_data['city'] = city
        cleaned_data['zipCode'] = zipcode
return cleaned_data
class PathDepartureRegisterForm(forms.ModelForm):
class Meta:
model = Path
widgets = {
'type': forms.HiddenInput(),
'day': forms.HiddenInput(),
'weekNumber': forms.HiddenInput(),
'schedule': forms.TimeInput(attrs={'class': 'time', 'data-format': 'HH:mm', 'data-template': 'HH : mm',
'value': '08:00'}),
}
exclude = ['idPath', 'idProvider', 'departure', 'arrival', 'startingWeek']
class PathArrivalRegisterForm(forms.ModelForm):
class Meta:
model = Path
widgets = {
'type': forms.HiddenInput(),
'day': forms.HiddenInput(),
'weekNumber': forms.HiddenInput(),
'schedule': forms.TimeInput(attrs={'class': 'time', 'data-format': 'HH:mm', 'data-template': 'HH : mm',
'value':'18:00'}),
}
exclude = ['idPath', 'idProvider', 'departure', 'arrival', 'startingWeek']
class TestUserRegisterForm(forms.ModelForm):
class Meta:
model = User
widgets = {
'name': forms.TextInput(attrs={'required': 'required'}),
'firstname': forms.TextInput(attrs={'required': 'required'}),
'city': forms.TextInput(attrs={'required': 'required'}),
'zipCode': forms.TextInput(attrs={'maxlength': '5', 'aria-invalid': 'true', 'pattern': 'zipCode', 'required': 'required'}),
'mail': forms.EmailInput(attrs={'aria-invalid': 'true', 'pattern': 'email', 'required': 'required'}),
'phone': forms.TextInput(attrs={'maxlength': '10', 'aria-invalid': 'true', 'pattern': 'phone', 'required': 'required'}),
}
exclude = ['idHomeAddress', 'idWorkAddress', 'sex']
class newMdpForm(forms.Form):
oldmdp = forms.CharField(widget=forms.PasswordInput(), label='Ancien mot de passe', required=True)
newmdp1 = forms.CharField(widget=forms.PasswordInput(), label='Nouveau mot de passe', required=True) | agpl-3.0 | -7,623,138,704,339,617,000 | 44.070248 | 143 | 0.599303 | false |
Rogergonzalez21/TelegramSGirlsBot | app.py | 1 | 4445 | from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import telegram, logging, requests, json, secrets, botan
from random import randint
# Enable logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
headers = {
'User-Agent': 'sgirlsbot Telegram bot (by /u/rogergonzalez21)',
'From': '[email protected]'
}
def get_data(query):
r = requests.get(query, headers=headers)
data = r.json()
result = []
for child in data['data']['children']:
if child['data']['url'].endswith(('.jpg', '.png', '.gif')):
result.append({'title' : child['data']['title'], 'url' : child['data']['url']})
return result
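# Illustrative note, not part of the original bot: get_data() returns entries
# shaped like
#     [{'title': 'Some post title', 'url': 'https://i.imgur.com/abc.jpg'}, ...]
# keeping only posts whose URL ends in .jpg, .png or .gif.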
def botan_track(update):
uid = update.message.from_user.id
message_dict = update.message.to_dict()
event_name = update.message.text.split(' ', 1)[0]
if uid not in secrets.exclude_analytics:
print(botan.track(secrets.botan_token, uid, message_dict, event_name))
def start(bot, update):
botan_track(update)
bot.sendMessage(update.message.chat_id, text="Hi! To get a random suicide girl, type <b>/random</b>. To get a specific Suicide Girl, type /suicide 'name'. For example, <b>/suicide bixton</b>. Enjoy!", parse_mode=telegram.ParseMode.HTML)
def help(bot, update):
botan_track(update)
bot.sendMessage(update.message.chat_id, text="To get a random suicide girl, type <b>/random</b>. To get a specific Suicide Girl, type /suicide 'name'. For example, <b>/suicide bixton</b>.", parse_mode=telegram.ParseMode.HTML)
def random(bot, update):
botan_track(update)
query = r'https://www.reddit.com/r/suicidegirls/search.json?q=&restrict_sr=on&sort=relevance&t=all&limit=100'
suicides = get_data(query)
random_suicide = suicides[randint(0,len(suicides)-1)]
try:
bot.sendMessage(chat_id=update.message.chat_id, text='%s - %s'%(random_suicide['title'], random_suicide['url']))
except:
bot.sendMessage(chat_id=update.message.chat_id, text='Sorry, we had an issue. Please, try again.')
def suicide(bot, update):
botan_track(update)
    suicide = update.message.text[9:]  # strip the "/suicide " command prefix
query = r'http://www.reddit.com/r/suicidegirls/search.json?q=title:%s+site:imgur.com+nsfw:yes+subreddit:suicidegirls'%(suicide)
if suicide == '':
echo(bot, update)
else:
suicide_list = get_data(query)
if len(suicide_list) == 0:
bot.sendMessage(update.message.chat_id, text="I couldn't find any pictures of %s. Sorry :(" %suicide)
else:
random_suicide = suicide_list[randint(0,len(suicide_list)-1)]
try:
bot.sendMessage(chat_id=update.message.chat_id, text='%s - %s'%(random_suicide['title'], random_suicide['url']))
except:
bot.sendMessage(chat_id=update.message.chat_id, text='Sorry, we had an issue. Please, try again.')
def echo(bot, update):
bot.sendMessage(update.message.chat_id, text="Please, use /random or /suicide 'name'")
def developer(bot, update):
botan_track(update)
bot.sendMessage(update.message.chat_id, text='Made by @Rogergonzalez21 with a lot of help from @CelisFlen_Bers. GitHub repo: https://github.com/Rogergonzalez21/TelegramSGirlsBot')
def error(bot, update, error):
logger.warn('Update "%s" caused error "%s"' % (update, error))
def main():
# Create the EventHandler and pass it your bot's token.
updater = Updater(secrets.bot_token)
# Get the dispatcher to register handlers
dp = updater.dispatcher
# on different commands - answer in Telegram
dp.addHandler(CommandHandler("start", start))
dp.addHandler(CommandHandler("help", help))
dp.addHandler(CommandHandler("random", random))
dp.addHandler(CommandHandler("suicide", suicide))
dp.addHandler(CommandHandler("developer", developer))
# on noncommand i.e message - echo the message on Telegram
dp.addHandler(MessageHandler([Filters.text], echo))
# log all errors
dp.addErrorHandler(error)
# Start the Bot
updater.start_polling()
    # Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main() | mit | -2,338,562,011,305,615,000 | 36.677966 | 240 | 0.668841 | false |
jurcicek/extended-hidden-vector-state-parser | confusion/clnum-1.4/build/lib.linux-i686-2.4/clnum/__init__.py | 2 | 2479 | #-----------------------------------------------------------------------------
# Copyright (c) 2006 Raymond L. Buvel
#
# This file is part of clnum, a Python interface to the Class Library for
# Numbers. This module provides module initialization and some functions coded
# in Python instead of C++.
#
# The clnum module is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option) any
# later version.
#
# The clnum module is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# clnum; if not, write to the Free Software Foundation, Inc., 59 Temple Place,
# Suite 330, Boston, MA 02111-1307 USA
#-----------------------------------------------------------------------------
from clnum import *
__doc__ = clnum.__doc__
_version = '1.4'
def ratapx(x, maxint=0x7FFF):
'''Return the input value x approximated as a rational number where the
numerator and denominator are both less than or equal to maxint.
'''
x = mpq(x)
u = x.numer
v = x.denom
f = False # Indicates the fraction is not flipped
if maxint <= 1:
raise ValueError('maxint must be greater than 1')
sign = 1
if u < 0:
u = -u
sign = -1
if u == v:
return mpq(sign*1)
if u == 0:
return mpq(0)
if v < u:
# Flip so that u/v < 1
f = True
u,v = v,u
# Initialize and check for overflow
an = v//u
if an > maxint:
raise ValueError('Cannot satisfy maxint constraint')
    xn1, xn2 = 1, 0
    yn1, yn2 = an, 1
v,u = u, v%u
while u:
# Compute the next term in the continued fraction expansion.
v, (an,u) = u, divmod(v,u)
# Reconstruct the fraction and quit when it is no longer representable.
xn = an*xn1 + xn2
if xn > maxint:
break
yn = an*yn1 + yn2
if yn > maxint:
break
xn2, xn1, yn2, yn1 = xn1, xn, yn1, yn
# xn1 and yn1 contain the properly rounded fraction in lowest terms.
if f:
return mpq(sign*yn1, xn1)
else:
return mpq(sign*xn1, yn1)
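# Hedged usage sketch (not part of the original module): ratapx walks the
# continued-fraction expansion built above and returns the last convergent
# whose numerator and denominator both satisfy the maxint constraint, e.g.:
#
#     ratapx(3.141592653589793, 1000)   # -> the rational 355/113
#
# The convergents of pi are 3, 22/7, 333/106, 355/113, 103993/33102, ...;
# with maxint=1000 the loop breaks at 103993/33102, leaving 355/113.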
| gpl-2.0 | -6,374,207,803,796,737,000 | 27.825581 | 79 | 0.588544 | false |
setten/pymatgen | pymatgen/analysis/bond_valence.py | 6 | 21514 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import collections
import numpy as np
import operator
import os
from math import exp, sqrt
from six.moves import filter
from six.moves import zip
from monty.serialization import loadfn
import six
from pymatgen.core.periodic_table import Element, Specie
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.periodic_table import get_el_sp
"""
This module implements classes to perform bond valence analyses.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Oct 26, 2012"
# Let's initialize some module level properties.
# List of electronegative elements specified in M. O'Keefe, & N. Brese,
# JACS, 1991, 113(9), 3226-3229. doi:10.1021/ja00009a002.
ELECTRONEG = [Element(sym) for sym in ["H", "B", "C", "Si",
"N", "P", "As", "Sb",
"O", "S", "Se", "Te",
"F", "Cl", "Br", "I"]]
module_dir = os.path.dirname(os.path.abspath(__file__))
# Read in BV parameters.
BV_PARAMS = {}
for k, v in loadfn(os.path.join(module_dir, "bvparam_1991.yaml")).items():
BV_PARAMS[Element(k)] = v
# Read in yaml containing data-mined ICSD BV data.
all_data = loadfn(os.path.join(module_dir, "icsd_bv.yaml"))
ICSD_BV_DATA = {Specie.from_string(sp): data
for sp, data in all_data["bvsum"].items()}
PRIOR_PROB = {Specie.from_string(sp): data
for sp, data in all_data["occurrence"].items()}
def calculate_bv_sum(site, nn_list, scale_factor=1.0):
"""
Calculates the BV sum of a site.
Args:
site:
The site
nn_list:
List of nearest neighbors in the format [(nn_site, dist), ...].
scale_factor:
A scale factor to be applied. This is useful for scaling distance,
esp in the case of calculation-relaxed structures which may tend
to under (GGA) or over bind (LDA).
"""
el1 = Element(site.specie.symbol)
bvsum = 0
for (nn, dist) in nn_list:
el2 = Element(nn.specie.symbol)
if (el1 in ELECTRONEG or el2 in ELECTRONEG) and el1 != el2:
r1 = BV_PARAMS[el1]["r"]
r2 = BV_PARAMS[el2]["r"]
c1 = BV_PARAMS[el1]["c"]
c2 = BV_PARAMS[el2]["c"]
R = r1 + r2 - r1 * r2 * (sqrt(c1) - sqrt(c2)) ** 2 / \
(c1 * r1 + c2 * r2)
vij = exp((R - dist * scale_factor) / 0.31)
bvsum += vij * (1 if el1.X < el2.X else -1)
return bvsum
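# Worked form of the loop above (hedged illustration, not part of the original
# module). With the pair parameter R built from the two elements' r and c
# values, each qualifying neighbor contributes a bond valence
#
#     v_ij = exp((R - d_ij * scale_factor) / 0.31)
#
# so a bond sitting exactly at d_ij = R contributes 1, and the contribution
# decays exponentially as the bond lengthens. The sign test on
# electronegativity means bonds to a more electronegative neighbor add to the
# site's sum, while bonds to a less electronegative one subtract.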
def calculate_bv_sum_unordered(site, nn_list, scale_factor=1):
"""
Calculates the BV sum of a site for unordered structures.
Args:
site:
The site
nn_list:
List of nearest neighbors in the format [(nn_site, dist), ...].
scale_factor:
A scale factor to be applied. This is useful for scaling distance,
esp in the case of calculation-relaxed structures which may tend
to under (GGA) or over bind (LDA).
"""
# If the site "site" has N partial occupations as : f_{site}_0,
# f_{site}_1, ... f_{site}_N of elements
# X_{site}_0, X_{site}_1, ... X_{site}_N, and each neighbors nn_i in nn
# has N_{nn_i} partial occupations as :
# f_{nn_i}_0, f_{nn_i}_1, ..., f_{nn_i}_{N_{nn_i}}, then the bv sum of
# site "site" is obtained as :
# \sum_{nn} \sum_j^N \sum_k^{N_{nn}} f_{site}_j f_{nn_i}_k vij_full
# where vij_full is the valence bond of the fully occupied bond
bvsum = 0
for specie1, occu1 in six.iteritems(site.species_and_occu):
el1 = Element(specie1.symbol)
for (nn, dist) in nn_list:
for specie2, occu2 in six.iteritems(nn.species_and_occu):
el2 = Element(specie2.symbol)
if (el1 in ELECTRONEG or el2 in ELECTRONEG) and el1 != el2:
r1 = BV_PARAMS[el1]["r"]
r2 = BV_PARAMS[el2]["r"]
c1 = BV_PARAMS[el1]["c"]
c2 = BV_PARAMS[el2]["c"]
R = r1 + r2 - r1 * r2 * (sqrt(c1) - sqrt(c2)) ** 2 / \
(c1 * r1 + c2 * r2)
vij = exp((R - dist * scale_factor) / 0.31)
bvsum += occu1 * occu2 * vij * (1 if el1.X < el2.X else -1)
return bvsum
class BVAnalyzer(object):
"""
This class implements a maximum a posteriori (MAP) estimation method to
determine oxidation states in a structure. The algorithm is as follows:
1) The bond valence sum of all symmetrically distinct sites in a structure
is calculated using the element-based parameters in M. O'Keefe, & N. Brese,
JACS, 1991, 113(9), 3226-3229. doi:10.1021/ja00009a002.
2) The posterior probabilities of all oxidation states is then calculated
using: P(oxi_state/BV) = K * P(BV/oxi_state) * P(oxi_state), where K is
a constant factor for each element. P(BV/oxi_state) is calculated as a
Gaussian with mean and std deviation determined from an analysis of
the ICSD. The posterior P(oxi_state) is determined from a frequency
analysis of the ICSD.
3) The oxidation states are then ranked in order of decreasing probability
and the oxidation state combination that result in a charge neutral cell
is selected.
"""
CHARGE_NEUTRALITY_TOLERANCE = 0.00001
def __init__(self, symm_tol=0.1, max_radius=4, max_permutations=100000,
distance_scale_factor=1.015,
charge_neutrality_tolerance=CHARGE_NEUTRALITY_TOLERANCE,
forbidden_species=None):
"""
Initializes the BV analyzer, with useful defaults.
Args:
symm_tol:
Symmetry tolerance used to determine which sites are
symmetrically equivalent. Set to 0 to turn off symmetry.
max_radius:
Maximum radius in Angstrom used to find nearest neighbors.
max_permutations:
The maximum number of permutations of oxidation states to test.
distance_scale_factor:
A scale factor to be applied. This is useful for scaling
distances, esp in the case of calculation-relaxed structures
which may tend to under (GGA) or over bind (LDA). The default
of 1.015 works for GGA. For experimental structure, set this to
1.
charge_neutrality_tolerance:
Tolerance on the charge neutrality when unordered structures
are at stake.
forbidden_species:
List of species that are forbidden (example : ["O-"] cannot be
used) It is used when e.g. someone knows that some oxidation
state cannot occur for some atom in a structure or list of
structures.
"""
self.symm_tol = symm_tol
self.max_radius = max_radius
self.max_permutations = max_permutations
self.dist_scale_factor = distance_scale_factor
self.charge_neutrality_tolerance = charge_neutrality_tolerance
forbidden_species = [get_el_sp(sp) for sp in forbidden_species] if \
forbidden_species else []
self.icsd_bv_data = {get_el_sp(specie): data
for specie, data in ICSD_BV_DATA.items()
if specie not in forbidden_species} \
if len(forbidden_species) > 0 else ICSD_BV_DATA
def _calc_site_probabilities(self, site, nn):
el = site.specie.symbol
bv_sum = calculate_bv_sum(site, nn,
scale_factor=self.dist_scale_factor)
prob = {}
for sp, data in self.icsd_bv_data.items():
if sp.symbol == el and sp.oxi_state != 0 and data["std"] > 0:
u = data["mean"]
sigma = data["std"]
# Calculate posterior probability. Note that constant
# factors are ignored. They have no effect on the results.
prob[sp.oxi_state] = exp(-(bv_sum - u) ** 2 / 2 /
(sigma ** 2)) \
/ sigma * PRIOR_PROB[sp]
# Normalize the probabilities
try:
prob = {k: v / sum(prob.values()) for k, v in prob.items()}
except ZeroDivisionError:
prob = {k: 0.0 for k in prob}
return prob
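    # Worked form of the posterior above (hedged illustration, not part of
    # the original module): for each candidate species sp with data-mined
    # mean u and standard deviation sigma, the unnormalized posterior is
    #
    #     P(oxi | BV) ~ exp(-(BV - u)**2 / (2 * sigma**2)) / sigma * P(oxi)
    #
    # i.e. a Gaussian likelihood around the ICSD mean BV sum times the ICSD
    # occurrence prior; the constant factor K from the class docstring is
    # dropped because the dict is renormalized immediately afterwards.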
def _calc_site_probabilities_unordered(self, site, nn):
bv_sum = calculate_bv_sum_unordered(
site, nn, scale_factor=self.dist_scale_factor)
prob = {}
for specie, occu in six.iteritems(site.species_and_occu):
el = specie.symbol
prob[el] = {}
for sp, data in self.icsd_bv_data.items():
if sp.symbol == el and sp.oxi_state != 0 and data["std"] > 0:
u = data["mean"]
sigma = data["std"]
# Calculate posterior probability. Note that constant
# factors are ignored. They have no effect on the results.
prob[el][sp.oxi_state] = exp(-(bv_sum - u) ** 2 / 2 /
(sigma ** 2)) \
/ sigma * PRIOR_PROB[sp]
# Normalize the probabilities
try:
prob[el] = {k: v / sum(prob[el].values())
for k, v in prob[el].items()}
except ZeroDivisionError:
prob[el] = {k: 0.0 for k in prob[el]}
return prob
def get_valences(self, structure):
"""
        Returns a list of valences for the structure. Both ordered and
        unordered structures are handled.
Args:
structure: Structure to analyze
Returns:
A list of valences for each site in the structure (for an ordered
structure), e.g., [1, 1, -2] or a list of lists with the
valences for each fractional element of each site in the
structure (for an unordered structure),
e.g., [[2, 4], [3], [-2], [-2], [-2]]
Raises:
A ValueError if the valences cannot be determined.
"""
els = [Element(el.symbol) for el in structure.composition.elements]
if not set(els).issubset(set(BV_PARAMS.keys())):
raise ValueError(
"Structure contains elements not in set of BV parameters!"
)
# Perform symmetry determination and get sites grouped by symmetry.
if self.symm_tol:
finder = SpacegroupAnalyzer(structure, self.symm_tol)
symm_structure = finder.get_symmetrized_structure()
equi_sites = symm_structure.equivalent_sites
else:
equi_sites = [[site] for site in structure]
# Sort the equivalent sites by decreasing electronegativity.
equi_sites = sorted(equi_sites,
key=lambda sites: -sites[0].species_and_occu
.average_electroneg)
# Get a list of valences and probabilities for each symmetrically
# distinct site.
valences = []
all_prob = []
if structure.is_ordered:
for sites in equi_sites:
test_site = sites[0]
nn = structure.get_neighbors(test_site, self.max_radius)
prob = self._calc_site_probabilities(test_site, nn)
all_prob.append(prob)
val = list(prob.keys())
# Sort valences in order of decreasing probability.
val = sorted(val, key=lambda v: -prob[v])
# Retain probabilities that are at least 1/100 of highest prob.
valences.append(
list(filter(lambda v: prob[v] > 0.01 * prob[val[0]],
val)))
else:
full_all_prob = []
for sites in equi_sites:
test_site = sites[0]
nn = structure.get_neighbors(test_site, self.max_radius)
prob = self._calc_site_probabilities_unordered(test_site, nn)
all_prob.append(prob)
full_all_prob.extend(prob.values())
vals = []
for (elsp, occ) in get_z_ordered_elmap(
test_site.species_and_occu):
val = list(prob[elsp.symbol].keys())
# Sort valences in order of decreasing probability.
val = sorted(val, key=lambda v: -prob[elsp.symbol][v])
                    # Retain probabilities that are at least 1/1000 of the
                    # highest prob.
vals.append(
list(filter(
lambda v: prob[elsp.symbol][v] > 0.001 * prob[
elsp.symbol][val[0]], val)))
valences.append(vals)
# make variables needed for recursion
if structure.is_ordered:
nsites = np.array([len(i) for i in equi_sites])
vmin = np.array([min(i) for i in valences])
vmax = np.array([max(i) for i in valences])
self._n = 0
self._best_score = 0
self._best_vset = None
def evaluate_assignment(v_set):
el_oxi = collections.defaultdict(list)
for i, sites in enumerate(equi_sites):
el_oxi[sites[0].specie.symbol].append(v_set[i])
max_diff = max([max(v) - min(v) for v in el_oxi.values()])
if max_diff > 1:
return
score = six.moves.reduce(
operator.mul, [all_prob[i][v] for i, v in enumerate(v_set)])
if score > self._best_score:
self._best_vset = v_set
self._best_score = score
def _recurse(assigned=[]):
# recurses to find permutations of valences based on whether a
# charge balanced assignment can still be found
if self._n > self.max_permutations:
return
i = len(assigned)
highest = vmax.copy()
highest[:i] = assigned
highest *= nsites
highest = np.sum(highest)
lowest = vmin.copy()
lowest[:i] = assigned
lowest *= nsites
lowest = np.sum(lowest)
if highest < 0 or lowest > 0:
self._n += 1
return
if i == len(valences):
evaluate_assignment(assigned)
self._n += 1
return
else:
for v in valences[i]:
new_assigned = list(assigned)
_recurse(new_assigned + [v])
else:
nsites = np.array([len(i) for i in equi_sites])
tmp = []
attrib = []
for insite, nsite in enumerate(nsites):
for val in valences[insite]:
tmp.append(nsite)
attrib.append(insite)
new_nsites = np.array(tmp)
fractions = []
elements = []
for sites in equi_sites:
for sp, occu in get_z_ordered_elmap(sites[0].species_and_occu):
elements.append(sp.symbol)
fractions.append(occu)
            fractions = np.array(fractions, dtype=float)
new_valences = []
for vals in valences:
for val in vals:
new_valences.append(val)
            vmin = np.array([min(i) for i in new_valences], dtype=float)
            vmax = np.array([max(i) for i in new_valences], dtype=float)
self._n = 0
self._best_score = 0
self._best_vset = None
def evaluate_assignment(v_set):
el_oxi = collections.defaultdict(list)
jj = 0
for i, sites in enumerate(equi_sites):
for specie, occu in get_z_ordered_elmap(
sites[0].species_and_occu):
el_oxi[specie.symbol].append(v_set[jj])
jj += 1
max_diff = max([max(v) - min(v) for v in el_oxi.values()])
if max_diff > 2:
return
score = six.moves.reduce(
operator.mul,
[all_prob[attrib[iv]][elements[iv]][vv]
for iv, vv in enumerate(v_set)])
if score > self._best_score:
self._best_vset = v_set
self._best_score = score
def _recurse(assigned=[]):
# recurses to find permutations of valences based on whether a
# charge balanced assignment can still be found
if self._n > self.max_permutations:
return
i = len(assigned)
highest = vmax.copy()
highest[:i] = assigned
highest *= new_nsites
highest *= fractions
highest = np.sum(highest)
lowest = vmin.copy()
lowest[:i] = assigned
lowest *= new_nsites
lowest *= fractions
lowest = np.sum(lowest)
if (highest < -self.charge_neutrality_tolerance or
lowest > self.charge_neutrality_tolerance):
self._n += 1
return
if i == len(new_valences):
evaluate_assignment(assigned)
self._n += 1
return
else:
for v in new_valences[i]:
new_assigned = list(assigned)
_recurse(new_assigned + [v])
_recurse()
if self._best_vset:
if structure.is_ordered:
assigned = {}
for val, sites in zip(self._best_vset, equi_sites):
for site in sites:
assigned[site] = val
return [int(assigned[site]) for site in structure]
else:
assigned = {}
new_best_vset = []
for ii in range(len(equi_sites)):
new_best_vset.append(list())
for ival, val in enumerate(self._best_vset):
new_best_vset[attrib[ival]].append(val)
for val, sites in zip(new_best_vset, equi_sites):
for site in sites:
assigned[site] = val
return [[int(frac_site) for frac_site in assigned[site]]
for site in structure]
else:
raise ValueError("Valences cannot be assigned!")
def get_oxi_state_decorated_structure(self, structure):
"""
        Get an oxidation state decorated structure. Both ordered and
        unordered structures are handled.
Args:
structure: Structure to analyze
Returns:
A modified structure that is oxidation state decorated.
Raises:
ValueError if the valences cannot be determined.
"""
s = structure.copy()
if s.is_ordered:
valences = self.get_valences(s)
s.add_oxidation_state_by_site(valences)
else:
valences = self.get_valences(s)
s = add_oxidation_state_by_site_fraction(s, valences)
return s
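# Hedged usage sketch (not part of the original module); assumes a pymatgen
# Structure built elsewhere, e.g. an ordered rocksalt NaCl cell:
#
#     analyzer = BVAnalyzer()
#     valences = analyzer.get_valences(structure)
#     # e.g. [1, -1] per Na/Cl site for an ordered NaCl primitive cell
#     decorated = analyzer.get_oxi_state_decorated_structure(structure)
#     # same structure, but with Na+ / Cl- species attached to each site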
def get_z_ordered_elmap(comp):
"""
Arbitrary ordered elmap on the elements/species of a composition of a
given site in an unordered structure. Returns a list of tuples (
element_or_specie: occupation) in the arbitrary order.
The arbitrary order is based on the Z of the element and the smallest
fractional occupations first.
Example : {"Ni3+": 0.2, "Ni4+": 0.2, "Cr3+": 0.15, "Zn2+": 0.34,
"Cr4+": 0.11} will yield the species in the following order :
Cr4+, Cr3+, Ni3+, Ni4+, Zn2+ ... or
Cr4+, Cr3+, Ni4+, Ni3+, Zn2+
"""
return sorted([(elsp, comp[elsp]) for elsp in comp.keys()])
def add_oxidation_state_by_site_fraction(structure, oxidation_states):
"""
Add oxidation states to a structure by fractional site.
Args:
oxidation_states (list): List of list of oxidation states for each
site fraction for each site.
E.g., [[2, 4], [3], [-2], [-2], [-2]]
"""
try:
for i, site in enumerate(structure):
new_sp = collections.defaultdict(float)
for j, (el, occu) in enumerate(get_z_ordered_elmap(site
.species_and_occu)):
specie = Specie(el.symbol, oxidation_states[i][j])
new_sp[specie] += occu
structure[i] = new_sp
return structure
except IndexError:
raise ValueError("Oxidation state of all sites must be "
"specified in the list.")
| mit | -1,407,815,677,928,642,600 | 39.363977 | 80 | 0.524914 | false |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/IPython/utils/localinterfaces.py | 10 | 8414 | """Simple utility for building a list of local IPs using the socket module.
This module defines two constants:
LOCALHOST : The loopback interface, or the first interface that points to this
machine. It will *almost* always be '127.0.0.1'
LOCAL_IPS : A list of IP addresses, loopback first, that point to this machine.
This will include LOCALHOST, PUBLIC_IPS, and aliases for all hosts,
such as '0.0.0.0'.
PUBLIC_IPS : A list of public IP addresses that point to this machine.
Use these to tell remote clients where to find you.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import re
import socket
from .data import uniq_stable
from .process import get_output_error_code
from .warn import warn
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
LOCAL_IPS = []
PUBLIC_IPS = []
LOCALHOST = ''
def _only_once(f):
"""decorator to only run a function once"""
f.called = False
def wrapped(**kwargs):
if f.called:
return
ret = f(**kwargs)
f.called = True
return ret
return wrapped
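# Hedged note (not part of the original module): _only_once caches only the
# "already called" flag, not the return value, so any call after the first
# returns None. That is fine here because _load_ips() publishes its results
# through the module-level LOCALHOST / LOCAL_IPS / PUBLIC_IPS globals.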
def _requires_ips(f):
"""decorator to ensure load_ips has been run before f"""
def ips_loaded(*args, **kwargs):
_load_ips()
return f(*args, **kwargs)
return ips_loaded
# subprocess-parsing ip finders
class NoIPAddresses(Exception):
pass
def _populate_from_list(addrs):
"""populate local and public IPs from flat list of all IPs"""
if not addrs:
raise NoIPAddresses
global LOCALHOST
public_ips = []
local_ips = []
for ip in addrs:
local_ips.append(ip)
if not ip.startswith('127.'):
public_ips.append(ip)
elif not LOCALHOST:
LOCALHOST = ip
if not LOCALHOST:
LOCALHOST = '127.0.0.1'
local_ips.insert(0, LOCALHOST)
local_ips.extend(['0.0.0.0', ''])
LOCAL_IPS[:] = uniq_stable(local_ips)
PUBLIC_IPS[:] = uniq_stable(public_ips)
def _load_ips_ifconfig():
"""load ip addresses from `ifconfig` output (posix)"""
out, err, rc = get_output_error_code('ifconfig')
if rc:
# no ifconfig, it's usually in /sbin and /sbin is not on everyone's PATH
out, err, rc = get_output_error_code('/sbin/ifconfig')
if rc:
raise IOError("no ifconfig: %s" % err)
lines = out.splitlines()
addrs = []
for line in lines:
blocks = line.lower().split()
if (len(blocks) >= 2) and (blocks[0] == 'inet'):
if blocks[1].startswith("addr:"):
addrs.append(blocks[1].split(":")[1])
else:
addrs.append(blocks[1])
_populate_from_list(addrs)
def _load_ips_ip():
"""load ip addresses from `ip addr` output (Linux)"""
out, err, rc = get_output_error_code('ip addr')
if rc:
raise IOError("no ip: %s" % err)
lines = out.splitlines()
addrs = []
for line in lines:
blocks = line.lower().split()
if (len(blocks) >= 2) and (blocks[0] == 'inet'):
addrs.append(blocks[1].split('/')[0])
_populate_from_list(addrs)
_ipconfig_ipv4_pat = re.compile(r'ipv4.*?(\d+\.\d+\.\d+\.\d+)$', re.IGNORECASE)
def _load_ips_ipconfig():
"""load ip addresses from `ipconfig` output (Windows)"""
out, err, rc = get_output_error_code('ipconfig')
if rc:
raise IOError("no ipconfig: %s" % err)
lines = out.splitlines()
addrs = []
for line in lines:
m = _ipconfig_ipv4_pat.match(line.strip())
if m:
addrs.append(m.group(1))
_populate_from_list(addrs)
def _load_ips_netifaces():
"""load ip addresses with netifaces"""
import netifaces
global LOCALHOST
local_ips = []
public_ips = []
# list of iface names, 'lo0', 'eth0', etc.
for iface in netifaces.interfaces():
# list of ipv4 addrinfo dicts
ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])
for entry in ipv4s:
addr = entry.get('addr')
if not addr:
continue
if not (iface.startswith('lo') or addr.startswith('127.')):
public_ips.append(addr)
elif not LOCALHOST:
LOCALHOST = addr
local_ips.append(addr)
if not LOCALHOST:
# we never found a loopback interface (can this ever happen?), assume common default
LOCALHOST = '127.0.0.1'
local_ips.insert(0, LOCALHOST)
local_ips.extend(['0.0.0.0', ''])
LOCAL_IPS[:] = uniq_stable(local_ips)
PUBLIC_IPS[:] = uniq_stable(public_ips)
def _load_ips_gethostbyname():
"""load ip addresses with socket.gethostbyname_ex
This can be slow.
"""
global LOCALHOST
try:
LOCAL_IPS[:] = socket.gethostbyname_ex('localhost')[2]
except socket.error:
# assume common default
LOCAL_IPS[:] = ['127.0.0.1']
try:
hostname = socket.gethostname()
PUBLIC_IPS[:] = socket.gethostbyname_ex(hostname)[2]
# try hostname.local, in case hostname has been short-circuited to loopback
if not hostname.endswith('.local') and all(ip.startswith('127') for ip in PUBLIC_IPS):
PUBLIC_IPS[:] = socket.gethostbyname_ex(socket.gethostname() + '.local')[2]
except socket.error:
pass
finally:
PUBLIC_IPS[:] = uniq_stable(PUBLIC_IPS)
LOCAL_IPS.extend(PUBLIC_IPS)
# include all-interface aliases: 0.0.0.0 and ''
LOCAL_IPS.extend(['0.0.0.0', ''])
LOCAL_IPS[:] = uniq_stable(LOCAL_IPS)
LOCALHOST = LOCAL_IPS[0]
def _load_ips_dumb():
"""Fallback in case of unexpected failure"""
global LOCALHOST
LOCALHOST = '127.0.0.1'
LOCAL_IPS[:] = [LOCALHOST, '0.0.0.0', '']
PUBLIC_IPS[:] = []
@_only_once
def _load_ips(suppress_exceptions=True):
"""load the IPs that point to this machine
This function will only ever be called once.
It will use netifaces to do it quickly if available.
    Then it will fall back on parsing the output of ifconfig / ip addr / ipconfig, as appropriate.
    Finally, it will fall back on socket.gethostbyname_ex, which can be slow.
"""
try:
# first priority, use netifaces
try:
return _load_ips_netifaces()
except ImportError:
pass
# second priority, parse subprocess output (how reliable is this?)
if os.name == 'nt':
try:
return _load_ips_ipconfig()
except (IOError, NoIPAddresses):
pass
else:
try:
return _load_ips_ifconfig()
except (IOError, NoIPAddresses):
pass
try:
return _load_ips_ip()
except (IOError, NoIPAddresses):
pass
# lowest priority, use gethostbyname
return _load_ips_gethostbyname()
except Exception as e:
if not suppress_exceptions:
raise
# unexpected error shouldn't crash, load dumb default values instead.
warn("Unexpected error discovering local network interfaces: %s" % e)
_load_ips_dumb()
@_requires_ips
def local_ips():
"""return the IP addresses that point to this machine"""
return LOCAL_IPS
@_requires_ips
def public_ips():
"""return the IP addresses for this machine that are visible to other machines"""
return PUBLIC_IPS
@_requires_ips
def localhost():
"""return ip for localhost (almost always 127.0.0.1)"""
return LOCALHOST
@_requires_ips
def is_local_ip(ip):
"""does `ip` point to this machine?"""
return ip in LOCAL_IPS
@_requires_ips
def is_public_ip(ip):
"""is `ip` a publicly visible address?"""
return ip in PUBLIC_IPS
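# Hedged usage sketch (not part of the original module):
#
#     from IPython.utils.localinterfaces import localhost, local_ips, is_local_ip
#     localhost()                # almost always '127.0.0.1'
#     '0.0.0.0' in local_ips()   # True; the all-interfaces aliases are included
#     is_local_ip('127.0.0.1')   # True on any machine with a loopback address
#
# The first of these calls triggers the one-time _load_ips() discovery chain
# (netifaces if available, then subprocess parsing, then gethostbyname).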
| bsd-3-clause | -5,542,274,935,904,721,000 | 29.266187 | 97 | 0.558831 | false |
luotao1/Paddle | python/paddle/fluid/tests/unittests/test_naive_best_fit_gpu_memory_limit.py | 2 | 1634 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import unittest
import numpy as np
fluid.core.globals()['FLAGS_allocator_strategy'] = 'naive_best_fit'
if fluid.is_compiled_with_cuda():
fluid.core.globals()['FLAGS_gpu_memory_limit_mb'] = 10
class TestBase(unittest.TestCase):
def setUp(self):
if fluid.is_compiled_with_cuda():
self._limit = fluid.core.globals()['FLAGS_gpu_memory_limit_mb']
def test_allocate(self):
if not fluid.is_compiled_with_cuda():
return
other_dim = int(1024 * 1024 / 4)
place = fluid.CUDAPlace(0)
t = fluid.LoDTensor()
t.set(np.ndarray(
[int(self._limit / 2), other_dim], dtype='float32'),
place)
del t
t = fluid.LoDTensor()
large_np = np.ndarray([2 * self._limit, other_dim], dtype='float32')
        try:
            t.set(large_np, place)
        except Exception:
            pass  # expected: allocating beyond the limit must fail
        else:
            self.fail('allocating beyond FLAGS_gpu_memory_limit_mb should raise')
if __name__ == '__main__':
unittest.main()
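# Note (hedged, not part of the original test): other_dim is 1024*1024/4
# float32 elements, i.e. exactly 1 MB per row, so the first tensor asks for
# about limit/2 MB (which must fit under FLAGS_gpu_memory_limit_mb) and the
# second asks for about 2*limit MB (which the naive_best_fit allocator must
# reject), matching what test_allocate asserts.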
| apache-2.0 | 8,708,300,202,930,062,000 | 29.259259 | 76 | 0.644431 | false |