# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
class TestElementwiseOp(OpTest):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64")
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out')
    def test_check_grad_ignore_x(self):
self.check_grad(
['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))
    def test_check_grad_ignore_y(self):
self.check_grad(
['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
class TestBF16ElementwiseOp(OpTest):
def setUp(self):
self.op_type = "elementwise_sub"
self.dtype = np.uint16
x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
y = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
out = x - y
self.inputs = {
'X': convert_float_to_uint16(x),
'Y': convert_float_to_uint16(y)
}
self.outputs = {'Out': convert_float_to_uint16(out)}
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out')
    def test_check_grad_ignore_x(self):
self.check_grad(['Y'], 'Out', no_grad_set=set("X"))
    def test_check_grad_ignore_y(self):
self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseSubOp_scalar(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(10, 3, 4).astype(np.float64),
'Y': np.random.rand(1).astype(np.float64)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
class TestElementwiseSubOp_Vector(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.random((100, )).astype("float64"),
'Y': np.random.random((100, )).astype("float64")
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
class TestElementwiseSubOp_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(100, 3, 2).astype(np.float64),
'Y': np.random.rand(100).astype(np.float64)
}
self.attrs = {'axis': 0}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(100, 1, 1)
}
class TestElementwiseSubOp_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(2, 100, 3).astype(np.float64),
'Y': np.random.rand(100).astype(np.float64)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 100, 1)
}
class TestElementwiseSubOp_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(np.float64),
'Y': np.random.rand(100).astype(np.float64)
}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 1, 100)
}
class TestElementwiseSubOp_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(2, 10, 12, 3).astype(np.float64),
'Y': np.random.rand(10, 12).astype(np.float64)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 10, 12, 1)
}
class TestElementwiseSubOp_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(2, 5, 3, 12).astype(np.float64),
'Y': np.random.rand(2, 5, 1, 12).astype(np.float64)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
class TestElementwiseSubOp_commonuse_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(np.float64),
'Y': np.random.rand(1, 1, 100).astype(np.float64)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
class TestElementwiseSubOp_commonuse_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(10, 3, 1, 4).astype(np.float64),
'Y': np.random.rand(10, 1, 12, 1).astype(np.float64)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(10, 12).astype(np.float64),
'Y': np.random.rand(2, 3, 10, 12).astype(np.float64)
}
self.attrs = {'axis': 2}
self.outputs = {
'Out': self.inputs['X'].reshape(1, 1, 10, 12) - self.inputs['Y']
}
class TestComplexElementwiseSubOp(OpTest):
def setUp(self):
self.op_type = "elementwise_sub"
self.dtype = np.float64
self.shape = (2, 3, 4, 5)
self.init_input_output()
self.init_grad_input_output()
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(self.x),
'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
}
self.attrs = {'axis': -1, 'use_mkldnn': False}
self.outputs = {'Out': self.out}
def init_base_dtype(self):
self.dtype = np.float64
def init_input_output(self):
self.x = np.random.random(self.shape).astype(
self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype)
self.y = np.random.random(self.shape).astype(
self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype)
self.out = self.x - self.y
def init_grad_input_output(self):
self.grad_out = np.ones(self.shape, self.dtype) + 1J * np.ones(
self.shape, self.dtype)
self.grad_x = self.grad_out
self.grad_y = -self.grad_out
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
self.check_grad(
['X', 'Y'],
'Out',
user_defined_grads=[self.grad_x, self.grad_y],
user_defined_grad_outputs=[self.grad_out])
    def test_check_grad_ignore_x(self):
self.check_grad(
['Y'],
'Out',
no_grad_set=set("X"),
user_defined_grads=[self.grad_y],
user_defined_grad_outputs=[self.grad_out])
    def test_check_grad_ignore_y(self):
self.check_grad(
['X'],
'Out',
no_grad_set=set('Y'),
user_defined_grads=[self.grad_x],
user_defined_grad_outputs=[self.grad_out])
class TestRealComplexElementwiseSubOp(TestComplexElementwiseSubOp):
def init_input_output(self):
self.x = np.random.random(self.shape).astype(self.dtype)
self.y = np.random.random(self.shape).astype(
self.dtype) + 1J * np.random.random(self.shape).astype(self.dtype)
self.out = self.x - self.y
def init_grad_input_output(self):
self.grad_out = np.ones(self.shape, self.dtype) + 1J * np.ones(
self.shape, self.dtype)
self.grad_x = np.real(self.grad_out)
self.grad_y = -self.grad_out
class TestSubtractApi(unittest.TestCase):
def _executed_api(self, x, y, name=None):
return paddle.subtract(x, y, name)
def test_name(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[2, 3], dtype="float32")
y = fluid.data(name='y', shape=[2, 3], dtype='float32')
y_1 = self._executed_api(x, y, name='subtract_res')
self.assertEqual(('subtract_res' in y_1.name), True)
def test_declarative(self):
with fluid.program_guard(fluid.Program()):
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = self._executed_api(x, y)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
z_expected = np.array([1., -2., 2.])
self.assertEqual((z_value == z_expected).all(), True)
def test_dygraph(self):
with fluid.dygraph.guard():
np_x = np.array([2, 3, 4]).astype('float64')
np_y = np.array([1, 5, 2]).astype('float64')
x = fluid.dygraph.to_variable(np_x)
y = fluid.dygraph.to_variable(np_y)
z = self._executed_api(x, y)
np_z = z.numpy()
z_expected = np.array([1., -2., 2.])
self.assertEqual((np_z == z_expected).all(), True)
class TestSubtractInplaceApi(TestSubtractApi):
def _executed_api(self, x, y, name=None):
return x.subtract_(y, name)
class TestSubtractInplaceBroadcastSuccess(unittest.TestCase):
def init_data(self):
self.x_numpy = np.random.rand(2, 3, 4).astype('float')
self.y_numpy = np.random.rand(3, 4).astype('float')
def test_broadcast_success(self):
paddle.disable_static()
self.init_data()
x = paddle.to_tensor(self.x_numpy)
y = paddle.to_tensor(self.y_numpy)
inplace_result = x.subtract_(y)
numpy_result = self.x_numpy - self.y_numpy
self.assertEqual((inplace_result.numpy() == numpy_result).all(), True)
paddle.enable_static()
class TestSubtractInplaceBroadcastSuccess2(TestSubtractInplaceBroadcastSuccess):
def init_data(self):
self.x_numpy = np.random.rand(1, 2, 3, 1).astype('float')
self.y_numpy = np.random.rand(3, 1).astype('float')
class TestSubtractInplaceBroadcastSuccess3(TestSubtractInplaceBroadcastSuccess):
def init_data(self):
self.x_numpy = np.random.rand(2, 3, 1, 5).astype('float')
self.y_numpy = np.random.rand(1, 3, 1, 5).astype('float')
class TestSubtractInplaceBroadcastError(unittest.TestCase):
def init_data(self):
self.x_numpy = np.random.rand(3, 4).astype('float')
self.y_numpy = np.random.rand(2, 3, 4).astype('float')
def test_broadcast_errors(self):
paddle.disable_static()
self.init_data()
x = paddle.to_tensor(self.x_numpy)
y = paddle.to_tensor(self.y_numpy)
def broadcast_shape_error():
x.subtract_(y)
self.assertRaises(ValueError, broadcast_shape_error)
paddle.enable_static()
class TestSubtractInplaceBroadcastError2(TestSubtractInplaceBroadcastError):
def init_data(self):
self.x_numpy = np.random.rand(2, 1, 4).astype('float')
self.y_numpy = np.random.rand(2, 3, 4).astype('float')
class TestSubtractInplaceBroadcastError3(TestSubtractInplaceBroadcastError):
def init_data(self):
self.x_numpy = np.random.rand(5, 2, 1, 4).astype('float')
self.y_numpy = np.random.rand(2, 3, 4).astype('float')
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
from sympy.core import symbols, Symbol, Tuple, oo
from sympy.core.compatibility import iterable, range
from sympy.tensor.indexed import IndexException
from sympy.utilities.pytest import raises
# import test:
from sympy import IndexedBase, Idx, Indexed, S, sin, cos, Sum, Piecewise, And, Order
def test_Idx_construction():
i, a, b = symbols('i a b', integer=True)
assert Idx(i) != Idx(i, 1)
assert Idx(i, a) == Idx(i, (0, a - 1))
assert Idx(i, oo) == Idx(i, (0, oo))
x = symbols('x')
raises(TypeError, lambda: Idx(x))
raises(TypeError, lambda: Idx(0.5))
raises(TypeError, lambda: Idx(i, x))
raises(TypeError, lambda: Idx(i, 0.5))
raises(TypeError, lambda: Idx(i, (x, 5)))
raises(TypeError, lambda: Idx(i, (2, x)))
raises(TypeError, lambda: Idx(i, (2, 3.5)))
def test_Idx_properties():
i, a, b = symbols('i a b', integer=True)
assert Idx(i).is_integer
def test_Idx_bounds():
i, a, b = symbols('i a b', integer=True)
assert Idx(i).lower is None
assert Idx(i).upper is None
assert Idx(i, a).lower == 0
assert Idx(i, a).upper == a - 1
assert Idx(i, 5).lower == 0
assert Idx(i, 5).upper == 4
assert Idx(i, oo).lower == 0
assert Idx(i, oo).upper == oo
assert Idx(i, (a, b)).lower == a
assert Idx(i, (a, b)).upper == b
assert Idx(i, (1, 5)).lower == 1
assert Idx(i, (1, 5)).upper == 5
assert Idx(i, (-oo, oo)).lower == -oo
assert Idx(i, (-oo, oo)).upper == oo
def test_Idx_fixed_bounds():
i, a, b, x = symbols('i a b x', integer=True)
assert Idx(x).lower is None
assert Idx(x).upper is None
assert Idx(x, a).lower == 0
assert Idx(x, a).upper == a - 1
assert Idx(x, 5).lower == 0
assert Idx(x, 5).upper == 4
assert Idx(x, oo).lower == 0
assert Idx(x, oo).upper == oo
assert Idx(x, (a, b)).lower == a
assert Idx(x, (a, b)).upper == b
assert Idx(x, (1, 5)).lower == 1
assert Idx(x, (1, 5)).upper == 5
assert Idx(x, (-oo, oo)).lower == -oo
assert Idx(x, (-oo, oo)).upper == oo
def test_Idx_func_args():
i, a, b = symbols('i a b', integer=True)
ii = Idx(i)
assert ii.func(*ii.args) == ii
ii = Idx(i, a)
assert ii.func(*ii.args) == ii
ii = Idx(i, (a, b))
assert ii.func(*ii.args) == ii
def test_Idx_subs():
i, a, b = symbols('i a b', integer=True)
assert Idx(i, a).subs(a, b) == Idx(i, b)
assert Idx(i, a).subs(i, b) == Idx(b, a)
assert Idx(i).subs(i, 2) == Idx(2)
assert Idx(i, a).subs(a, 2) == Idx(i, 2)
assert Idx(i, (a, b)).subs(i, 2) == Idx(2, (a, b))
def test_IndexedBase_sugar():
i, j = symbols('i j', integer=True)
a = symbols('a')
A1 = Indexed(a, i, j)
A2 = IndexedBase(a)
assert A1 == A2[i, j]
assert A1 == A2[(i, j)]
assert A1 == A2[[i, j]]
assert A1 == A2[Tuple(i, j)]
assert all(a.is_Integer for a in A2[1, 0].args[1:])
def test_IndexedBase_subs():
i, j, k = symbols('i j k', integer=True)
a, b, c = symbols('a b c')
A = IndexedBase(a)
B = IndexedBase(b)
C = IndexedBase(c)
assert A[i] == B[i].subs(b, a)
assert isinstance(C[1].subs(C, {1: 2}), type(A[1]))
def test_IndexedBase_shape():
i, j, m, n = symbols('i j m n', integer=True)
a = IndexedBase('a', shape=(m, m))
b = IndexedBase('a', shape=(m, n))
assert b.shape == Tuple(m, n)
assert a[i, j] != b[i, j]
assert a[i, j] == b[i, j].subs(n, m)
assert b.func(*b.args) == b
assert b[i, j].func(*b[i, j].args) == b[i, j]
raises(IndexException, lambda: b[i])
raises(IndexException, lambda: b[i, i, j])
F = IndexedBase("F", shape=m)
assert F.shape == Tuple(m)
assert F[i].subs(i, j) == F[j]
raises(IndexException, lambda: F[i, j])
def test_Indexed_constructor():
i, j = symbols('i j', integer=True)
A = Indexed('A', i, j)
assert A == Indexed(Symbol('A'), i, j)
assert A == Indexed(IndexedBase('A'), i, j)
raises(TypeError, lambda: Indexed(A, i, j))
raises(IndexException, lambda: Indexed("A"))
def test_Indexed_func_args():
i, j = symbols('i j', integer=True)
a = symbols('a')
A = Indexed(a, i, j)
assert A == A.func(*A.args)
def test_Indexed_subs():
i, j, k = symbols('i j k', integer=True)
a, b = symbols('a b')
A = IndexedBase(a)
B = IndexedBase(b)
assert A[i, j] == B[i, j].subs(b, a)
assert A[i, j] == A[i, k].subs(k, j)
def test_Indexed_properties():
i, j = symbols('i j', integer=True)
A = Indexed('A', i, j)
assert A.rank == 2
assert A.indices == (i, j)
assert A.base == IndexedBase('A')
assert A.ranges == [None, None]
raises(IndexException, lambda: A.shape)
n, m = symbols('n m', integer=True)
assert Indexed('A', Idx(
i, m), Idx(j, n)).ranges == [Tuple(0, m - 1), Tuple(0, n - 1)]
assert Indexed('A', Idx(i, m), Idx(j, n)).shape == Tuple(m, n)
raises(IndexException, lambda: Indexed("A", Idx(i, m), Idx(j)).shape)
def test_Indexed_shape_precedence():
i, j = symbols('i j', integer=True)
o, p = symbols('o p', integer=True)
n, m = symbols('n m', integer=True)
a = IndexedBase('a', shape=(o, p))
assert a.shape == Tuple(o, p)
assert Indexed(
a, Idx(i, m), Idx(j, n)).ranges == [Tuple(0, m - 1), Tuple(0, n - 1)]
assert Indexed(a, Idx(i, m), Idx(j, n)).shape == Tuple(o, p)
assert Indexed(
a, Idx(i, m), Idx(j)).ranges == [Tuple(0, m - 1), Tuple(None, None)]
assert Indexed(a, Idx(i, m), Idx(j)).shape == Tuple(o, p)
def test_complex_indices():
i, j = symbols('i j', integer=True)
A = Indexed('A', i, i + j)
assert A.rank == 2
assert A.indices == (i, i + j)
def test_not_iterable():
i, j = symbols('i j', integer=True)
A = Indexed('A', i, i + j)
assert not iterable(A)
def test_Indexed_coeff():
N = Symbol('N', integer=True)
len_y = N
i = Idx('i', len_y-1)
y = IndexedBase('y', shape=(len_y,))
a = (1/y[i+1]*y[i]).coeff(y[i])
b = (y[i]/y[i+1]).coeff(y[i])
assert a == b
def test_differentiation():
from sympy.functions.special.tensor_functions import KroneckerDelta
i, j, k, l = symbols('i j k l', cls=Idx)
a = symbols('a')
m, n = symbols("m, n", integer=True, finite=True)
assert m.is_real
h, L = symbols('h L', cls=IndexedBase)
hi, hj = h[i], h[j]
expr = hi
assert expr.diff(hj) == KroneckerDelta(i, j)
assert expr.diff(hi) == KroneckerDelta(i, i)
expr = S(2) * hi
assert expr.diff(hj) == S(2) * KroneckerDelta(i, j)
assert expr.diff(hi) == S(2) * KroneckerDelta(i, i)
assert expr.diff(a) == S.Zero
assert Sum(expr, (i, -oo, oo)).diff(hj) == Sum(2*KroneckerDelta(i, j), (i, -oo, oo))
assert Sum(expr.diff(hj), (i, -oo, oo)) == Sum(2*KroneckerDelta(i, j), (i, -oo, oo))
assert Sum(expr, (i, -oo, oo)).diff(hj).doit() == 2
assert Sum(expr.diff(hi), (i, -oo, oo)).doit() == Sum(2, (i, -oo, oo)).doit()
assert Sum(expr, (i, -oo, oo)).diff(hi).doit() == oo
expr = a * hj * hj / S(2)
assert expr.diff(hi) == a * h[j] * KroneckerDelta(i, j)
assert expr.diff(a) == hj * hj / S(2)
assert expr.diff(a, 2) == S.Zero
assert Sum(expr, (i, -oo, oo)).diff(hi) == Sum(a*KroneckerDelta(i, j)*h[j], (i, -oo, oo))
assert Sum(expr.diff(hi), (i, -oo, oo)) == Sum(a*KroneckerDelta(i, j)*h[j], (i, -oo, oo))
assert Sum(expr, (i, -oo, oo)).diff(hi).doit() == a*h[j]
assert Sum(expr, (j, -oo, oo)).diff(hi) == Sum(a*KroneckerDelta(i, j)*h[j], (j, -oo, oo))
assert Sum(expr.diff(hi), (j, -oo, oo)) == Sum(a*KroneckerDelta(i, j)*h[j], (j, -oo, oo))
assert Sum(expr, (j, -oo, oo)).diff(hi).doit() == a*h[i]
expr = a * sin(hj * hj)
assert expr.diff(hi) == 2*a*cos(hj * hj) * hj * KroneckerDelta(i, j)
assert expr.diff(hj) == 2*a*cos(hj * hj) * hj
expr = a * L[i, j] * h[j]
assert expr.diff(hi) == a*L[i, j]*KroneckerDelta(i, j)
assert expr.diff(hj) == a*L[i, j]
assert expr.diff(L[i, j]) == a*h[j]
assert expr.diff(L[k, l]) == a*KroneckerDelta(i, k)*KroneckerDelta(j, l)*h[j]
assert expr.diff(L[i, l]) == a*KroneckerDelta(j, l)*h[j]
assert Sum(expr, (j, -oo, oo)).diff(L[k, l]) == Sum(a * KroneckerDelta(i, k) * KroneckerDelta(j, l) * h[j], (j, -oo, oo))
assert Sum(expr, (j, -oo, oo)).diff(L[k, l]).doit() == a * KroneckerDelta(i, k) * h[l]
assert h[m].diff(h[m]) == 1
assert h[m].diff(h[n]) == KroneckerDelta(m, n)
assert Sum(a*h[m], (m, -oo, oo)).diff(h[n]) == Sum(a*KroneckerDelta(m, n), (m, -oo, oo))
assert Sum(a*h[m], (m, -oo, oo)).diff(h[n]).doit() == a
assert Sum(a*h[m], (n, -oo, oo)).diff(h[n]) == Sum(a*KroneckerDelta(m, n), (n, -oo, oo))
assert Sum(a*h[m], (m, -oo, oo)).diff(h[m]).doit() == oo*a
def test_indexed_series():
A = IndexedBase("A")
i = symbols("i", integer=True)
assert sin(A[i]).series(A[i]) == A[i] - A[i]**3/6 + A[i]**5/120 + Order(A[i]**6, A[i])
#!/usr/bin/env python
"""ClooudFormation custom Lambda backed resource that returns the latest AMI with the given parameters."""
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
########################################################
# Changelog
# 1.0.0
# Made compliant with major Python linters
# flake8 (pep8 & pyflakes)
# Disabled E501 (line length)
# Disabled E241 (whitespace after comma)
# OpenStack Style Guide
# Disabled H306 (alphabetize imports)
# pep257
# pycodestyle
# pylint
# Disabled C0301 (line length)
# Disabled C0326 (whitespace after comma)
from __future__ import print_function
import json
import time
import boto3
from botocore.vendored import requests
DEBUG_MODE = True # Manually change when debugging
try:
CFN_CLIENT = boto3.client('cloudformation')
except Exception as error:
print('Error creating boto3.client, error text follows:\n%s' % error)
raise Exception(error)
def format_response_body(event, context, response_status, response_data, physical_resource_id):
"""Format the response for Cloudformation."""
if DEBUG_MODE is True:
print("Started function format_response_body")
response_body = {}
response_body['Status'] = response_status
    if response_status == "FAILED":
response_body['Reason'] = response_data['Message']
else:
response_body['Reason'] = "completed"
response_body['PhysicalResourceId'] = physical_resource_id or context.log_stream_name
response_body['StackId'] = event['StackId']
response_body['RequestId'] = event['RequestId']
response_body['LogicalResourceId'] = event['LogicalResourceId']
response_body['Data'] = response_data
if DEBUG_MODE is True:
print("Finished function format_response_body")
return response_body
def send(event, context, response_status, response_data, physical_resource_id):
"""Send a response."""
if 'ResponseURL' in event:
if DEBUG_MODE is True:
print("Started function send")
print("CF Response URL: " + event['ResponseURL'])
response_body = format_response_body(event, context, response_status, response_data, physical_resource_id)
json_response_body = json.dumps(response_body)
if DEBUG_MODE is True:
print("CF Response Body: %s" % str(json_response_body))
headers = {
'content-type': '',
'content-length': str(len(json_response_body))
}
try:
response = requests.put(
event['ResponseURL'],
data=json_response_body,
headers=headers
)
if DEBUG_MODE is True:
print("CF Status code: ", response.reason)
print("Finished function send")
except Exception as error: # pylint: disable=W0703
print("Failed to send event, raising exception without retrying.")
raise Exception(error)
def validate_role_on_create(event, context):
"""Validate the role we are running as is the right one."""
if DEBUG_MODE is True:
print("Started function validate_role_on_create")
try:
describe_stacks_response = CFN_CLIENT.describe_stacks(
StackName=event['StackId']
)
except Exception as error: # pylint: disable=W0703
custom_raise_exception(event, context, str('Error describing our stack to discover our IAM role, error text follows:\n' + str(error)))
if 'Stacks' in describe_stacks_response:
if describe_stacks_response['Stacks']:
if 'RoleARN' in describe_stacks_response['Stacks'][0]:
stack_role = describe_stacks_response['Stacks'][0]['RoleARN']
else:
stack_role = None
else:
stack_role = None
else:
stack_role = None
if DEBUG_MODE is True:
print("Finished function validate_role_on_create, role is %s" % stack_role)
def custom_raise_exception(event, context, message):
"""Raise an exception, print error, etc."""
print(message)
response_data = {
'Message': message
}
if event['StackId'] == '012345678910/fake-stack-id':
print("Skipping sending CloudFormation response due to local testing.")
else:
send(event, context, 'FAILED', response_data, None)
raise Exception(message)
def connect_to_region(event, context, region):
"""Connect to the given region."""
try:
ec2_client = boto3.client(
'ec2',
region_name=region,
)
except Exception as error: # pylint: disable=W0703
print("Failed to connect to given region, aborting.")
custom_raise_exception(event, context, error)
return ec2_client
def describe_images(event, context):
"""Perform the API call and lookup the AMIs that meet our search criteria."""
ec2_client = connect_to_region(event, context, event['ResourceProperties']['region'])
if event['ResourceProperties']['owners'] in (None, ''):
owners = []
else:
owners = event['ResourceProperties']['owners'].split(' ')
if event['ResourceProperties']['executable-users'] in (None, ''):
executable_users = []
else:
executable_users = event['ResourceProperties']['executable-users'].split(' ')
try:
filters = json.loads(event['ResourceProperties']['filters'])
except Exception as error: # pylint: disable=W0703
print("Failed to parse 'filters' parameter from JSON string to python dict, aborting.")
custom_raise_exception(event, context, error)
try:
response = ec2_client.describe_images(
ExecutableUsers=executable_users,
Filters=filters,
Owners=owners
)
except Exception as error: # pylint: disable=W0703
print("Failed to search AMIs, aborting.")
custom_raise_exception(event, context, error)
return response
def validate_inputs(event, context):
"""Evaluate our inputs and error if they are incorrect."""
if DEBUG_MODE is True:
print("Received event: \n%s" % json.dumps(event, indent=2))
if 'StackId' not in event or 'ResourceProperties' not in event:
custom_raise_exception(event, context, 'Malformed CloudFormation request, missing StackId or ResourceProperties.')
for parameter in ['owners', 'filters', 'executable-users', 'region']:
if parameter not in event['ResourceProperties']:
custom_raise_exception(event, context, 'Malformed CloudFormation request, missing one or more ResourceProperties.')
if event['StackId'] == '012345678910/fake-stack-id':
print("Skipping CloudFormation role validation due to local testing.")
else:
validate_role_on_create(event, context)
if DEBUG_MODE is True:
print("Stack ID : %s" % event['StackId'])
print("Stack Name : %s" % str(event['StackId']).split('/')[1])
def cloudformation_create(event, context):
"""Cloudformation called us with CreateStack."""
if DEBUG_MODE is True:
print("Create Option: Attempting to run creation")
image_dict = describe_images(event, context)
if DEBUG_MODE is True:
print(json.dumps(image_dict, indent=2))
if len(image_dict['Images']) < 1:
custom_raise_exception(event, context, 'AMI Search returned no results.')
newest_entry = {"CreationDate": "1980-01-01T01:01:01.000Z"}
counter = 0
for image in image_dict['Images']:
counter = counter + 1
if DEBUG_MODE is True:
print("Loop count: %d" % counter)
if time.strptime(image['CreationDate'], "%Y-%m-%dT%H:%M:%S.000Z") > time.strptime(newest_entry['CreationDate'], "%Y-%m-%dT%H:%M:%S.000Z"):
newest_entry = image
if DEBUG_MODE is True:
print("Found newer entry time of %s" % str(newest_entry['CreationDate']))
if DEBUG_MODE is True:
print(json.dumps(newest_entry, indent=2))
response_data = {
'ami-id': newest_entry['ImageId']
}
if event['StackId'] == '012345678910/fake-stack-id':
print("Skipping sending CloudFormation response due to local testing.")
return
send(event, context, 'SUCCESS', response_data, event['StackId'])
if DEBUG_MODE is True:
print("Exiting successfully")
return
def cloudformation_update(event, context):
"""Cloudformation called us with CreateStack."""
# For updates we run a new search, maybe the AMI has changed since the last time we ran?
cloudformation_create(event, context)
return
def lambda_handler(event, context):
"""Main Lambda function."""
validate_inputs(event, context)
if event['RequestType'] == 'Create':
cloudformation_create(event, context)
elif event['RequestType'] == 'Update':
cloudformation_update(event, context)
elif event['RequestType'] == 'Delete':
        # This resource never truly creates anything, so for deletes it just sends a success.
send(event, context, 'SUCCESS', {}, event['StackId'])
if DEBUG_MODE is True:
print("Exiting successfully")
return
if __name__ == '__main__':
# Example Linux/Mac CLI command we are replicating
# aws ec2 describe-images --owners self amazon --filters "Name=root-device-type,Values=ebs,Name=name,Values=amzn-ami-hvm-????.??.?.*ebs" --query 'Images[*].[CreationDate, ImageId, Name, Description]' --output text | sort -nr | head -n1 | awk '{print $2}'
# Note: The filters entry must use double quotes inside with encapsulating single quotes
TEST_EVENT = {
'StackId': '012345678910/fake-stack-id',
'RequestType': 'Create',
'ResourceProperties': {
'owners': 'self amazon',
'filters': '[{"Name":"root-device-type","Values":["ebs"]},{"Name":"name","Values":["amzn-ami-hvm-????.??.?.*gp2"]}]',
'executable-users': '',
'region': 'us-west-2'
}
}
TEST_CONTEXT = "bar"
lambda_handler(TEST_EVENT, TEST_CONTEXT)
'''
Created on 12/05/2011
@author: mikel
'''
import time
from spotify import link, session, SampleType
from collections import deque
import threading
#General buffer error
class BufferError(IOError):
pass
#Risen when stutter is detected
class BufferUnderrunError(BufferError):
pass
class BufferInitializationError(BufferError):
pass
class BufferStoppedError(BufferError):
pass
class AbstractBuffer:
def music_delivery(self, data, num_samples, sample_type, sample_rate, num_channels):
pass
def get_stats(self):
pass
def track_ended(self):
pass
class QueueItem:
data = None
num_samples = None
sample_type = None
sample_rate = None
num_channels = None
frame_time = None
def __init__(self, data, num_samples, sample_type, sample_rate, num_channels, frame_time):
self.data = data
self.num_samples = num_samples
self.sample_type = sample_type
self.sample_rate = sample_rate
self.num_channels = num_channels
self.frame_time = frame_time
class AudioBuffer(AbstractBuffer):
#OMG! It's full of vars!
    #Queue that holds the in-memory frame numbers
__frames = None
#Dict that holds the actual frame data
__frame_data = None
#Number of underruns since last get_stats() call
__stutter = None
#Flag indicating that the playback was stopped
__playback_stopped = None
#Configured buffer length in seconds
__max_buffer_length = None
#Current buffer length in seconds
__buffer_length = None
#Number of samples in buffer (not used but required by libspotify)
__samples_in_buffer = None
#Total samples delivered by libspotify
__total_samples = None
#Session instance
__session = None
#Last (and highest) requested frame by any client
__last_frame = None
#Frame flagged as the last one
__end_frame = None
#Currently playing track object
__track = None
#Stores the time the buffer started
__start_time = None
def __init__(self, session, track, max_buffer_length = 10):
self.__frames = deque()
self.__frame_data = {}
self.__stutter = 0
self.__playback_stopped = False
self.__max_buffer_length = max_buffer_length
self.__buffer_length = 0
self.__samples_in_buffer = 0
self.__total_samples = 0
self.__session = session
self.__last_frame = -1
self.__end_frame = -1
#Load the track
self.__track = track
self.__session.player_load(self.__track)
def start(self):
#Start receiving data
self.__session.player_play(True)
def _remove_first_frame(self):
if len(self.__frames) > 0:
frame_id = self.__frames[0]
frame = self.__frame_data[frame_id]
#Update sums
self.__samples_in_buffer -= frame.num_samples
self.__buffer_length -= frame.frame_time
#Delete from the index first, then from the dict
del self.__frames[0]
del self.__frame_data[frame_id]
def _append_frame(self, data, num_samples, sample_type, sample_rate, num_channels, frame_time):
#Calculate the new frame id
frame_id = self.get_last_frame_in_buffer() + 1
#Save the data
self.__frame_data[frame_id] = QueueItem(
data,
num_samples,
sample_type,
sample_rate,
num_channels,
frame_time,
)
#Update the buffer
self.__buffer_length += frame_time
#Update the sample counts
self.__samples_in_buffer += num_samples
self.__total_samples += num_samples
#And finally index it on the queue
self.__frames.append(frame_id)
#Tell that all samples were consumed
return num_samples
def _will_fill_buffer(self, frame_time):
return frame_time + self.__buffer_length > self.__max_buffer_length
def _buffer_init_purge_check(self):
return (
#Do not calculate if the buffer was not consumed
self.__start_time is None or (
#Keep this check for performance reasons
self.get_first_frame_in_buffer() <= 0 and
#Don't purge if opened less than x seconds ago.
#We check if this number is less than half the
#max length of the buffer.
time.time() - self.__start_time < self.__max_buffer_length * 0.5
)
)
def _purge_frames(self):
while len(self.__frames) > 0:
            #If we are at the start, don't purge for some seconds
if self._buffer_init_purge_check():
break
#Don't purge past the half of the buffer
elif self.__buffer_length < self.__max_buffer_length * 0.5:
break
            #Break if we reached an undeletable frame
elif self.__frames[0] == self.__last_frame:
break
#Delete the first one
else:
self._remove_first_frame()
def get_first_frame_in_buffer(self):
if len(self.__frames) > 0:
return self.__frames[0]
else:
return -1
def get_last_frame_in_buffer(self):
if len(self.__frames) > 0:
return self.__frames[-1]
else:
return -1
def _get_sample_width(self, sample_type):
#FIXME: Duplicate code Arghhhhh!
if sample_type == SampleType.Int16NativeEndian:
return 16
else:
return -1
def music_delivery(self, data, num_samples, sample_type, sample_rate, num_channels):
#Calculate the length of this delivery in seconds
#frame_time = 1.0 * num_samples / sample_rate
#Get the sample size for further calculations
        sample_size = self._get_sample_width(sample_type) // 8 * num_channels
while True:
#Calculate the time on this payload
frame_time = 1.0 * num_samples / sample_rate
#If the delivery does not fit, truncate it
if self._will_fill_buffer(frame_time) and num_samples > 0:
num_samples -= 1
data = data[:num_samples * sample_size]
#It fits, break off the loop
else:
break
#If there was no room for the frames, purge and return zero
if num_samples == 0:
self._purge_frames()
return 0
#Otherwise add the data
else:
return self._append_frame(
data, num_samples,
sample_type, sample_rate,
num_channels, frame_time
)
def get_stats(self):
stutter = self.__stutter
self.__stutter = 0
return self.__samples_in_buffer, stutter
def get_total_samples(self):
return self.__total_samples
def set_track_ended(self):
self.__end_frame = self.get_last_frame_in_buffer()
def get_frame(self, frame_num):
#What happens if this frame is not on the index?
if frame_num not in self.__frames:
#Buffer was stopped, and we depleted remaining frames
if self.__playback_stopped:
raise BufferStoppedError()
#Frame is no longer available
elif frame_num < self.get_first_frame_in_buffer():
raise BufferError("Frame number #%d gone, too late my friend." % frame_num)
#If it's ahead of the buffer, it's an underrun
else:
self.__stutter += 1
raise BufferUnderrunError("Frame #%d not yet available." % frame_num)
#Let's serve the frame
else:
#Get requested frame
frame = self.__frame_data[frame_num]
#If requested frame is higher than the last requested
if self.__last_frame < frame_num:
                #Set it as the last requested one
self.__last_frame = frame_num
#Flag to indicate if there are frames left
has_frames = frame_num != self.__end_frame
# Store the time buffer was first requested.
            # Also, do not count frame #0, since it is requested
            # immediately after __init__()
if self.__start_time is None and frame_num != 0:
self.__start_time = time.time()
return frame, has_frames
def get_frame_wait(self, frame_num):
while True:
try:
return self.get_frame(frame_num)
#Wait a bit if we are ahead of the buffer
except BufferUnderrunError:
time.sleep(0.1)
def stop(self):
if not self.__playback_stopped:
self.__session.player_unload()
self.__playback_stopped = True
def is_stopped(self):
return self.__playback_stopped
def get_track(self):
return self.__track
class BufferManager(AbstractBuffer):
__current_buffer = None
__buffer_size = None
    __buffer_open_lock = None
def __init__(self, buffer_size = 10):
self.__buffer_size = buffer_size
self.__buffer_open_lock = threading.Lock()
def _can_share_buffer(self, track):
"""
Check if the requested track and the current one are the same.
        If true, check whether the buffer is still at the start position,
        so this thread can catch up with it.
The result is a shared buffer between threads.
"""
return(
self.__current_buffer is not None and
str(track) == str(self.__current_buffer.get_track()) and
self.__current_buffer.get_first_frame_in_buffer() == 0
)
def open(self, session, track):
self.__buffer_open_lock.acquire()
try:
#If we can't share this buffer start a new one
if not self._can_share_buffer(track):
#Stop current buffer if any
if self.__current_buffer is not None:
self.__current_buffer.stop()
#Create the new buffer
self.__current_buffer = AudioBuffer(
session, track, self.__buffer_size
)
#And start receiving data
self.__current_buffer.start()
finally:
self.__buffer_open_lock.release()
return self.__current_buffer
def music_delivery(self, data, num_samples, sample_type, sample_rate, num_channels):
if self.__current_buffer is not None:
return self.__current_buffer.music_delivery(
data, num_samples, sample_type, sample_rate, num_channels
)
else:
return 0
def get_stats(self):
if self.__current_buffer is not None:
return self.__current_buffer.get_stats()
def set_track_ended(self):
if self.__current_buffer is not None:
self.__current_buffer.set_track_ended()
def stop(self):
if self.__current_buffer is not None:
self.__current_buffer.stop()
def cleanup(self):
self.__current_buffer = None
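# A minimal consumer sketch: how a playback thread might drain a shared buffer
# through BufferManager. It assumes `session` and `track` are valid pyspotify
# objects supplied by the caller, and that the raw samples in `frame.data`
# would be handed to an audio sink.
def example_stream_track(session, track, buffer_size=10):
    manager = BufferManager(buffer_size)
    audio_buffer = manager.open(session, track)
    frame_num = 0
    try:
        while True:
            #Poll until the requested frame is buffered
            frame, has_frames = audio_buffer.get_frame_wait(frame_num)
            #frame.data holds the raw samples for this delivery
            frame_num += 1
            if not has_frames:
                #The frame flagged by set_track_ended() has been served
                break
    except BufferStoppedError:
        #Another thread stopped the buffer and the remaining frames ran out
        pass
    finally:
        manager.stop()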
"""Extract reference documentation from the NumPy source tree.
"""
from __future__ import division, absolute_import, print_function
import inspect
import textwrap
import re
import pydoc
from warnings import warn
import collections
import sys
class Reader(object):
"""A line-based string reader.
"""
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\n'.
"""
if isinstance(data, list):
self._str = data
else:
self._str = data.split('\n') # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ''
def seek_next_non_empty_line(self):
for l in self[self._l:]:
if l.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start:self._l]
self._l += 1
if self.eof():
return self[start:self._l+1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return (line.strip() and (len(line.lstrip()) == len(line)))
return self.read_to_condition(is_unindented)
def peek(self, n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ''
def is_empty(self):
return not ''.join(self._str).strip()
class ParseError(Exception):
def __str__(self):
message = self.message
if hasattr(self, 'docstring'):
message = "%s in %r" % (message, self.docstring)
return message
class NumpyDocString(collections.Mapping):
def __init__(self, docstring, config={}):
orig_docstring = docstring
docstring = textwrap.dedent(docstring).split('\n')
self._doc = Reader(docstring)
self._parsed_data = {
'Signature': '',
'Summary': [''],
'Extended Summary': [],
'Parameters': [],
'Returns': [],
'Yields': [],
'Raises': [],
'Warns': [],
'Other Parameters': [],
'Attributes': [],
'Methods': [],
'See Also': [],
'Notes': [],
'Warnings': [],
'References': '',
'Examples': '',
'index': {}
}
try:
self._parse()
except ParseError as e:
e.docstring = orig_docstring
raise
def __getitem__(self, key):
return self._parsed_data[key]
def __setitem__(self, key, val):
if key not in self._parsed_data:
warn("Unknown section %s" % key)
else:
self._parsed_data[key] = val
def __iter__(self):
return iter(self._parsed_data)
def __len__(self):
return len(self._parsed_data)
def _is_at_section(self):
self._doc.seek_next_non_empty_line()
if self._doc.eof():
return False
l1 = self._doc.peek().strip() # e.g. Parameters
if l1.startswith('.. index::'):
return True
l2 = self._doc.peek(1).strip() # ---------- or ==========
return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
def _strip(self, doc):
i = 0
j = 0
for i, line in enumerate(doc):
if line.strip():
break
for j, line in enumerate(doc[::-1]):
if line.strip():
break
return doc[i:len(doc)-j]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
section += ['']
section += self._doc.read_to_next_empty_line()
return section
def _read_sections(self):
while not self._doc.eof():
data = self._read_to_next_section()
name = data[0].strip()
if name.startswith('..'): # index section
yield name, data[1:]
elif len(data) < 2:
yield StopIteration
else:
yield name, self._strip(data[2:])
def _parse_param_list(self, content):
r = Reader(content)
params = []
while not r.eof():
header = r.read().strip()
if ' : ' in header:
arg_name, arg_type = header.split(' : ')[:2]
else:
arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
params.append((arg_name, arg_type, desc))
return params
_name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
def _parse_see_also(self, content):
"""
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
"""
items = []
def parse_item_name(text):
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
g = m.groups()
if g[1] is None:
return g[3], None
else:
return g[2], g[1]
raise ParseError("%s is not a item name" % text)
def push_item(name, rest):
if not name:
return
name, role = parse_item_name(name)
items.append((name, list(rest), role))
del rest[:]
current_func = None
rest = []
for line in content:
if not line.strip():
continue
m = self._name_rgx.match(line)
if m and line[m.end():].strip().startswith(':'):
push_item(current_func, rest)
current_func, line = line[:m.end()], line[m.end():]
rest = [line.split(':', 1)[1].strip()]
if not rest[0]:
rest = []
elif not line.startswith(' '):
push_item(current_func, rest)
current_func = None
if ',' in line:
for func in line.split(','):
if func.strip():
push_item(func, [])
elif line.strip():
current_func = line
elif current_func is not None:
rest.append(line.strip())
push_item(current_func, rest)
return items
def _parse_index(self, section, content):
"""
.. index: default
:refguide: something, else, and more
"""
def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split('::')
if len(section) > 1:
out['default'] = strip_each_in(section[1].split(','))[0]
for line in content:
line = line.split(':')
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(','))
return out
def _parse_summary(self):
"""Grab signature (if given) and summary"""
if self._is_at_section():
return
# If several signatures present, take the last one
while True:
summary = self._doc.read_to_next_empty_line()
summary_str = " ".join([s.strip() for s in summary]).strip()
if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
self['Signature'] = summary_str
if not self._is_at_section():
continue
break
if summary is not None:
self['Summary'] = summary
if not self._is_at_section():
self['Extended Summary'] = self._read_to_next_section()
def _parse(self):
self._doc.reset()
self._parse_summary()
sections = list(self._read_sections())
section_names = set([section for section, content in sections])
has_returns = 'Returns' in section_names
has_yields = 'Yields' in section_names
# We could do more tests, but we are not. Arbitrarily.
if has_returns and has_yields:
msg = 'Docstring contains both a Returns and Yields section.'
raise ValueError(msg)
for (section, content) in sections:
if not section.startswith('..'):
section = (s.capitalize() for s in section.split(' '))
section = ' '.join(section)
if section in ('Parameters', 'Returns', 'Yields', 'Raises',
'Warns', 'Other Parameters', 'Attributes',
'Methods'):
self[section] = self._parse_param_list(content)
elif section.startswith('.. index::'):
self['index'] = self._parse_index(section, content)
elif section == 'See Also':
self['See Also'] = self._parse_see_also(content)
else:
self[section] = content
# string conversion routines
def _str_header(self, name, symbol='-'):
return [name, len(name)*symbol]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
if self['Signature']:
return [self['Signature'].replace('*', '\*')] + ['']
else:
return ['']
def _str_summary(self):
if self['Summary']:
return self['Summary'] + ['']
else:
return []
def _str_extended_summary(self):
if self['Extended Summary']:
return self['Extended Summary'] + ['']
else:
return []
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
for param, param_type, desc in self[name]:
if param_type:
out += ['%s : %s' % (param, param_type)]
else:
out += [param]
out += self._str_indent(desc)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += self[name]
out += ['']
return out
def _str_see_also(self, func_role):
if not self['See Also']:
return []
out = []
out += self._str_header("See Also")
last_had_desc = True
for func, desc, role in self['See Also']:
if role:
link = ':%s:`%s`' % (role, func)
elif func_role:
link = ':%s:`%s`' % (func_role, func)
else:
link = "`%s`_" % func
if desc or last_had_desc:
out += ['']
out += [link]
else:
out[-1] += ", %s" % link
if desc:
out += self._str_indent([' '.join(desc)])
last_had_desc = True
else:
last_had_desc = False
out += ['']
return out
def _str_index(self):
idx = self['index']
out = []
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
out += [' :%s: %s' % (section, ', '.join(references))]
return out
def __str__(self, func_role=''):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Yields',
'Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_section('Warnings')
out += self._str_see_also(func_role)
for s in ('Notes', 'References', 'Examples'):
out += self._str_section(s)
for param_list in ('Attributes', 'Methods'):
out += self._str_param_list(param_list)
out += self._str_index()
return '\n'.join(out)
def indent(str, indent=4):
indent_str = ' '*indent
if str is None:
return indent_str
lines = str.split('\n')
return '\n'.join(indent_str + l for l in lines)
def dedent_lines(lines):
"""Deindent a list of lines maximally"""
return textwrap.dedent("\n".join(lines)).split("\n")
def header(text, style='-'):
return text + '\n' + style*len(text) + '\n'
class FunctionDoc(NumpyDocString):
def __init__(self, func, role='func', doc=None, config={}):
self._f = func
self._role = role # e.g. "func" or "meth"
if doc is None:
if func is None:
raise ValueError("No function or docstring given")
doc = inspect.getdoc(func) or ''
NumpyDocString.__init__(self, doc)
if not self['Signature'] and func is not None:
func, func_name = self.get_func()
try:
if hasattr(inspect, 'signature'):
signature = str(inspect.signature(func))
else:
# try to read signature, backward compat for older Python
if sys.version_info[0] >= 3:
argspec = inspect.getfullargspec(func)
else:
argspec = inspect.getargspec(func)
signature = inspect.formatargspec(*argspec)
signature = '%s%s' % (func_name, signature.replace('*', '\*'))
except TypeError:
signature = '%s()' % func_name
self['Signature'] = signature
def get_func(self):
func_name = getattr(self._f, '__name__', self.__class__.__name__)
if inspect.isclass(self._f):
func = getattr(self._f, '__call__', self._f.__init__)
else:
func = self._f
return func, func_name
def __str__(self):
out = ''
func, func_name = self.get_func()
signature = self['Signature'].replace('*', '\*')
roles = {'func': 'function',
'meth': 'method'}
if self._role:
if self._role not in roles:
print("Warning: invalid role %s" % self._role)
out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''),
func_name)
out += super(FunctionDoc, self).__str__(func_role=self._role)
return out
class ClassDoc(NumpyDocString):
extra_public_methods = ['__call__']
def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
config={}):
if not inspect.isclass(cls) and cls is not None:
raise ValueError("Expected a class or None, but got %r" % cls)
self._cls = cls
self.show_inherited_members = config.get(
'show_inherited_class_members', True)
if modulename and not modulename.endswith('.'):
modulename += '.'
self._mod = modulename
if doc is None:
if cls is None:
raise ValueError("No class or documentation string given")
doc = pydoc.getdoc(cls)
NumpyDocString.__init__(self, doc)
if config.get('show_class_members', True):
def splitlines_x(s):
if not s:
return []
else:
return s.splitlines()
for field, items in [('Methods', self.methods),
('Attributes', self.properties)]:
if not self[field]:
doc_list = []
for name in sorted(items):
try:
doc_item = pydoc.getdoc(getattr(self._cls, name))
doc_list.append((name, '', splitlines_x(doc_item)))
except AttributeError:
pass # method doesn't exist
self[field] = doc_list
@property
def methods(self):
if self._cls is None:
return []
return [name for name, func in inspect.getmembers(self._cls)
if ((not name.startswith('_')
or name in self.extra_public_methods)
and isinstance(func, collections.Callable)
and self._is_show_member(name))]
@property
def properties(self):
if self._cls is None:
return []
return [name for name, func in inspect.getmembers(self._cls)
if (not name.startswith('_') and
(func is None or isinstance(func, property) or
inspect.isgetsetdescriptor(func))
and self._is_show_member(name))]
def _is_show_member(self, name):
if self.show_inherited_members:
return True # show all class members
if name not in self._cls.__dict__:
return False # class member is inherited, we do not show it
return True
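# A minimal usage sketch: parse a small NumPy-style docstring with
# NumpyDocString and print a few of the extracted sections. The docstring
# content below is made up purely for the demonstration.
if __name__ == '__main__':
    example_docstring = """
    Compute the sum of two numbers.

    Parameters
    ----------
    a : int
        First operand.
    b : int
        Second operand.

    Returns
    -------
    int
        The sum of ``a`` and ``b``.
    """
    parsed = NumpyDocString(example_docstring)
    print(parsed['Summary'])      # ['Compute the sum of two numbers.']
    print(parsed['Parameters'])   # [(name, type, description lines), ...]
    print(parsed['Returns'])
    print(str(parsed))            # the docstring re-rendered section by section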
# Python Classes/Functions used to Export Tycho's Datasets
# ------------------------------------- #
# Python Package Importing #
# ------------------------------------- #
# TO-DO: Add time back to bookkeeping because of Tyler's code
# Importing Necessary System Packages
import os, io
import numpy as np
import matplotlib as plt
import random
import numpy
# Import the Amuse Base Packages
from amuse import datamodel
from amuse.units import nbody_system
from amuse.units import units
from amuse.io import *
from amuse.lab import *
from amuse.couple import multiples
from amuse import io
# Import the Amuse Stellar Packages
from amuse.ic.kingmodel import new_king_model
from amuse.ic.kroupa import new_kroupa_mass_distribution
# Import cPickle/Pickle
try:
    import cPickle as pickle
except ImportError:
    import pickle
from tycho import util
#from tycho import multiples2 as multiples
# ------------------------------------- #
# Defining Functions #
# ------------------------------------- #
def write_initial_state(master_set, ic_array, file_prefix):
''' Writes out an initial state for the Tycho Module.
master_set: The Master Amuse Particle Set used in Tycho
ic_array: Predefined Numpy Array that Stores Initial Conditions in SI Units
file_prefix: String Value for a Prefix to the Saved File
'''
# First, Define/Make the Directory for the Initial State to be Stored
file_dir = os.getcwd()+"/InitialState"
if not os.path.exists(file_dir):
os.makedirs(file_dir)
file_base = file_dir+"/"+file_prefix
# Second, Write the AMUSE Particle Set to a HDF5 File
file_format = "hdf5"
write_set_to_file(master_set, file_base+"_particles.hdf5", format=file_format, close_file=True)
# Third, Pickle the Initial Conditions Array
ic_file = open(file_base+"_ic.pkl", "wb")
pickle.dump(ic_array, ic_file)
ic_file.close()
def write_time_step(gravity_set, current_time, file_prefix):
''' Writes out necessary information for a time step.
master_set: The Master AMUSE Particle Set used in Tycho
multiples_code: The Multiples Instance for Tycho
current_time: The Simulations Current Time
file_prefix: String Value for a Prefix to the Saved File
'''
# First, Define/Make the Directory for the Time Step to be Stored
file_dir_MS = os.getcwd()+"/Snapshots"
if not os.path.exists(file_dir_MS):
os.makedirs(file_dir_MS)
file_base_MS = file_dir_MS+"/"+file_prefix
# Second, Write the AMUSE Particle Set to a HDF5 File
file_format = "hdf5"
write_set_to_file(gravity_set, file_base_MS+"_MS_t%.3f.hdf5" %(current_time.number), \
format=file_format, close_file=True)
# ------------------------------------ #
# WRITING RESTART FILE #
# ------------------------------------ #
def write_state_to_file(time, stars_python, gravity_code, multiples_code, write_file, cp_hist=False, backup=0):
res_dir = os.getcwd()+"/Restart"
if not os.path.exists(res_dir):
os.makedirs(res_dir)
print("Writing state to write file: ", write_file,"\n\n")
if write_file is not None:
particles = gravity_code.particles.copy()
write_channel = gravity_code.particles.new_channel_to(particles)
write_channel.copy_attribute("index_in_code", "id")
bookkeeping = {'neighbor_veto': multiples_code.neighbor_veto,
'neighbor_distance_factor': multiples_code.neighbor_distance_factor,
'multiples_external_tidal_correction': multiples_code.multiples_external_tidal_correction,
'multiples_integration_energy_error': multiples_code.multiples_integration_energy_error,
'multiples_internal_tidal_correction': multiples_code.multiples_internal_tidal_correction,
'model_time': multiples_code.model_time,
'root_index': multiples.root_index
}
for root, tree in multiples_code.root_to_tree.items():
root_in_particles = root.as_particle_in_set(particles)
subset = tree.get_tree_subset().copy()
if root_in_particles is not None:
root_in_particles.components = subset
io.write_set_to_file(particles,write_file+".stars.hdf5",'hdf5',version='2.0',
append_to_file=False, copy_history=cp_hist)
io.write_set_to_file(stars_python,write_file+".stars_python.hdf5",'hdf5',version='2.0',
append_to_file=False, copy_history=cp_hist)
config = {'time' : time,
'py_seed': pickle.dumps(random.getstate()),
'numpy_seed': pickle.dumps(numpy.random.get_state()),
# 'options': pickle.dumps(options)
}
with open(write_file + ".conf", "wb") as f:
pickle.dump(config, f)
with open(write_file + ".bookkeeping", "wb") as f:
pickle.dump(bookkeeping, f)
print("\nState successfully written to: ", write_file)
print(time)
if backup > 0:
io.write_set_to_file(particles,write_file+".backup.stars.hdf5",'hdf5', version='2.0',
append_to_file=False, copy_history=cp_hist, close_file=True)
io.write_set_to_file(stars_python,write_file+".backup.stars_python.hdf5",'hdf5',
version='2.0', append_to_file=False, copy_history=cp_hist,
close_file=True)
config2 = {'time' : time,
'py_seed': pickle.dumps(random.getstate()),
'numpy_seed': pickle.dumps(numpy.random.get_state()),
# 'options': pickle.dumps(options)
}
with open(write_file + ".backup.conf", "wb") as f:
pickle.dump(config2, f)
f.close()
with open(write_file + ".backup.bookkeeping", "wb") as f:
pickle.dump(bookkeeping, f)
f.close()
print("\nBackup write completed.\n")
if backup > 2:
io.write_set_to_file(particles, write_file+"."+str(int(time.number))
+".stars.hdf5",'hdf5',version='2.0', append_to_file=False,
copy_history=cp_hist, close_file=True)
io.write_set_to_file(stars_python, write_file+"."+str(int(time.number))
+".stars_python.hdf5",'hdf5',version='2.0', append_to_file=False,
copy_history=cp_hist, close_file=True)
config2 = {'time' : time,
'py_seed': pickle.dumps(random.getstate()),
'numpy_seed': pickle.dumps(numpy.random.get_state()),
# 'options': pickle.dumps(options)
}
with open(write_file + "." +str(int(time.number))+".conf", "wb") as f:
pickle.dump(config2, f)
f.close()
with open(write_file + "."+str(int(time.number))+".bookkeeping", "wb") as f:
pickle.dump(bookkeeping, f)
f.close()
print("\nBackup write completed.\n")
# ----------------------------------------- #
# WRITING CRASH RESTART FILE #
# ----------------------------------------- #
def write_crash_save(time, stars_python, gravity_code, multiples_code, write_file, cp_hist=False, backup=0):
crash_dir = os.getcwd()+"/CrashSave"
if not os.path.exists(crash_dir):
os.makedirs(crash_dir)
print("Writing state to write file: ", write_file,"\n\n")
if write_file is not None:
particles = gravity_code.particles.copy()
write_channel = gravity_code.particles.new_channel_to(particles)
write_channel.copy_attribute("index_in_code", "id")
bookkeeping = {'neighbor_veto': multiples_code.neighbor_veto,
'neighbor_distance_factor': multiples_code.neighbor_distance_factor,
'multiples_external_tidal_correction': multiples_code.multiples_external_tidal_correction,
'multiples_integration_energy_error': multiples_code.multiples_integration_energy_error,
'multiples_internal_tidal_correction': multiples_code.multiples_internal_tidal_correction,
'model_time': multiples_code.model_time,
'root_index': multiples.root_index
}
'''
bookkeeping.neighbor_veto =
bookkeeping.multiples_external_tidal_correction = multiples_code.multiples_external_tidal_correction
bookkeeping.multiples_integration_energy_error = multiples_code.multiples_integration_energy_error
bookkeeping.multiples_internal_tidal_correction = multiples_code.multiples_internal_tidal_correction
bookkeeping.model_time = multiples_code.model_time
'''
for root, tree in multiples_code.root_to_tree.items():
#multiples.print_multiple_simple(tree,kep)
root_in_particles = root.as_particle_in_set(particles)
subset = tree.get_tree_subset().copy()
if root_in_particles is not None:
root_in_particles.components = subset
io.write_set_to_file(particles,write_file+".stars.hdf5",'hdf5',version='2.0', append_to_file=False, copy_history=cp_hist)
io.write_set_to_file(stars_python,write_file+".stars_python.hdf5",'hdf5',version='2.0', append_to_file=False, copy_history=cp_hist)
config = {'time' : time,
'py_seed': pickle.dumps(random.getstate()),
'numpy_seed': pickle.dumps(numpy.random.get_state()),
# 'options': pickle.dumps(options)
}
with open(write_file + ".conf", "wb") as f:
pickle.dump(config, f)
with open(write_file + ".bookkeeping", "wb") as f:
pickle.dump(bookkeeping, f)
print("\nState successfully written to: ", write_file)
print(time)
if backup > 0:
io.write_set_to_file(particles,write_file+".backup.stars.hdf5",'hdf5',version='2.0', append_to_file=False, copy_history=cp_hist, close_file=True)
io.write_set_to_file(stars_python,write_file+".backup.stars_python.hdf5",'hdf5',version='2.0', append_to_file=False, copy_history=cp_hist, close_file=True)
config2 = {'time' : time,
'py_seed': pickle.dumps(random.getstate()),
'numpy_seed': pickle.dumps(numpy.random.get_state()),
# 'options': pickle.dumps(options)
}
with open(write_file + ".backup.conf", "wb") as f:
pickle.dump(config2, f)
with open(write_file + ".backup.bookkeeping", "wb") as f:
pickle.dump(bookkeeping, f)
print("\nBackup write completed.\n")
if backup > 2:
io.write_set_to_file(particles,write_file+"."+str(int(time.number))+".stars.hdf5",'hdf5',version='2.0', append_to_file=False, copy_history=cp_hist, close_file=True)
io.write_set_to_file(stars_python,write_file+"."+str(int(time.number))+".stars_python.hdf5",'hdf5',version='2.0', append_to_file=False, copy_history=cp_hist, close_file=True)
config2 = {'time' : time,
'py_seed': pickle.dumps(random.getstate()),
'numpy_seed': pickle.dumps(numpy.random.get_state()),
# 'options': pickle.dumps(options)
}
with open(write_file + "." +str(int(time.number))+".conf", "wb") as f:
pickle.dump(config2, f)
with open(write_file + "."+str(int(time.number))+".bookkeeping", "wb") as f:
pickle.dump(bookkeeping, f)
print("\nBackup write completed.\n")
|
|
#!/usr/bin/python
#
# N9 Personal Web Server
# 2012-02-19; Thomas Perl
# http://thp.io/2012/serverr/
#
# Copyright (c) 2012 Thomas Perl <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtDeclarative import *
import SimpleHTTPServer
import SocketServer
import threading
import sys
import subprocess
import re
import time
import string
import random
import os
from StringIO import StringIO
import cgi
import urllib
import mimetypes
app = QApplication(sys.argv)
settings = QSettings('thp.io', 'serverr')
def boolsetting(key, default=False):
global settings
# Kludge -> at startup we get 'false' instead of False
return (settings.value(key, default) not in ('false', False))
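# Illustrative sketch (not in the original script): the kludge above exists
# because QSettings can return the *string* 'false' instead of the Python bool
# False right after startup, so both spellings must count as "disabled".
def _boolsetting_example():
    for stored in ('false', False, 'true', True, None):
        print('%r -> %r' % (stored, stored not in ('false', False)))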
class SettingsProxy(QObject):
def __init__(self, settings):
QObject.__init__(self)
self._settings = settings
@Slot(str, result='QVariant')
def get(self, key):
return self._settings.value(key)
@Slot(str, 'QVariant')
def set(self, key, value):
self._settings.setValue(key, value)
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
class MyRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def directory_entry(self, f, fullname):
mimetype, _ = mimetypes.guess_type(fullname)
name = os.path.basename(fullname)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
icon = '/:theme:/icon-m-common-directory.png'
displayname = name + "/"
linkname = name + "/"
elif mimetype is None:
icon = '/:theme:/icon-m-content-file-unknown.png'
elif mimetype.startswith('audio/'):
icon = '/:theme:/icon-m-content-audio.png'
elif mimetype.startswith('video/'):
icon = '/:theme:/icon-m-content-videos.png'
elif mimetype.startswith('image/'):
icon = '/:theme:/icon-m-content-image.png'
elif mimetype == 'application/pdf':
icon = '/:theme:/icon-m-content-pdf.png'
elif mimetype == 'application/msword':
icon = '/:theme:/icon-m-content-word.png'
elif mimetype == 'application/vnd.ms-excel':
icon = '/:theme:/icon-m-content-excel.png'
elif mimetype == 'application/vnd.ms-powerpoint':
icon = '/:theme:/icon-m-content-powerpoint.png'
else:
icon = '/:theme:/icon-m-content-file-unknown.png'
f.write('<li><a href="%s"><img style="vertical-align: middle;" src="%s" width=32> %s</a>\n'
% (urllib.quote(linkname), icon, cgi.escape(displayname)))
def list_directory(self, path):
try:
listing = os.listdir(path)
except os.error:
self.send_error(404, "Cannot read directory")
return None
listing = filter(lambda x: not x.startswith('.'), sorted(listing, key=lambda x: (0 if os.path.isdir(os.path.join(path, x)) else 1, x.lower())))
if self.path != '/':
listing.insert(0, '..')
f = StringIO()
displaypath = cgi.escape(urllib.unquote(self.path))
f.write("<html>\n<head><title>%s on N9</title>\n" % displaypath)
f.write("""
<style type="text/css">
body { font-family: sans-serif; }
li a { padding: 7px; background-color: #eee;
text-decoration: none; display: block;
color: #333; }
li a:hover { background-color: #ddd; color: black; }
ul { list-style: none; margin: 0px; padding: 0px; }
img { width: 32px; height: 32px; border: 0px; }
hr { border-width: 0px; border-bottom: 1px solid #aaa; }
</style>
</head>""")
f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
f.write("<hr>\n<ul>\n")
for name in listing:
fullname = os.path.join(path, name)
self.directory_entry(f, fullname)
f.write("""
</ul><hr>
<address>
Powered by <a href="http://thp.io/2012/serverr/">Personal Web Server for N9</a>
</address>
</body></html>
""")
length = f.tell()
f.seek(0)
self.send_response(200)
encoding = sys.getfilesystemencoding()
self.send_header("Content-type", "text/html; charset=%s" % encoding)
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def do_GET(self):
authorized = False
auth_header = self.headers.get('authorization', '')
m = re.match(r'^Basic (.*)$', auth_header)
if m is not None:
auth_data = m.group(1).decode('base64').split(':', 1)
if len(auth_data) == 2:
username, password = auth_data
if username == 'client' and password == serverr.password:
authorized = True
if authorized:
if not self.path.startswith('/:theme:/'):
serverr.log_message = u'<b>%s</b> %s %s' % (
self.client_address[0],
self.command, self.path)
serverr.logMessage.emit()
if self.path.startswith('/:theme:/'):
filename = os.path.basename(self.path)
fn = '/usr/share/themes/blanco/meegotouch/icons/'+filename
if os.path.exists(fn):
f = open(fn, 'rb')
self.send_response(200)
self.send_header('Content-type', 'image/png')
self.end_headers()
self.copyfile(f, self.wfile)
f.close()
self.wfile.close()
return
path = self.translate_path(self.path)
f = None
if os.path.isdir(path) and path.endswith('/'):
    f = self.list_directory(path)
    if f is not None:
        self.copyfile(f, self.wfile)
        f.close()
else:
    SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
else:
self.send_response(401)
self.send_header('WWW-Authenticate',
'Basic realm="N9 Personal Web Server"')
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<em>401 Access denied - wrong username/password</em>')
self.wfile.close()
return
class Serverr(QObject):
PORT = 8888
def __init__(self):
QObject.__init__(self)
self.httpd = None
self.thread = None
self.password = self._newpassword()
self.log_message = u''
self.port_number = self.PORT
@Slot(result=unicode)
def getLogMessage(self):
return self.log_message
def _newpassword(self, length=5):
# Avoid some characters that look alike in certain fonts
charset = [x for x in string.letters + string.digits if x not in 'IlO0']
return ''.join([random.choice(charset) for _ in range(length)])
logMessage = Signal()
def getCurrentStatus(self):
if self.httpd is not None:
return u'Serving on port %d' % self.port_number
else:
return u'Not running'
currentStatusChanged = Signal()
currentStatus = Property(unicode,
fget=getCurrentStatus,
notify=currentStatusChanged)
def getCurrentPassword(self):
return unicode(self.password)
currentPasswordChanged = Signal()
currentPassword = Property(unicode,
fget=getCurrentPassword,
notify=currentPasswordChanged)
@Slot()
def generateNewPassword(self):
self.password = self._newpassword()
self.currentPasswordChanged.emit()
@Slot()
def start(self):
if self.httpd is None:
self.thread = threading.Thread(target=self.thread_proc)
self.thread.setDaemon(True)
self.thread.start()
@Slot()
def stop(self):
if self.httpd is not None:
self.httpd.shutdown()
self.httpd = None
self.currentStatusChanged.emit()
@Slot(result=unicode)
def get_ips(self):
ifconfig = subprocess.Popen('/sbin/ifconfig', stdout=subprocess.PIPE)
stdout, stderr = ifconfig.communicate()
ips = re.findall('addr:([^ ]+)', stdout)
ips = filter(lambda ip: not ip.startswith('127.'), ips) or None
if ips is None:
return u'You are offline.'
return u'<center>Point your browser to:<br>' + u'<br>or '.join(u'http://%s:%d/' % (ip, self.port_number)
for ip in ips) + '</center>'
def thread_proc(self):
os.chdir('/home/user/MyDocs/')
self.port_number = self.PORT
while self.port_number < self.PORT + 100:
try:
self.httpd = ThreadedTCPServer(("", self.port_number),
MyRequestHandler)
break
except:
self.port_number += 1
self.currentStatusChanged.emit()
self.httpd.serve_forever()
self.thread = None
view = QDeclarativeView()
rootContext = view.rootContext()
proxy = SettingsProxy(settings)
rootContext.setContextProperty('settings', proxy)
serverr = Serverr()
rootContext.setContextProperty('serverr', serverr)
view.setSource('/opt/serverr/serverr.qml')
view.showFullScreen()
app.exec_()
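# Minimal client-side sketch (host, port and password below are placeholders):
# MyRequestHandler only answers requests carrying an HTTP Basic Authorization
# header for the user 'client' with the password currently shown in the UI.
def _example_client_request(host='192.168.0.2', port=8888, password='abc12'):
    import base64
    import urllib2
    request = urllib2.Request('http://%s:%d/' % (host, port))
    token = base64.b64encode('client:' + password)
    request.add_header('Authorization', 'Basic ' + token)
    return urllib2.urlopen(request).read()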
|
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gsutil API delegator for interacting with cloud storage providers."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import boto
from boto import config
from gslib.cloud_api import ArgumentException
from gslib.cloud_api import CloudApi
from gslib.cs_api_map import ApiMapConstants
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
class CloudApiDelegator(CloudApi):
"""Class that handles delegating requests to gsutil Cloud API implementations.
This class is responsible for determining at runtime which gsutil Cloud API
implementation should service the request based on the Cloud storage provider,
command-level API support, and configuration file override.
During initialization it takes as an argument a gsutil_api_map which maps
providers to their default and supported gsutil Cloud API implementations
(see comments in cs_api_map for details).
Instantiation of multiple delegators per-thread is required for multiprocess
and/or multithreaded operations. Calling methods on the same delegator in
multiple threads is unsafe.
"""
def __init__(self,
bucket_storage_uri_class,
gsutil_api_map,
logger,
status_queue,
provider=None,
debug=0,
trace_token=None,
perf_trace_token=None,
user_project=None):
"""Performs necessary setup for delegating cloud storage requests.
This function has different arguments than the gsutil Cloud API __init__
function because of the delegation responsibilities of this class.
Args:
bucket_storage_uri_class: boto storage_uri class, used by APIs that
provide boto translation or mocking.
gsutil_api_map: Map of providers and API selector tuples to api classes
which can be used to communicate with those providers.
logger: logging.logger for outputting log messages.
status_queue: Queue for relaying status to UI.
provider: Default provider prefix describing cloud storage provider to
connect to.
debug: Debug level for the API implementation (0..3).
trace_token: Apiary trace token to pass to API.
perf_trace_token: Performance trace token to use when making API calls.
user_project: Project to be billed for this request.
"""
super(CloudApiDelegator, self).__init__(bucket_storage_uri_class,
logger,
status_queue,
provider=provider,
debug=debug,
trace_token=trace_token,
perf_trace_token=perf_trace_token,
user_project=user_project)
self.api_map = gsutil_api_map
self.prefer_api = boto.config.get('GSUtil', 'prefer_api', '').upper()
self.loaded_apis = {}
if not self.api_map[ApiMapConstants.API_MAP]:
raise ArgumentException('No apiclass supplied for gsutil Cloud API map.')
def _GetApi(self, provider):
"""Returns a valid CloudApi for use by the caller.
This function lazy-loads connection and credentials using the API map
and credential store provided during class initialization.
Args:
provider: Provider to load API for. If None, class-wide default is used.
Raises:
ArgumentException if there is no matching API available in the API map.
Returns:
Valid API instance that can be used to communicate with the Cloud
Storage provider.
"""
provider = provider or self.provider
if not provider:
raise ArgumentException('No provider selected for _GetApi')
provider = str(provider)
if provider not in self.loaded_apis:
self.loaded_apis[provider] = {}
api_selector = self.GetApiSelector(provider)
if api_selector not in self.loaded_apis[provider]:
# Need to load the API.
self._LoadApi(provider, api_selector)
return self.loaded_apis[provider][api_selector]
def _LoadApi(self, provider, api_selector):
"""Loads a CloudApi into the loaded_apis map for this class.
Args:
provider: Provider to load the API for.
api_selector: cs_api_map.ApiSelector defining the API type.
"""
if provider not in self.api_map[ApiMapConstants.API_MAP]:
raise ArgumentException(
'gsutil Cloud API map contains no entry for provider %s.' % provider)
if api_selector not in self.api_map[ApiMapConstants.API_MAP][provider]:
raise ArgumentException(
'gsutil Cloud API map does not support API %s for provider %s.' %
(api_selector, provider))
self.loaded_apis[provider][api_selector] = (
self.api_map[ApiMapConstants.API_MAP][provider][api_selector](
self.bucket_storage_uri_class,
self.logger,
self.status_queue,
provider=provider,
debug=self.debug,
trace_token=self.trace_token,
perf_trace_token=self.perf_trace_token,
user_project=self.user_project))
def GetApiSelector(self, provider=None):
"""Returns a cs_api_map.ApiSelector based on input and configuration.
Args:
provider: Provider to return the ApiSelector for. If None, class-wide
default is used.
Returns:
cs_api_map.ApiSelector that will be used for calls to the delegator
for this provider.
"""
selected_provider = provider or self.provider
if not selected_provider:
raise ArgumentException('No provider selected for CloudApi')
if (selected_provider not in self.api_map[ApiMapConstants.DEFAULT_MAP] or
self.api_map[ApiMapConstants.DEFAULT_MAP][selected_provider] not in
self.api_map[ApiMapConstants.API_MAP][selected_provider]):
raise ArgumentException('No default api available for provider %s' %
selected_provider)
if selected_provider not in self.api_map[ApiMapConstants.SUPPORT_MAP]:
raise ArgumentException('No supported apis available for provider %s' %
selected_provider)
api = self.api_map[ApiMapConstants.DEFAULT_MAP][selected_provider]
using_gs_hmac = (
provider == 'gs' and
not config.has_option('Credentials', 'gs_oauth2_refresh_token') and
not (config.has_option('Credentials', 'gs_service_client_id') and
config.has_option('Credentials', 'gs_service_key_file')) and
(config.has_option('Credentials', 'gs_access_key_id') and
config.has_option('Credentials', 'gs_secret_access_key')))
configured_encryption = (provider == 'gs' and
(config.has_option('GSUtil', 'encryption_key') or
config.has_option('GSUtil', 'decryption_key1')))
if using_gs_hmac and configured_encryption:
raise CommandException(
'gsutil does not support HMAC credentials with customer-supplied '
'encryption keys (CSEK) or customer-managed KMS encryption keys '
'(CMEK). Please generate and include non-HMAC credentials '
'in your .boto configuration file, or to access public encrypted '
'objects, remove your HMAC credentials.')
# If we have only HMAC credentials for Google Cloud Storage, we must use
# the XML API as the JSON API does not support HMAC.
#
# Technically if we have only HMAC credentials, we should still be able to
# access public read resources via the JSON API, but the XML API can do
# that just as well. It is better to use it than inspect the credentials on
# every HTTP call.
elif using_gs_hmac:
api = ApiSelector.XML
# CSEK and CMEK encryption keys are currently only supported in the
# JSON API implementation (GcsJsonApi). We can't stop XML API users from
# interacting with encrypted objects, since we don't know the object is
# encrypted until after the API call is made, but if they specify
# configuration values we will use JSON.
elif configured_encryption:
api = ApiSelector.JSON
# Try to force the user's preference to a supported API.
elif self.prefer_api in (
self.api_map[ApiMapConstants.SUPPORT_MAP][selected_provider]):
api = self.prefer_api
return api
# For function docstrings, see CloudApi class.
def GetBucket(self, bucket_name, provider=None, fields=None):
return self._GetApi(provider).GetBucket(bucket_name, fields=fields)
def GetBucketIamPolicy(self, bucket_name, provider=None, fields=None):
return self._GetApi(provider).GetBucketIamPolicy(bucket_name, fields=fields)
def SetBucketIamPolicy(self, bucket_name, policy, provider=None):
return self._GetApi(provider).SetBucketIamPolicy(bucket_name, policy)
def ListBuckets(self, project_id=None, provider=None, fields=None):
return self._GetApi(provider).ListBuckets(project_id=project_id,
fields=fields)
def PatchBucket(self,
bucket_name,
metadata,
canned_acl=None,
canned_def_acl=None,
preconditions=None,
provider=None,
fields=None):
return self._GetApi(provider).PatchBucket(bucket_name,
metadata,
canned_acl=canned_acl,
canned_def_acl=canned_def_acl,
preconditions=preconditions,
fields=fields)
def LockRetentionPolicy(self, bucket_name, metageneration, provider=None):
return self._GetApi(provider).LockRetentionPolicy(bucket_name,
metageneration,
provider=provider)
def CreateBucket(self,
bucket_name,
project_id=None,
metadata=None,
provider=None,
fields=None):
return self._GetApi(provider).CreateBucket(bucket_name,
project_id=project_id,
metadata=metadata,
fields=fields)
def DeleteBucket(self, bucket_name, preconditions=None, provider=None):
return self._GetApi(provider).DeleteBucket(bucket_name,
preconditions=preconditions)
def GetObjectIamPolicy(self,
bucket_name,
object_name,
generation=None,
provider=None,
fields=None):
return self._GetApi(provider).GetObjectIamPolicy(bucket_name,
object_name,
generation,
fields=fields)
def SetObjectIamPolicy(self,
bucket_name,
object_name,
policy,
generation=None,
provider=None):
return self._GetApi(provider).SetObjectIamPolicy(bucket_name, object_name,
policy, generation)
def ListObjects(self,
bucket_name,
prefix=None,
delimiter=None,
all_versions=None,
provider=None,
fields=None):
return self._GetApi(provider).ListObjects(bucket_name,
prefix=prefix,
delimiter=delimiter,
all_versions=all_versions,
fields=fields)
def GetObjectMetadata(self,
bucket_name,
object_name,
generation=None,
provider=None,
fields=None):
return self._GetApi(provider).GetObjectMetadata(bucket_name,
object_name,
generation=generation,
fields=fields)
def PatchObjectMetadata(self,
bucket_name,
object_name,
metadata,
canned_acl=None,
generation=None,
preconditions=None,
provider=None,
fields=None):
return self._GetApi(provider).PatchObjectMetadata(
bucket_name,
object_name,
metadata,
canned_acl=canned_acl,
generation=generation,
preconditions=preconditions,
fields=fields)
def GetObjectMedia(self,
bucket_name,
object_name,
download_stream,
provider=None,
generation=None,
object_size=None,
compressed_encoding=False,
download_strategy=CloudApi.DownloadStrategy.ONE_SHOT,
start_byte=0,
end_byte=None,
progress_callback=None,
serialization_data=None,
digesters=None,
decryption_tuple=None):
return self._GetApi(provider).GetObjectMedia(
bucket_name,
object_name,
download_stream,
compressed_encoding=compressed_encoding,
download_strategy=download_strategy,
start_byte=start_byte,
end_byte=end_byte,
generation=generation,
object_size=object_size,
progress_callback=progress_callback,
serialization_data=serialization_data,
digesters=digesters,
decryption_tuple=decryption_tuple)
def UploadObject(self,
upload_stream,
object_metadata,
size=None,
canned_acl=None,
preconditions=None,
progress_callback=None,
encryption_tuple=None,
provider=None,
fields=None,
gzip_encoded=False):
return self._GetApi(provider).UploadObject(
upload_stream,
object_metadata,
size=size,
canned_acl=canned_acl,
preconditions=preconditions,
progress_callback=progress_callback,
encryption_tuple=encryption_tuple,
fields=fields,
gzip_encoded=gzip_encoded)
def UploadObjectStreaming(self,
upload_stream,
object_metadata,
canned_acl=None,
preconditions=None,
progress_callback=None,
encryption_tuple=None,
provider=None,
fields=None,
gzip_encoded=False):
return self._GetApi(provider).UploadObjectStreaming(
upload_stream,
object_metadata,
canned_acl=canned_acl,
preconditions=preconditions,
progress_callback=progress_callback,
encryption_tuple=encryption_tuple,
fields=fields,
gzip_encoded=gzip_encoded)
def UploadObjectResumable(self,
upload_stream,
object_metadata,
canned_acl=None,
preconditions=None,
size=None,
serialization_data=None,
tracker_callback=None,
progress_callback=None,
encryption_tuple=None,
provider=None,
fields=None,
gzip_encoded=False):
return self._GetApi(provider).UploadObjectResumable(
upload_stream,
object_metadata,
canned_acl=canned_acl,
preconditions=preconditions,
size=size,
serialization_data=serialization_data,
tracker_callback=tracker_callback,
progress_callback=progress_callback,
encryption_tuple=encryption_tuple,
fields=fields,
gzip_encoded=gzip_encoded)
def CopyObject(self,
src_obj_metadata,
dst_obj_metadata,
src_generation=None,
canned_acl=None,
preconditions=None,
progress_callback=None,
max_bytes_per_call=None,
encryption_tuple=None,
decryption_tuple=None,
provider=None,
fields=None):
return self._GetApi(provider).CopyObject(
src_obj_metadata,
dst_obj_metadata,
src_generation=src_generation,
canned_acl=canned_acl,
preconditions=preconditions,
progress_callback=progress_callback,
max_bytes_per_call=max_bytes_per_call,
encryption_tuple=encryption_tuple,
decryption_tuple=decryption_tuple,
fields=fields)
def ComposeObject(self,
src_objs_metadata,
dst_obj_metadata,
preconditions=None,
encryption_tuple=None,
provider=None,
fields=None):
return self._GetApi(provider).ComposeObject(
src_objs_metadata,
dst_obj_metadata,
preconditions=preconditions,
encryption_tuple=encryption_tuple,
fields=fields)
def DeleteObject(self,
bucket_name,
object_name,
preconditions=None,
generation=None,
provider=None):
return self._GetApi(provider).DeleteObject(bucket_name,
object_name,
preconditions=preconditions,
generation=generation)
def WatchBucket(self,
bucket_name,
address,
channel_id,
token=None,
provider=None,
fields=None):
return self._GetApi(provider).WatchBucket(bucket_name,
address,
channel_id,
token=token,
fields=fields)
def StopChannel(self, channel_id, resource_id, provider=None):
return self._GetApi(provider).StopChannel(channel_id, resource_id)
def ListChannels(self, bucket_name, provider=None):
return self._GetApi(provider).ListChannels(bucket_name)
def GetProjectServiceAccount(self, project_number, provider=None):
return self._GetApi(provider).GetProjectServiceAccount(project_number)
def CreateNotificationConfig(self,
bucket_name,
pubsub_topic,
payload_format,
event_types=None,
custom_attributes=None,
object_name_prefix=None,
provider=None):
return self._GetApi(provider).CreateNotificationConfig(
bucket_name, pubsub_topic, payload_format, event_types,
custom_attributes, object_name_prefix)
def DeleteNotificationConfig(self, bucket_name, notification, provider=None):
return self._GetApi(provider).DeleteNotificationConfig(
bucket_name, notification)
def ListNotificationConfigs(self, bucket_name, provider=None):
return self._GetApi(provider).ListNotificationConfigs(bucket_name)
def ListBucketAccessControls(self, bucket_name, provider=None):
return self._GetApi(provider).ListBucketAccessControls(bucket_name)
def ListObjectAccessControls(self, bucket_name, object_name, provider=None):
return self._GetApi(provider).ListObjectAccessControls(
bucket_name, object_name)
def CreateHmacKey(self, project_id, service_account_email, provider=None):
return self._GetApi(provider).CreateHmacKey(project_id,
service_account_email)
def DeleteHmacKey(self, project_id, access_id, provider=None):
return self._GetApi(provider).DeleteHmacKey(project_id, access_id)
def GetHmacKey(self, project_id, access_id, provider=None):
return self._GetApi(provider).GetHmacKey(project_id, access_id)
def ListHmacKeys(self,
project_id,
service_account_email,
show_deleted_keys=False,
provider=None):
return self._GetApi(provider).ListHmacKeys(project_id,
service_account_email,
show_deleted_keys)
def UpdateHmacKey(self, project_id, access_id, state, etag, provider=None):
return self._GetApi(provider).UpdateHmacKey(project_id, access_id, state,
etag)
def XmlPassThroughGetAcl(self, storage_url, def_obj_acl=False, provider=None):
"""XML compatibility function for getting ACLs.
Args:
storage_url: StorageUrl object.
def_obj_acl: If true, get the default object ACL on a bucket.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
ACL XML for the resource specified by storage_url.
"""
return self._GetApi(provider).XmlPassThroughGetAcl(storage_url,
def_obj_acl=def_obj_acl)
def XmlPassThroughSetAcl(self,
acl_text,
storage_url,
canned=True,
def_obj_acl=False,
provider=None):
"""XML compatibility function for setting ACLs.
Args:
acl_text: XML ACL or canned ACL string.
storage_url: StorageUrl object.
canned: If true, acl_text is treated as a canned ACL string.
def_obj_acl: If true, set the default object ACL on a bucket.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
None.
"""
self._GetApi(provider).XmlPassThroughSetAcl(acl_text,
storage_url,
canned=canned,
def_obj_acl=def_obj_acl)
def XmlPassThroughGetCors(self, storage_url, provider=None):
"""XML compatibility function for getting CORS configuration on a bucket.
Args:
storage_url: StorageUrl object.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
CORS configuration XML for the bucket specified by storage_url.
"""
return self._GetApi(provider).XmlPassThroughGetCors(storage_url)
def XmlPassThroughSetCors(self, cors_text, storage_url, provider=None):
"""XML compatibility function for setting CORS configuration on a bucket.
Args:
cors_text: Raw CORS XML string.
storage_url: StorageUrl object.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
None.
"""
self._GetApi(provider).XmlPassThroughSetCors(cors_text, storage_url)
def XmlPassThroughGetLifecycle(self, storage_url, provider=None):
"""XML compatibility function for getting lifecycle config on a bucket.
Args:
storage_url: StorageUrl object.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Lifecycle configuration XML for the bucket specified by storage_url.
"""
return self._GetApi(provider).XmlPassThroughGetLifecycle(storage_url)
def XmlPassThroughSetLifecycle(self,
lifecycle_text,
storage_url,
provider=None):
"""XML compatibility function for setting lifecycle config on a bucket.
Args:
lifecycle_text: Raw lifecycle configuration XML string.
storage_url: StorageUrl object.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
None.
"""
self._GetApi(provider).XmlPassThroughSetLifecycle(lifecycle_text,
storage_url)
def XmlPassThroughGetLogging(self, storage_url, provider=None):
"""XML compatibility function for getting logging configuration on a bucket.
Args:
storage_url: StorageUrl object.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Logging configuration XML for the bucket specified by storage_url.
"""
return self._GetApi(provider).XmlPassThroughGetLogging(storage_url)
def XmlPassThroughSetTagging(self, tags_text, storage_url, provider=None):
"""XML compatibility function for setting tagging configuration on a bucket.
This passthrough provides support for setting a tagging configuration
(equivalent to a label configuration) on a cloud bucket.
Args:
tags_text: Raw tagging configuration XML string.
storage_url: StorageUrl object.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
None.
"""
return self._GetApi(provider).XmlPassThroughSetTagging(
tags_text, storage_url)
def XmlPassThroughGetTagging(self, storage_url, provider=None):
"""XML compatibility function for getting tagging configuration on a bucket.
Args:
storage_url: StorageUrl object.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Tagging configuration XML for the bucket specified by storage_url.
"""
return self._GetApi(provider).XmlPassThroughGetTagging(storage_url)
def XmlPassThroughGetWebsite(self, storage_url, provider=None):
"""XML compatibility function for getting website configuration on a bucket.
Args:
storage_url: StorageUrl object.
provider: Cloud storage provider to connect to. If not present,
class-wide default is used.
Raises:
ArgumentException for errors during input validation.
ServiceException for errors interacting with cloud storage providers.
Returns:
Website configuration XML for the bucket specified by storage_url.
"""
return self._GetApi(provider).XmlPassThroughGetWebsite(storage_url)
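# Illustrative sketch, not part of gsutil: builds a toy gsutil_api_map of the
# shape CloudApiDelegator expects. The two api classes are stand-ins supplied
# by the caller; real gsutil wires in its JSON and XML implementations via
# cs_api_map.
def _example_api_map(json_api_class, xml_api_class):
  return {
      ApiMapConstants.API_MAP: {
          'gs': {ApiSelector.JSON: json_api_class,
                 ApiSelector.XML: xml_api_class},
          's3': {ApiSelector.XML: xml_api_class},
      },
      ApiMapConstants.SUPPORT_MAP: {
          'gs': (ApiSelector.JSON, ApiSelector.XML),
          's3': (ApiSelector.XML,),
      },
      ApiMapConstants.DEFAULT_MAP: {
          'gs': ApiSelector.JSON,
          's3': ApiSelector.XML,
      },
  }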
|
|
#! /usr/bin/env python
"""Basic Model Interface implementation for deltaRCM."""
import types
import numpy as np
from bmi import Bmi
from .deltaRCM import DeltaRCM
class BmiDeltaRCM(Bmi):
"""Create deltas from random walks."""
_name = 'DeltaRCM'
_input_var_names = ('surface__elevation',)
_output_var_names = ('surface__elevation',)
def __init__(self):
"""Create a BmiDeltaRCM model that is ready for initialization."""
self._model = None
self._values = {}
self._var_units = {}
self._grids = {}
self._grid_type = {}
def initialize(self, filename=None):
"""Initialize the deltaRCM model.
Parameters
----------
filename : str, optional
Path to name of input file.
"""
self._model = DeltaRCM()
self._values = {
'surface__elevation': self._model.eta,
}
self._var_units = {
'surface__elevation': 'm'
}
self._grids = {
0: ['surface__elevation']
}
self._grid_type = {
0: 'uniform_rectilinear_grid'
}
def update(self):
"""Advance model by one time step."""
self._model.advance_in_time()
def update_frac(self, time_frac):
"""Update model by a fraction of a time step.
Parameters
----------
time_frac : float
Fraction of a time step.
"""
time_step = self.get_time_step()
self._model.time_step = time_frac * time_step
Np_water = self._model.Np_water
Np_sed = self._model.Np_sed
self._model.Np_water = int(time_frac * Np_water)
self._model.Np_sed = int(time_frac * Np_sed)
self.update()
self._model.time_step = time_step
self._model.Np_water = Np_water
self._model.Np_sed = Np_sed
def update_until(self, then):
"""Update model until a particular time.
Parameters
----------
then : float
Time to run model until.
"""
n_steps = (then - self.get_current_time()) / self.get_time_step()
for _ in xrange(int(n_steps)):
self.update()
self.update_frac(n_steps - int(n_steps))
def finalize(self):
"""Finalize model."""
self._model = None
def get_var_type(self, var_name):
"""Data type of variable.
Parameters
----------
var_name : str
Name of variable as CSDMS Standard Name.
Returns
-------
str
Data type.
"""
return str(self.get_value_ref(var_name).dtype)
def get_var_units(self, var_name):
"""Get units of variable.
Parameters
----------
var_name : str
Name of variable as CSDMS Standard Name.
Returns
-------
str
Variable units.
"""
return self._var_units[var_name]
def get_var_nbytes(self, var_name):
"""Get units of variable.
Parameters
----------
var_name : str
Name of variable as CSDMS Standard Name.
Returns
-------
int
Size of data array in bytes.
"""
return self.get_value_ref(var_name).nbytes
def get_var_grid(self, var_name):
"""Grid id for a variable.
Parameters
----------
var_name : str
Name of variable as CSDMS Standard Name.
Returns
-------
int
Grid id.
"""
for grid_id, var_name_list in self._grids.items():
if var_name in var_name_list:
return grid_id
def get_grid_rank(self, grid_id):
"""Rank of grid.
Parameters
----------
grid_id : int
Identifier of a grid.
Returns
-------
int
Rank of grid.
"""
return len(self.get_grid_shape(grid_id))
def get_grid_size(self, grid_id):
"""Size of grid.
Parameters
----------
grid_id : int
Identifier of a grid.
Returns
-------
int
Size of grid.
"""
return np.prod(self.get_grid_shape(grid_id))
def get_value_ref(self, var_name):
"""Reference to values.
Parameters
----------
var_name : str
Name of variable as CSDMS Standard Name.
Returns
-------
array_like
Value array.
"""
return self._values[var_name]
def get_value(self, var_name):
"""Copy of values.
Parameters
----------
var_name : str
Name of variable as CSDMS Standard Name.
Returns
-------
array_like
Copy of values.
"""
return self.get_value_ref(var_name).copy()
def get_value_at_indices(self, var_name, indices):
"""Get values at particular indices.
Parameters
----------
var_name : str
Name of variable as CSDMS Standard Name.
indices : array_like
Array of indices.
Returns
-------
array_like
Values at indices.
"""
return self.get_value_ref(var_name).take(indices)
def set_value(self, var_name, src):
"""Set model values.
Parameters
----------
var_name : str
Name of variable as CSDMS Standard Name.
src : array_like
Array of new values.
"""
val = self.get_value_ref(var_name)
val[:] = src
def set_value_at_indices(self, var_name, src, indices):
"""Set model values at particular indices.
Parameters
----------
var_name : str
Name of variable as CSDMS Standard Name.
src : array_like
Array of new values.
indices : array_like
Array of indices.
"""
val = self.get_value_ref(var_name)
val.flat[indices] = src
def get_component_name(self):
"""Name of the component."""
return self._name
def get_input_var_names(self):
"""Get names of input variables."""
return self._input_var_names
def get_output_var_names(self):
"""Get names of output variables."""
return self._output_var_names
def get_grid_shape(self, grid_id):
"""Number of rows and columns of uniform rectilinear grid."""
var_name = self._grids[grid_id][0]
return self.get_value_ref(var_name).shape
def get_grid_spacing(self, grid_id):
"""Spacing of rows and columns of uniform rectilinear grid."""
return self._model.spacing
def get_grid_origin(self, grid_id):
"""Origin of uniform rectilinear grid."""
return self._model.origin
def get_grid_type(self, grid_id):
"""Type of grid."""
return self._grid_type[grid_id]
def get_start_time(self):
"""Start time of model."""
return 0.
def get_end_time(self):
"""End time of model."""
return np.finfo('d').max
def get_current_time(self):
"""Current time of model."""
return self._model.time
def get_time_step(self):
"""Time step of model."""
return self._model.time_step
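# Minimal usage sketch (assumes DeltaRCM() runs with its built-in defaults and
# that 'surface__elevation' is wired to grid 0 as above); the helper name is
# only illustrative.
def _example_bmi_run(n_steps=5):
    model = BmiDeltaRCM()
    model.initialize()
    for _ in range(n_steps):
        model.update()
    eta = model.get_value('surface__elevation')
    model.finalize()
    return eta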
|
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Common functions for BFGS and L-BFGS algorithm."""
import collections
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.optimizer import linesearch
# A namedtuple to hold the point at which a line function is evaluated, the
# value of the function, directional derivative, and full gradient evaluated
# at that point. To be used with the linesearch method.
ValueAndGradient = collections.namedtuple('ValueAndGradient',
['x', 'f', 'df', 'full_gradient'])
def converged_any(converged, failed):
"""Condition to stop when any batch member converges, or all have failed."""
return tf.reduce_any(converged) | tf.reduce_all(failed)
def converged_all(converged, failed):
"""Condition to stop when all batch members have converged or failed."""
return tf.reduce_all(converged | failed)
def get_initial_state_args(value_and_gradients_function,
initial_position,
grad_tolerance,
control_inputs=None):
"""Returns a dictionary to populate the initial state of the search procedure.
Performs an initial convergence check and the first evaluation of the
objective function.
Args:
value_and_gradients_function: A Python callable that accepts a tensor and
returns a tuple of two tensors: the objective function value and its
derivative.
initial_position: The starting point of the search procedure.
grad_tolerance: The gradient tolerance for the procedure.
control_inputs: Optional ops used to assert the validity of inputs, these
are added as control dependencies to execute before the objective
function is evaluated for the first time.
Returns:
A dictionary with values for the following keys:
converged: True if the convergence check finds that the initial position
is already an argmin of the objective function.
failed: Initialized to False.
num_objective_evaluations: Initialized to 1.
position: Initialized to the initial position.
objective_value: Initialized to the value of the objective function at
the initial position.
objective_gradient: Initialized to the gradient of the objective
function at the initial position.
"""
if control_inputs:
with tf.control_dependencies(control_inputs):
f0, df0 = value_and_gradients_function(initial_position)
else:
f0, df0 = value_and_gradients_function(initial_position)
# This is a gradient-based convergence check. We only do it for finite
# objective values because we assume the gradient reported at a position with
# a non-finite objective value is untrustworthy. The main loop handles
# non-finite objective values itself (see `terminate_if_not_finite`).
init_converged = tf.math.is_finite(f0) & (norm(df0, dims=1) < grad_tolerance)
return dict(
converged=init_converged,
failed=tf.zeros_like(init_converged), # i.e. False.
num_iterations=tf.convert_to_tensor(0),
num_objective_evaluations=tf.convert_to_tensor(1),
position=initial_position,
objective_value=f0,
objective_gradient=df0)
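# Illustrative sketch with a toy objective (not part of the library): initial
# state for minimizing f(x) = sum((x - 1)**2) starting from the origin.
def _example_initial_state_args():
  def quadratic(x):
    return tf.reduce_sum((x - 1.)**2, axis=-1), 2. * (x - 1.)
  return get_initial_state_args(quadratic,
                                initial_position=tf.constant([0., 0.]),
                                grad_tolerance=1e-8)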
def terminate_if_not_finite(state, value=None, gradient=None):
"""Terminates optimization if the objective or gradient values are not finite.
Specifically,
- If the objective is -inf, stop with success, since this position is a global
minimum.
- Otherwise, if the objective or any component of the gradient is not finite,
stop with failure.
Why fail?
- If the objective is nan, it could be a global minimum, but we can't know.
- If the objective is +inf, we can't trust the gradient, so we can't know
where to go next. This should only ever happen on the first iteration,
because the line search avoids returning points whose objective values are
+inf.
- If the gradient has any nonfinite values, we can't use it to move a finite
amount.
Args:
state: A BfgsOptimizerResults or LbfgsOptimizerResults representing the
current position and information about it.
value: A Tensor giving the value of the objective function.
`state.objective_value` if not supplied.
gradient: A Tensor giving the gradient of the objective function.
`state.objective_gradient` if not supplied.
Returns:
state: A namedtuple of the same type with possibly updated `converged` and
`failed` fields.
"""
if value is None:
value = state.objective_value
if gradient is None:
gradient = state.objective_gradient
minus_inf_mask = _is_negative_inf(value)
state = state._replace(converged=state.converged | minus_inf_mask)
non_finite_mask = (
~tf.math.is_finite(value) |
tf.reduce_any(~tf.math.is_finite(gradient), axis=-1))
state = state._replace(failed=state.failed |
(~state.converged & non_finite_mask))
return state
def _is_negative_inf(x):
return x <= tf.constant(float('-inf'), dtype=x.dtype)
def line_search_step(state, value_and_gradients_function, search_direction,
grad_tolerance, f_relative_tolerance, x_tolerance,
stopping_condition, max_iterations, f_absolute_tolerance):
"""Performs the line search step of the BFGS search procedure.
Uses hager_zhang line search procedure to compute a suitable step size
to advance the current `state.position` along the given `search_direction`.
Also, if the line search is successful, updates the `state.position` by
taking the corresponding step.
Args:
state: A namedtuple instance holding values for the current state of the
search procedure. The state must include the fields: `position`,
`objective_value`, `objective_gradient`, `num_iterations`,
`num_objective_evaluations`, `converged` and `failed`.
value_and_gradients_function: A Python callable that accepts a point as a
real `Tensor` of shape `[..., n]` and returns a tuple of two tensors of
the same dtype: the objective function value, a real `Tensor` of shape
`[...]`, and its derivative, another real `Tensor` of shape `[..., n]`.
search_direction: A real `Tensor` of shape `[..., n]`. The direction along
which to perform line search.
grad_tolerance: Scalar `Tensor` of real dtype. Specifies the gradient
tolerance for the procedure.
f_relative_tolerance: Scalar `Tensor` of real dtype. Specifies the tolerance
for the relative change in the objective value.
x_tolerance: Scalar `Tensor` of real dtype. Specifies the tolerance for the
change in the position.
stopping_condition: A Python function that takes as input two Boolean
tensors of shape `[...]`, and returns a Boolean scalar tensor. The input
tensors are `converged` and `failed`, indicating the current status of
each respective batch member; the return value states whether the
algorithm should stop.
max_iterations: A Python integer that is used as the maximum number of
iterations of the hager_zhang line search algorithm.
f_absolute_tolerance: Scalar `Tensor` of real dtype. Specifies the tolerance
for the absolute change in the objective value.
Returns:
A copy of the input state with the following fields updated:
converged: a Boolean `Tensor` of shape `[...]` indicating whether the
convergence criteria has been met.
failed: a Boolean `Tensor` of shape `[...]` indicating whether the line
search procedure failed to converge, or if either the updated gradient
or objective function are no longer finite.
num_iterations: Increased by 1.
num_objective_evaluations: Increased by the number of times that the
objective function got evaluated.
position, objective_value, objective_gradient: If line search succeeded,
updated by computing the new position and evaluating the objective
function at that position.
"""
line_search_value_grad_func = _restrict_along_direction(
value_and_gradients_function, state.position, search_direction)
derivative_at_start_pt = tf.reduce_sum(
state.objective_gradient * search_direction, axis=-1)
val_0 = ValueAndGradient(x=_broadcast(0, state.position),
f=state.objective_value,
df=derivative_at_start_pt,
full_gradient=state.objective_gradient)
inactive = state.failed | state.converged
ls_result = linesearch.hager_zhang(
line_search_value_grad_func,
initial_step_size=_broadcast(1, state.position),
value_at_zero=val_0,
converged=inactive,
max_iterations=max_iterations) # No search needed for these.
state_after_ls = update_fields(
state,
failed=state.failed | (~state.converged & ~ls_result.converged),
num_iterations=state.num_iterations + 1,
num_objective_evaluations=(
state.num_objective_evaluations + ls_result.func_evals))
def _do_update_position():
# For inactive batch members `left.x` is zero. However, their
# `search_direction` might also be undefined, so we can't rely on
# multiplication by zero to produce a `position_delta` of zero.
position_delta = tf.where(
inactive[..., tf.newaxis],
dtype_util.as_numpy_dtype(search_direction.dtype)(0),
search_direction * ls_result.left.x[..., tf.newaxis])
return _update_position(
state_after_ls,
position_delta,
ls_result.left.f,
ls_result.left.full_gradient,
grad_tolerance, f_relative_tolerance, x_tolerance,
f_absolute_tolerance) # pyformat: disable
return ps.cond(
stopping_condition(state.converged, state.failed),
true_fn=lambda: state_after_ls,
false_fn=_do_update_position)
def update_fields(state, **kwargs):
"""Copies the argument and overrides some of its fields.
Args:
state: A `collections.namedtuple` instance.
**kwargs: Other named arguments represent fields in the tuple to override
with new values.
Returns:
A namedtuple, of the same class as the input argument, with the updated
fields.
Raises:
ValueError if the supplied kwargs contain fields not present in the
input argument.
"""
return state._replace(**kwargs)
def _restrict_along_direction(value_and_gradients_function,
position,
direction):
"""Restricts a function in n-dimensions to a given direction.
Suppose f: R^n -> R. Then given a point x0 and a vector p0 in R^n, the
restriction of the function along that direction is defined by:
```None
g(t) = f(x0 + t * p0)
```
This function performs this restriction on the given function. In addition, it
computes the gradient of the restricted function along the restriction
direction. This is equivalent to computing `dg/dt` in the definition above.
Args:
value_and_gradients_function: Callable accepting a single real `Tensor`
argument of shape `[..., n]` and returning a tuple of a real `Tensor` of
shape `[...]` and a real `Tensor` of shape `[..., n]`. The multivariate
function whose restriction is to be computed. The output values of the
callable are the function value and the gradients at the input argument.
position: `Tensor` of real dtype and shape consumable by
`value_and_gradients_function`. Corresponds to `x0` in the definition
above.
direction: `Tensor` of the same dtype and shape as `position`. The direction
along which to restrict the function. Note that the direction need not
be a unit vector.
Returns:
restricted_value_and_gradients_func: A callable accepting a tensor of shape
broadcastable to `[...]` and same dtype as `position` and returning a
namedtuple of `Tensors`. The input tensor is the parameter along the
direction labelled `t` above. The return value contains fields:
x: A real `Tensor` of shape `[...]`. The input value `t` where the line
function was evaluated, after any necessary broadcasting.
f: A real `Tensor` of shape `[...]` containing the value of the
function at the point `position + t * direction`.
df: A real `Tensor` of shape `[...]` containing the derivative at
`position + t * direction`.
full_gradient: A real `Tensor` of shape `[..., n]`, the full gradient
of the original `value_and_gradients_function`.
"""
def _restricted_func(t):
pt = position + t[..., tf.newaxis] * direction
t = _broadcast(t, position)
objective_value, gradient = value_and_gradients_function(pt)
return ValueAndGradient(
x=t,
f=objective_value,
df=tf.reduce_sum(gradient * direction, axis=-1),
full_gradient=gradient)
return _restricted_func
def _update_position(state,
position_delta,
next_objective,
next_gradient,
grad_tolerance,
f_relative_tolerance,
x_tolerance,
f_absolute_tolerance): # pyformat: disable
"""Updates the state advancing its position by a given position_delta."""
state = terminate_if_not_finite(state, next_objective, next_gradient)
next_position = state.position + position_delta
# pyformat: disable
converged = ~state.failed & _check_convergence(state.position,
next_position,
state.objective_value,
next_objective,
next_gradient,
grad_tolerance,
f_relative_tolerance,
x_tolerance,
f_absolute_tolerance)
# pyformat: enable
return update_fields(
state,
converged=state.converged | converged,
position=next_position,
objective_value=next_objective,
objective_gradient=next_gradient)
def norm(value, dims, order=None):
"""Compute the norm of the given (possibly batched) value.
Args:
value: A `Tensor` of real dtype.
dims: A Python integer with the number of non-batching dimensions in the
value, i.e. `dims=0` (scalars), `dims=1` (vectors), `dims=2` (matrices).
order: Order of the norm, defaults to `np.inf`.
"""
if dims == 0:
return tf.math.abs(value)
elif dims == 1:
axis = -1
elif dims == 2:
axis = [-1, -2]
else:
raise ValueError('`dims` must be 0, 1 or 2, but was: {}'.format(dims))
if order is None:
order = np.inf
return tf.norm(tensor=value, axis=axis, ord=order)
def _check_convergence(current_position,
next_position,
current_objective,
next_objective,
next_gradient,
grad_tolerance,
f_relative_tolerance,
x_tolerance,
f_absolute_tolerance): # pyformat: disable
"""Checks if the algorithm satisfies the convergence criteria."""
grad_converged = norm(next_gradient, dims=1) <= grad_tolerance
x_converged = norm(next_position - current_position, dims=1) <= x_tolerance
f_relative_converged = (
norm(next_objective - current_objective, dims=0) <=
f_relative_tolerance * current_objective)
f_absolute_converged = (
norm(next_objective - current_objective, dims=0) <= f_absolute_tolerance)
return (grad_converged | x_converged | f_relative_converged
| f_absolute_converged)
def _broadcast(value, target):
"""Broadcast a value to match the batching dimensions of a target.
If necessary the value is converted into a tensor. Both value and target
should be of the same dtype.
Args:
value: A value to broadcast.
target: A `Tensor` of shape [b1, ..., bn, d].
Returns:
A `Tensor` of shape [b1, ..., bn] and same dtype as the target.
"""
return tf.broadcast_to(
tf.convert_to_tensor(value, dtype=target.dtype),
ps.shape(target)[:-1])
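# Illustrative sketch with a toy objective (not part of the library): restrict
# f(x) = sum(x**2) along a descent direction and evaluate the line function,
# which returns a ValueAndGradient namedtuple with fields x, f, df and
# full_gradient.
def _example_restricted_quadratic(t=0.5):
  def quadratic(x):
    return tf.reduce_sum(x**2, axis=-1), 2. * x
  position = tf.constant([[1., 2.]])
  direction = tf.constant([[-1., -2.]])
  line_func = _restrict_along_direction(quadratic, position, direction)
  return line_func(tf.constant(t))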
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategy combinations for combinations.combine()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import tf2
from tensorflow.python.distribute import central_storage_strategy
from tensorflow.python.distribute import cluster_resolver
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy as mirrored_lib
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import one_device_strategy as one_device_lib
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.eager import remote
from tensorflow.python.platform import flags
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.util.tf_export import tf_export
_TF_INTERNAL_API_PREFIX = "__internal__.distribute.combinations."
_did_connect_to_cluster = False
CollectiveAllReduceExtended = (
collective_all_reduce_strategy.CollectiveAllReduceExtended)
def _version_chooser(tf1_cls, tf2_cls):
def creator(*args, **kwargs):
if tf2.enabled():
return tf2_cls(*args, **kwargs)
return tf1_cls(*args, **kwargs)
return creator
MirroredStrategy = _version_chooser(mirrored_lib.MirroredStrategyV1,
mirrored_lib.MirroredStrategy)
CentralStorageStrategy = _version_chooser(
central_storage_strategy.CentralStorageStrategyV1,
central_storage_strategy.CentralStorageStrategy)
OneDeviceStrategy = _version_chooser(one_device_lib.OneDeviceStrategyV1,
one_device_lib.OneDeviceStrategy)
# Only V2 CollectiveAllReduceStrategy combinations are supported.
CollectiveAllReduceStrategy = (
collective_all_reduce_strategy.CollectiveAllReduceStrategy)
# pylint: disable=missing-docstring
def _get_tpu_strategy_creator(steps_per_run,
use_single_core=False,
enable_packed_variable=False,
**kwargs):
def _create_tpu_strategy():
FLAGS = flags.FLAGS # pylint: disable=invalid-name
global _did_connect_to_cluster
try:
# Attempt to locally discover the TPU. This will fail for Cloud TPU, in
# which case we fall back to the values passed as flags.
resolver = tpu_cluster_resolver.TPUClusterResolver()
did_automatically_resolve = True
except ValueError:
did_automatically_resolve = False
# These flags will be defined by tpu_test_wrapper.py.
resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=hasattr(FLAGS, "tpu") and FLAGS.tpu or "",
zone=hasattr(FLAGS, "zone") and FLAGS.zone or None,
project=hasattr(FLAGS, "project") and FLAGS.project or None,
)
# Only connect once per process, rather than per test method.
if getattr(FLAGS, "tpu", "") or did_automatically_resolve:
if not _did_connect_to_cluster:
remote.connect_to_cluster(resolver)
_did_connect_to_cluster = True
topology = tpu_strategy_util.initialize_tpu_system(resolver)
device_assignment = None
if use_single_core:
device_assignment = device_assignment_lib.DeviceAssignment(
topology,
core_assignment=device_assignment_lib.SINGLE_CORE_ASSIGNMENT)
# Steps per run is only supported in TF 1.x
if tf2.enabled():
strategy = tpu_lib.TPUStrategy(resolver, device_assignment, **kwargs)
else:
strategy = tpu_lib.TPUStrategyV1(resolver, steps_per_run,
device_assignment, **kwargs)
strategy._enable_packed_variable_in_eager_mode = enable_packed_variable # pylint: disable=protected-access
return strategy
return _create_tpu_strategy
def _get_multi_worker_mirrored_creator(required_gpus):
def _create_multi_worker_mirrored():
tf_config = cluster_resolver.TFConfigClusterResolver()
master = tf_config.master()
if tf_config.rpc_layer:
# Strip off the rpc_layer suffix.
master = master[len("%s://" % tf_config.rpc_layer):]
resolver = cluster_resolver.SimpleClusterResolver(
cluster_spec=tf_config.cluster_spec(),
task_type=tf_config.task_type,
task_id=tf_config.task_id,
master=master,
environment=tf_config.environment,
num_accelerators={"GPU": required_gpus},
rpc_layer=tf_config.rpc_layer or "grpc",
)
    # Disable health check. We don't have a reliable way to shut down the strategy
# (and thus the health check) at the end of a test. Turning on health check
# causes some flakiness since we re-create part of the server when creating
# a strategy, and our tests are capable of handling failures.
CollectiveAllReduceExtended._enable_check_health = False # pylint: disable=protected-access
# Always create the strategy in eager mode so that it starts the server and
# configures the eager context. The eager context can no longer be
# configured after initialization.
with context.eager_mode():
strategy = CollectiveAllReduceStrategy(cluster_resolver=resolver)
# TODO(b/152320929): Wait for the cluster before proceeding, otherwise
# collectives may hang if any worker launches collectives before the chief
# creates the strategy.
try:
multi_process_runner.get_barrier().wait()
except ValueError:
# If the creator is called in the main process,
# multi_process_runner.get_barrier() raises ValueError, which is safe to
# ignore.
pass
return strategy
return _create_multi_worker_mirrored
def _deferred_pool_runner(has_chief, num_workers, initializer=None):
"""Returns a callable that returns the pool runner.
It creates the pool runner only upon first invocation. This avoids creating it
when this file is imported.
Args:
has_chief: whether there should be a chief.
num_workers: the number of workers excluding the chief.
initializer: initializer of each process.
Returns:
A callable that returns the runner.
"""
container = []
def get_or_create():
if not container:
cluster_spec = multi_worker_test_base.create_cluster_spec(
has_chief=has_chief,
num_workers=num_workers,
num_ps=0,
has_eval=False)
runner = multi_process_runner.MultiProcessPoolRunner(
cluster_spec, initializer=initializer)
container.append(runner)
return container[0]
return get_or_create
# We need to create the strategy in the initializer to start the server before
# any test runs.
_two_worker_pool = _deferred_pool_runner(
has_chief=True,
num_workers=1,
initializer=_get_multi_worker_mirrored_creator(required_gpus=0))
_four_worker_pool = _deferred_pool_runner(
has_chief=True,
num_workers=3,
initializer=_get_multi_worker_mirrored_creator(required_gpus=0))
# pylint: disable=g-long-lambda
default_strategy = combinations.NamedDistribution(
"Default",
distribution_strategy_context._get_default_strategy, # pylint: disable=protected-access
required_gpus=None)
one_device_strategy = combinations.NamedDistribution(
"OneDeviceCPU", lambda: OneDeviceStrategy("/cpu:0"), required_gpus=None)
one_device_strategy_gpu = combinations.NamedDistribution(
"OneDeviceGPU", lambda: OneDeviceStrategy("/gpu:0"), required_gpus=1)
one_device_strategy_on_worker_1 = combinations.NamedDistribution(
"OneDeviceOnWorker1CPU",
lambda: OneDeviceStrategy("/job:worker/replica:0/task:1/cpu:0"),
required_gpus=None)
one_device_strategy_gpu_on_worker_1 = combinations.NamedDistribution(
"OneDeviceOnWorker1GPU",
lambda: OneDeviceStrategy("/job:worker/replica:0/task:1/gpu:0"),
required_gpus=1)
tpu_strategy = combinations.NamedDistribution(
"TPU", _get_tpu_strategy_creator(steps_per_run=2), required_tpu=True)
tpu_strategy_packed_var = combinations.NamedDistribution(
"TPUPackedVar",
_get_tpu_strategy_creator(steps_per_run=2, enable_packed_variable=True),
required_tpu=True)
tpu_strategy_one_step = combinations.NamedDistribution(
"TPUOneStep", _get_tpu_strategy_creator(steps_per_run=1), required_tpu=True)
tpu_strategy_one_core = combinations.NamedDistribution(
"TPUOneCore",
_get_tpu_strategy_creator(steps_per_run=2, use_single_core=True),
required_tpu=True)
tpu_strategy_one_step_one_core = combinations.NamedDistribution(
"TPUOneStepOneCore",
_get_tpu_strategy_creator(steps_per_run=1, use_single_core=True),
required_tpu=True)
cloud_tpu_strategy = combinations.NamedDistribution(
"CloudTPU",
_get_tpu_strategy_creator(steps_per_run=2),
required_tpu=True,
use_cloud_tpu=True)
mirrored_strategy_with_one_cpu = combinations.NamedDistribution(
"Mirrored1CPU", lambda: MirroredStrategy(["/cpu:0"]))
mirrored_strategy_with_one_gpu = combinations.NamedDistribution(
"Mirrored1GPU", lambda: MirroredStrategy(["/gpu:0"]), required_gpus=1)
mirrored_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
"MirroredCPUAndGPU",
lambda: MirroredStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
mirrored_strategy_with_two_gpus = combinations.NamedDistribution(
"Mirrored2GPUs",
lambda: MirroredStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
# Should call set_virtual_cpus_to_at_least(3) in your test's setUp methods.
mirrored_strategy_with_cpu_1_and_2 = combinations.NamedDistribution(
"Mirrored2CPU", lambda: MirroredStrategy(["/cpu:1", "/cpu:2"]))
mirrored_strategy_with_cpu_1_and_2.__doc__ = (
"""Mirrored strategy with 2 virtual CPUs.
Should set up logical devices before use
""")
central_storage_strategy_with_two_gpus = combinations.NamedDistribution(
"CentralStorage2GPUs",
lambda: CentralStorageStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
central_storage_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
"CentralStorageCPUAndGPU",
lambda: CentralStorageStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
# chief + 1 worker, with CPU.
multi_worker_mirrored_2x1_cpu = combinations.NamedDistribution(
"MultiWorkerMirrored2x1CPU",
_get_multi_worker_mirrored_creator(required_gpus=0),
has_chief=True,
num_workers=1,
pool_runner_fn=_two_worker_pool,
no_xla=True,
)
# chief + 1 worker, with 1 GPU each.
multi_worker_mirrored_2x1_gpu = combinations.NamedDistribution(
"MultiWorkerMirrored2x1GPU",
_get_multi_worker_mirrored_creator(required_gpus=1),
has_chief=True,
num_workers=1,
required_gpus=1,
pool_runner_fn=_two_worker_pool,
no_xla=True,
)
# chief + 1 worker, with 2 GPU each.
multi_worker_mirrored_2x2_gpu = combinations.NamedDistribution(
"MultiWorkerMirrored2x2GPU",
_get_multi_worker_mirrored_creator(required_gpus=2),
has_chief=True,
num_workers=1,
required_gpus=2,
pool_runner_fn=_two_worker_pool,
no_xla=True,
)
# chief + 3 workers, with CPU.
multi_worker_mirrored_4x1_cpu = combinations.NamedDistribution(
"MultiWorkerMirrored4x1CPU",
_get_multi_worker_mirrored_creator(required_gpus=0),
has_chief=True,
num_workers=3,
pool_runner_fn=_four_worker_pool,
no_xla=True,
)
graph_and_eager_modes = ["graph", "eager"]
# TODO(crccw): remove after tf-nightly picks up the new API.
def set_virtual_cpus_to_at_least(num_virtual_cpus):
test_util.set_logical_devices_to_at_least("CPU", num_virtual_cpus)
strategies_minus_tpu = [
default_strategy,
one_device_strategy,
one_device_strategy_gpu,
mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus,
central_storage_strategy_with_gpu_and_cpu,
]
strategies_minus_default_and_tpu = [
one_device_strategy,
one_device_strategy_gpu,
mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus,
]
tpu_strategies = [
tpu_strategy, # steps_per_run=2
tpu_strategy_one_step,
tpu_strategy_packed_var,
cloud_tpu_strategy,
]
all_strategies_minus_default = strategies_minus_default_and_tpu + tpu_strategies
all_strategies = strategies_minus_tpu + tpu_strategies
two_replica_strategies = [
mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus,
multi_worker_mirrored_2x1_cpu,
multi_worker_mirrored_2x1_gpu,
tpu_strategy, # steps_per_run=2
tpu_strategy_one_step,
central_storage_strategy_with_gpu_and_cpu,
]
four_replica_strategies = [
multi_worker_mirrored_2x2_gpu,
multi_worker_mirrored_4x1_cpu,
]
# TODO(b/159831907): replace with two_replica_strategies after the tests using
# it work with MWMS.
multidevice_strategies = [
mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus,
tpu_strategy, # steps_per_run=2
tpu_strategy_one_step
]
multiworker_strategies = [
multi_worker_mirrored_2x1_cpu, multi_worker_mirrored_2x1_gpu,
multi_worker_mirrored_2x2_gpu
]
def strategy_minus_tpu_combinations():
return combinations.combine(
distribution=strategies_minus_tpu, mode=["graph", "eager"])
def tpu_strategy_combinations():
return combinations.combine(distribution=tpu_strategies, mode=["graph"])
def all_strategy_combinations():
return strategy_minus_tpu_combinations() + tpu_strategy_combinations()
def all_strategy_minus_default_and_tpu_combinations():
return combinations.combine(
distribution=[
one_device_strategy, one_device_strategy_gpu,
mirrored_strategy_with_gpu_and_cpu, mirrored_strategy_with_two_gpus
],
mode=["graph", "eager"])
def all_strategy_combinations_minus_default():
return (all_strategy_minus_default_and_tpu_combinations() +
tpu_strategy_combinations())
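# A minimal usage sketch (not part of this module): the strategy objects and
# combination helpers above are typically consumed through a test decorator
# such as `combinations.generate` (assumed here); the decorated test then
# receives a `distribution` and `mode` argument per generated combination.
#
#   @combinations.generate(all_strategy_combinations())
#   def test_something(self, distribution, mode):
#     with distribution.scope():
#       ...  # build and run the replicated computation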
tf_export(
_TF_INTERNAL_API_PREFIX + "central_storage_strategy_with_gpu_and_cpu",
v1=[]).export_constant(__name__,
"central_storage_strategy_with_gpu_and_cpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "central_storage_strategy_with_two_gpus",
v1=[]).export_constant(__name__, "central_storage_strategy_with_two_gpus")
tf_export(
_TF_INTERNAL_API_PREFIX + "cloud_tpu_strategy",
v1=[]).export_constant(__name__, "cloud_tpu_strategy")
tf_export(
_TF_INTERNAL_API_PREFIX + "default_strategy",
v1=[]).export_constant(__name__, "default_strategy")
tf_export(
_TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_cpu_1_and_2",
v1=[]).export_constant(__name__, "mirrored_strategy_with_cpu_1_and_2")
tf_export(
_TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_gpu_and_cpu",
v1=[]).export_constant(__name__, "mirrored_strategy_with_gpu_and_cpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_one_cpu",
v1=[]).export_constant(__name__, "mirrored_strategy_with_one_cpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_one_gpu",
v1=[]).export_constant(__name__, "mirrored_strategy_with_one_gpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_two_gpus",
v1=[]).export_constant(__name__, "mirrored_strategy_with_two_gpus")
tf_export(
_TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x1_cpu",
v1=[]).export_constant(__name__, "multi_worker_mirrored_2x1_cpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x1_gpu",
v1=[]).export_constant(__name__, "multi_worker_mirrored_2x1_gpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x2_gpu",
v1=[]).export_constant(__name__, "multi_worker_mirrored_2x2_gpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "one_device_strategy",
v1=[]).export_constant(__name__, "one_device_strategy")
tf_export(
_TF_INTERNAL_API_PREFIX + "one_device_strategy_gpu",
v1=[]).export_constant(__name__, "one_device_strategy_gpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "tpu_strategy",
v1=[]).export_constant(__name__, "tpu_strategy")
tf_export(
_TF_INTERNAL_API_PREFIX + "tpu_strategy_one_core",
v1=[]).export_constant(__name__, "tpu_strategy_one_core")
tf_export(
_TF_INTERNAL_API_PREFIX + "tpu_strategy_packed_var",
v1=[]).export_constant(__name__, "tpu_strategy_packed_var")
|
|
import param
import numpy as np
from ..core import Dimension, Dataset, NdOverlay
from ..core.operation import Operation
from ..core.util import basestring, cartesian_product, isfinite
from ..element import (Curve, Area, Image, Distribution, Bivariate,
Contours, Polygons)
from .element import contours
def _kde_support(bin_range, bw, gridsize, cut, clip):
"""Establish support for a kernel density estimate."""
kmin, kmax = bin_range[0] - bw * cut, bin_range[1] + bw * cut
if isfinite(clip[0]):
kmin = max(kmin, clip[0])
if isfinite(clip[1]):
kmax = min(kmax, clip[1])
return np.linspace(kmin, kmax, gridsize)
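# Worked example (illustrative numbers, not from the original module): with
# bin_range=(0, 10), bw=1.0, cut=3 and an unbounded clip, the support spans
# (0 - 3*1.0, 10 + 3*1.0) = (-3, 13), sampled at `gridsize` evenly spaced
# points:
#   xs = _kde_support((0, 10), 1.0, 100, 3, (-np.inf, np.inf))
#   # xs[0] == -3.0, xs[-1] == 13.0, len(xs) == 100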
class univariate_kde(Operation):
"""
Computes a 1D kernel density estimate (KDE) along the supplied
dimension. Kernel density estimation is a non-parametric way to
estimate the probability density function of a random variable.
The KDE works by placing a Gaussian kernel at each sample with
the supplied bandwidth. These kernels are then summed to produce
the density estimate. By default a good bandwidth is determined
using the bw_method but it may be overridden by an explicit value.
"""
bw_method = param.ObjectSelector(default='scott', objects=['scott', 'silverman'], doc="""
Method of automatically determining KDE bandwidth""")
bandwidth = param.Number(default=None, doc="""
Allows supplying explicit bandwidth value rather than relying on scott or silverman method.""")
cut = param.Number(default=3, doc="""
Draw the estimate to cut * bw from the extreme data points.""")
bin_range = param.NumericTuple(default=None, length=2, doc="""
Specifies the range within which to compute the KDE.""")
dimension = param.String(default=None, doc="""
Along which dimension of the Element to compute the KDE.""")
filled = param.Boolean(default=True, doc="""
Controls whether to return filled or unfilled KDE.""")
n_samples = param.Integer(default=100, doc="""
Number of samples to compute the KDE over.""")
groupby = param.ClassSelector(default=None, class_=(basestring, Dimension), doc="""
        Defines a dimension to group by, returning an NdOverlay of KDEs.""")
def _process(self, element, key=None):
if self.p.groupby:
if not isinstance(element, Dataset):
                raise ValueError('Cannot use univariate_kde groupby on non-Dataset Element')
grouped = element.groupby(self.p.groupby, group_type=Dataset, container_type=NdOverlay)
self.p.groupby = None
return grouped.map(self._process, Dataset)
try:
from scipy import stats
from scipy.linalg import LinAlgError
except ImportError:
raise ImportError('%s operation requires SciPy to be installed.' % type(self).__name__)
params = {}
if isinstance(element, Distribution):
selected_dim = element.kdims[0]
if element.group != type(element).__name__:
params['group'] = element.group
params['label'] = element.label
vdim = element.vdims[0]
vdim_name = '{}_density'.format(selected_dim.name)
vdims = [vdim(vdim_name, label='Density') if vdim.name == 'Density' else vdim]
else:
if self.p.dimension:
selected_dim = element.get_dimension(self.p.dimension)
else:
dimensions = element.vdims+element.kdims
if not dimensions:
raise ValueError("%s element does not declare any dimensions "
"to compute the kernel density estimate on." %
type(element).__name__)
selected_dim = dimensions[0]
vdim_name = '{}_density'.format(selected_dim.name)
vdims = [Dimension(vdim_name, label='Density')]
data = element.dimension_values(selected_dim)
bin_range = self.p.bin_range or element.range(selected_dim)
if bin_range == (0, 0) or any(not isfinite(r) for r in bin_range):
bin_range = (0, 1)
elif bin_range[0] == bin_range[1]:
bin_range = (bin_range[0]-0.5, bin_range[1]+0.5)
element_type = Area if self.p.filled else Curve
data = data[isfinite(data)] if len(data) else []
if len(data) > 1:
try:
kde = stats.gaussian_kde(data)
except LinAlgError:
return element_type([], selected_dim, vdims, **params)
if self.p.bandwidth:
kde.set_bandwidth(self.p.bandwidth)
bw = kde.scotts_factor() * data.std(ddof=1)
if self.p.bin_range:
xs = np.linspace(bin_range[0], bin_range[1], self.p.n_samples)
else:
xs = _kde_support(bin_range, bw, self.p.n_samples, self.p.cut, selected_dim.range)
ys = kde.evaluate(xs)
else:
xs = np.linspace(bin_range[0], bin_range[1], self.p.n_samples)
ys = np.full_like(xs, 0)
return element_type((xs, ys), kdims=[selected_dim], vdims=vdims, **params)
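# Hypothetical usage sketch (names and parameter values are illustrative, not
# part of the original module): applying the operation to a Distribution of
# samples returns an Area (filled=True) or Curve (filled=False) holding the
# estimated density:
#   samples = Distribution(np.random.randn(1000), 'value')
#   density = univariate_kde(samples, n_samples=200, filled=False)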
class bivariate_kde(Operation):
"""
Computes a 2D kernel density estimate (KDE) of the first two
dimensions in the input data. Kernel density estimation is a
non-parametric way to estimate the probability density function of
a random variable.
    The KDE works by placing a 2D Gaussian kernel at each sample with
the supplied bandwidth. These kernels are then summed to produce
the density estimate. By default a good bandwidth is determined
using the bw_method but it may be overridden by an explicit value.
"""
contours = param.Boolean(default=True, doc="""
Whether to compute contours from the KDE, determines whether to
return an Image or Contours/Polygons.""")
bw_method = param.ObjectSelector(default='scott', objects=['scott', 'silverman'], doc="""
Method of automatically determining KDE bandwidth""")
bandwidth = param.Number(default=None, doc="""
Allows supplying explicit bandwidth value rather than relying
on scott or silverman method.""")
cut = param.Number(default=3, doc="""
Draw the estimate to cut * bw from the extreme data points.""")
filled = param.Boolean(default=False, doc="""
Controls whether to return filled or unfilled contours.""")
levels = param.ClassSelector(default=10, class_=(list, int), doc="""
        Number of contour levels to compute, or a list of scalar values specifying the contour levels.""")
n_samples = param.Integer(default=100, doc="""
Number of samples to compute the KDE over.""")
x_range = param.NumericTuple(default=None, length=2, doc="""
The x_range as a tuple of min and max x-value. Auto-ranges
if set to None.""")
y_range = param.NumericTuple(default=None, length=2, doc="""
        The y_range as a tuple of min and max y-value. Auto-ranges
if set to None.""")
def _process(self, element, key=None):
try:
from scipy import stats
except ImportError:
raise ImportError('%s operation requires SciPy to be installed.' % type(self).__name__)
if len(element.dimensions()) < 2:
raise ValueError("bivariate_kde can only be computed on elements "
"declaring at least two dimensions.")
xdim, ydim = element.dimensions()[:2]
params = {}
if isinstance(element, Bivariate):
if element.group != type(element).__name__:
params['group'] = element.group
params['label'] = element.label
vdim = element.vdims[0]
else:
vdim = 'Density'
data = element.array([0, 1]).T
xmin, xmax = self.p.x_range or element.range(0)
ymin, ymax = self.p.y_range or element.range(1)
if any(not isfinite(v) for v in (xmin, xmax)):
xmin, xmax = -0.5, 0.5
elif xmin == xmax:
xmin, xmax = xmin-0.5, xmax+0.5
if any(not isfinite(v) for v in (ymin, ymax)):
ymin, ymax = -0.5, 0.5
elif ymin == ymax:
ymin, ymax = ymin-0.5, ymax+0.5
data = data[:, isfinite(data).min(axis=0)] if data.shape[1] > 1 else np.empty((2, 0))
if data.shape[1] > 1:
kde = stats.gaussian_kde(data)
if self.p.bandwidth:
kde.set_bandwidth(self.p.bandwidth)
bw = kde.scotts_factor() * data.std(ddof=1)
if self.p.x_range:
xs = np.linspace(xmin, xmax, self.p.n_samples)
else:
xs = _kde_support((xmin, xmax), bw, self.p.n_samples, self.p.cut, xdim.range)
if self.p.y_range:
ys = np.linspace(ymin, ymax, self.p.n_samples)
else:
ys = _kde_support((ymin, ymax), bw, self.p.n_samples, self.p.cut, ydim.range)
xx, yy = cartesian_product([xs, ys], False)
positions = np.vstack([xx.ravel(), yy.ravel()])
f = np.reshape(kde(positions).T, xx.shape)
elif self.p.contours:
eltype = Polygons if self.p.filled else Contours
return eltype([], kdims=[xdim, ydim], vdims=[vdim])
else:
xs = np.linspace(xmin, xmax, self.p.n_samples)
ys = np.linspace(ymin, ymax, self.p.n_samples)
f = np.zeros((self.p.n_samples, self.p.n_samples))
img = Image((xs, ys, f.T), kdims=element.dimensions()[:2], vdims=[vdim], **params)
if self.p.contours:
cntr = contours(img, filled=self.p.filled, levels=self.p.levels)
return cntr.clone(cntr.data[1:], **params)
return img
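# Hypothetical usage sketch (illustrative only): a Bivariate element of (x, y)
# samples can be turned into contours of the 2D density estimate, or into an
# Image of the density when contours=False:
#   xy = Bivariate(np.random.randn(1000, 2))
#   levels = bivariate_kde(xy, filled=True, levels=8)   # Polygons
#   density = bivariate_kde(xy, contours=False)         # Image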
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import check_call
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.test_dimensions import create_single_exec_option_dimension
from tests.common.test_dimensions import create_uncompressed_text_dimension
from tests.common.skip import SkipIfS3, SkipIfIsilon, SkipIfLocal
from tests.util.filesystem_utils import get_fs_path
@SkipIfS3.hive
@SkipIfIsilon.hive
@SkipIfLocal.hive
class TestRefreshPartition(ImpalaTestSuite):
"""
This class tests the functionality to refresh a partition individually
for a table in HDFS
"""
@classmethod
  def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestRefreshPartition, cls).add_test_dimensions()
# There is no reason to run these tests using all dimensions.
cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
def test_add_hive_partition_and_refresh(self, vector, unique_database):
"""
Partition added in Hive can be viewed in Impala after refreshing
partition.
"""
table_name = unique_database + '.' + "partition_test_table"
self.client.execute(
'create table %s (x int) partitioned by (y int, z int)' %
table_name)
assert [] == self.get_impala_partition_info(table_name, 'y', 'z')
self.run_stmt_in_hive(
'alter table %s add partition (y=333, z=5309)' % table_name)
# Make sure Impala can't see the partition yet
assert [] == self.get_impala_partition_info(table_name, 'y', 'z')
self.client.execute('refresh %s partition (y=333, z=5309)' % table_name)
# Impala can see the partition
assert [('333', '5309')] == self.get_impala_partition_info(table_name, 'y', 'z')
# Impala's refresh didn't alter Hive's knowledge of the partition
assert ['y=333/z=5309'] == self.hive_partition_names(table_name)
def test_drop_hive_partition_and_refresh(self, vector, unique_database):
"""
Partition dropped in Hive is removed in Impala as well after refreshing
partition.
"""
table_name = unique_database + '.' + "partition_test_table"
self.client.execute(
'create table %s (x int) partitioned by (y int, z int)' %
table_name)
self.client.execute(
'alter table %s add partition (y=333, z=5309)' % table_name)
assert [('333', '5309')] == self.get_impala_partition_info(table_name, 'y', 'z')
self.run_stmt_in_hive(
'alter table %s drop partition (y=333, z=5309)' % table_name)
# Make sure Impala can still see the partition
assert [('333', '5309')] == self.get_impala_partition_info(table_name, 'y', 'z')
self.client.execute('refresh %s partition (y=333, z=5309)' % table_name)
# Impala can see the partition is not there anymore
assert [] == self.get_impala_partition_info(table_name, 'y', 'z')
# Impala's refresh didn't alter Hive's knowledge of the partition
assert [] == self.hive_partition_names(table_name)
def test_add_data_and_refresh(self, vector, unique_database):
"""
Data added through hive is visible in impala after refresh of partition.
"""
table_name = unique_database + '.' + "partition_test_table"
self.client.execute(
'create table %s (x int) partitioned by (y int, z int)' %
table_name)
self.client.execute(
'alter table %s add partition (y=333, z=5309)' % table_name)
result = self.client.execute("select count(*) from %s" % table_name)
assert result.data == [str('0')]
self.run_stmt_in_hive(
'insert into table %s partition (y=333, z=5309) values (2)'
% table_name)
    # Make sure it still shows the same result before refreshing
result = self.client.execute("select count(*) from %s" % table_name)
assert result.data == [str('0')]
self.client.execute('refresh %s partition (y=333, z=5309)' % table_name)
assert '2\t333\t5309' == self.client.execute(
'select * from %s' % table_name).get_data()
def test_refresh_invalid_partition(self, vector, unique_database):
"""
Trying to refresh a partition that does not exist does not modify anything
either in impala or hive.
"""
table_name = unique_database + '.' + "partition_test_table"
self.client.execute(
'create table %s (x int) partitioned by (y int, z int)' %
table_name)
self.client.execute(
'alter table %s add partition (y=333, z=5309)' % table_name)
assert [('333', '5309')] == self.get_impala_partition_info(table_name, 'y', 'z')
assert ['y=333/z=5309'] == self.hive_partition_names(table_name)
self.client.execute('refresh %s partition (y=71, z=8857)' % table_name)
assert [('333', '5309')] == self.get_impala_partition_info(table_name, 'y', 'z')
assert ['y=333/z=5309'] == self.hive_partition_names(table_name)
def test_remove_data_and_refresh(self, vector, unique_database):
"""
    Data removed through Hive is no longer visible in Impala after the partition is refreshed.
"""
expected_error = 'Error(2): No such file or directory'
table_name = unique_database + '.' + "partition_test_table"
self.client.execute(
'create table %s (x int) partitioned by (y int, z int)' %
table_name)
self.client.execute(
'alter table %s add partition (y=333, z=5309)' % table_name)
self.client.execute(
'insert into table %s partition (y=333, z=5309) values (2)' % table_name)
assert '2\t333\t5309' == self.client.execute(
'select * from %s' % table_name).get_data()
self.run_stmt_in_hive(
'alter table %s drop partition (y=333, z=5309)' % table_name)
# Query the table and check for expected error.
try:
self.client.execute("select * from %s" % table_name)
assert False, "Query was expected to fail"
except ImpalaBeeswaxException as e:
assert expected_error in str(e)
self.client.execute('refresh %s partition (y=333, z=5309)' % table_name)
result = self.client.execute("select count(*) from %s" % table_name)
assert result.data == [str('0')]
def test_add_delete_data_to_hdfs_and_refresh(self, vector, unique_database):
"""
    Data added or deleted directly in HDFS is reflected in Impala after the
    partition is refreshed.
"""
table_name = unique_database + '.' + "partition_test_table"
table_location = get_fs_path("/test-warehouse/%s" % unique_database)
file_name = "alltypes.parq"
src_file = get_fs_path("/test-warehouse/alltypesagg_parquet/year=2010/month=1/"
"day=9/*.parq")
file_num_rows = 1000
self.client.execute("""
create table %s like functional.alltypes stored as parquet
location '%s'
""" % (table_name, table_location))
self.client.execute("alter table %s add partition (year=2010, month=1)" %
table_name)
self.client.execute("refresh %s" % table_name)
# Check that there is no data in table
result = self.client.execute("select count(*) from %s" % table_name)
assert result.data == [str(0)]
dst_path = "%s/year=2010/month=1/%s" % (table_location, file_name)
check_call(["hadoop", "fs", "-cp", "-f", src_file, dst_path], shell=False)
# Check that data added is not visible before refresh
result = self.client.execute("select count(*) from %s" % table_name)
assert result.data == [str(0)]
    # Check that data is visible after refresh
self.client.execute("refresh %s partition (year=2010, month=1)" % table_name)
result = self.client.execute("select count(*) from %s" % table_name)
assert result.data == [str(file_num_rows)]
# Check that after deleting the file and refreshing, it returns zero rows
check_call(["hadoop", "fs", "-rm", dst_path], shell=False)
self.client.execute("refresh %s partition (year=2010, month=1)" % table_name)
result = self.client.execute("select count(*) from %s" % table_name)
assert result.data == [str(0)]
def test_confirm_individual_refresh(self, vector, unique_database):
"""
    Data added directly to HDFS is only visible for the partition that was refreshed.
"""
table_name = unique_database + '.' + "partition_test_table"
table_location = get_fs_path("/test-warehouse/%s" % unique_database)
file_name = "alltypes.parq"
src_file = get_fs_path("/test-warehouse/alltypesagg_parquet/year=2010/month=1/"
"day=9/*.parq")
file_num_rows = 1000
self.client.execute("""
create table %s like functional.alltypes stored as parquet
location '%s'
""" % (table_name, table_location))
for month in [1, 2]:
self.client.execute("alter table %s add partition (year=2010, month=%s)" %
(table_name, month))
self.client.execute("refresh %s" % table_name)
# Check that there is no data in table
result = self.client.execute("select count(*) from %s" % table_name)
assert result.data == [str(0)]
dst_path = table_location + "/year=2010/month=%s/" + file_name
for month in [1, 2]:
check_call(["hadoop", "fs", "-cp", "-f", src_file, dst_path % month],
shell=False)
# Check that data added is not visible before refresh
result = self.client.execute("select count(*) from %s" % table_name)
assert result.data == [str(0)]
# Check that data is visible after refresh on the first partition only
self.client.execute("refresh %s partition (year=2010, month=1)" %
table_name)
result = self.client.execute("select count(*) from %s" % table_name)
assert result.data == [str(file_num_rows)]
# Check that the data is not yet visible for the second partition
# that was not refreshed
result = self.client.execute(
"select count(*) from %s where year=2010 and month=2" % table_name)
assert result.data == [str(0)]
# Check that data is visible for the second partition after refresh
self.client.execute("refresh %s partition (year=2010, month=2)" % table_name)
result = self.client.execute("select count(*) from %s" % table_name)
assert result.data == [str(file_num_rows*2)]
|
|
"""various utility functions"""
import os.path
import sys
import platform
import multiprocessing
import yaml
from mod import log
host_platforms = {
'Darwin': 'osx',
'Linux': 'linux',
'Windows': 'win'
}
#-------------------------------------------------------------------------------
def fix_path(path) :
"""if on Windows, replace backslashes in path with forward slashes
:param path: input path
:returns: fixed up path
"""
return path.replace('\\', '/')
#-------------------------------------------------------------------------------
def get_workspace_dir(fips_dir) :
"""get workspace (parent) dir from fips dir
:param fips_dir: absolute path to fips
:returns: absolute path to workspace (parent dir of fips)
"""
return os.path.split(fips_dir)[0]
#-------------------------------------------------------------------------------
def get_project_dir(fips_dir, proj_name) :
"""get absolute path to project directory in same workspace as fips
:param fips_dir: absolute path of fips
:param proj_name: project name
:returns: absolute path to project in same directory as fips
"""
return get_workspace_dir(fips_dir) + '/' + proj_name
#-------------------------------------------------------------------------------
def get_build_dir(fips_dir, proj_name, cfg) :
"""get absolute path to build directory in same workspace as fips for
given configuration
:param fips_dir: absolute path of fips
:param proj_name: project name
:param cfg: build config name (or config object for backward compatibility)
:returns: absolute path of build directory
"""
cfg_name = cfg if type(cfg) == str else cfg['name']
return '{}/fips-build/{}/{}'.format(get_workspace_dir(fips_dir), proj_name, cfg_name)
#-------------------------------------------------------------------------------
def get_deploy_dir(fips_dir, proj_name, cfg) :
"""get absolute path to deploy directory in same workspace as fips
:param fips_dir: absolute path of fips
:param proj_name: project name
:param cfg: build config name (or config object for backward compatibility)
:returns: absolute path of deploy directory
"""
cfg_name = cfg if type(cfg) == str else cfg['name']
return '{}/fips-deploy/{}/{}'.format(get_workspace_dir(fips_dir), proj_name, cfg_name)
#-------------------------------------------------------------------------------
def get_fips_dir(proj_dir, name):
"""Internal helper method to check for and return the absolute path of
a fips directory.
    If name is 'configs', the following happens:
If 'proj_dir/fips-configs/' exists, return that path, otherwise,
if 'proj_dir/fips-files/configs' exists, return that path, otherwise,
return None.
:param proj_dir: absolute path of project directory
    :param name: the name without the 'fips-' prefix
"""
d0 = proj_dir + '/fips-' + name
d1 = proj_dir + '/fips-files/' + name
if os.path.isdir(d0):
return d0
elif os.path.isdir(d1):
return d1
else:
return None
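# Example of the lookup order implemented above (paths are illustrative):
# get_fips_dir('/work/my-proj', 'configs') returns '/work/my-proj/fips-configs'
# if that directory exists, otherwise '/work/my-proj/fips-files/configs' if
# that exists, otherwise None. The get_*_dir() helpers below all rely on this.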
#-------------------------------------------------------------------------------
def get_configs_dir(proj_dir):
"""returns path to directory with project-specific config files, or
None if no such directory exists.
:param proj_dir: absolute path of project directory
:returns: absolute path of configs dir, or None
"""
return get_fips_dir(proj_dir, 'configs')
#-------------------------------------------------------------------------------
def get_verbs_dir(proj_dir):
"""returns path to directory with project-specifc verbs, or None
if no such directory exists.
:param proj_dir: absolute path of project directory
:returns: absolute path of verbs dir, or None
"""
return get_fips_dir(proj_dir, 'verbs')
#-------------------------------------------------------------------------------
def get_generators_dir(proj_dir):
"""returns path to directory with project-specific generators, or None
if no such directory exists.
:param proj_dir: absolute path of project directory
:returns: absolute path of generators dir, or None
"""
return get_fips_dir(proj_dir, 'generators')
#-------------------------------------------------------------------------------
def get_toolchains_dir(proj_dir):
"""returns path to directory with project-specific cmake toolchain files,
or None if no such directory exists.
:param proj_dir: absolute path of project directory
:returns: absolute path of toolchains dir, or None
"""
return get_fips_dir(proj_dir, 'toolchains')
#-------------------------------------------------------------------------------
def get_giturl_from_url(url) :
"""extracts the actual git url from an url string
(splits off the branch name after the optional '#')
:param url: an url string, with optional '#' branch name appended
:returns: the actual git url
"""
return url.split('#')[0]
#-------------------------------------------------------------------------------
def get_gitbranch_from_url(url) :
"""extracts the branch name from an url string
(after the optional '#'), returns 'master' if no branch name
specified.
:param url: an url string, with optional '#' branch name appended
:returns: the extracted branch name, or 'master'
"""
if '#' in url :
return url.split('#')[1]
else :
return 'master'
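# Example (illustrative url): for 'https://github.com/floooh/oryol.git#develop',
# get_giturl_from_url() returns 'https://github.com/floooh/oryol.git' and
# get_gitbranch_from_url() returns 'develop'; without a '#' suffix the branch
# defaults to 'master'.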
#-------------------------------------------------------------------------------
def get_project_name_from_url(url) :
"""get the project name from a git url
:param url: a git url
:returns: project name (last component of git url, minus extension)
"""
return os.path.splitext(url.split('/')[-1])[0]
#-------------------------------------------------------------------------------
def get_project_name_from_dir(proj_dir) :
"""extract the project name from the absolute project directory
:param proj_dir: absolute project directory
:returns: project name (last dir-name of project directory)
"""
return os.path.split(proj_dir)[1]
#-------------------------------------------------------------------------------
def load_fips_yml(proj_dir) :
"""load the fips.yml file from project directory
:param proj_dir: absolute project directory
:returns: dictionary object
"""
dic = None
path = proj_dir + '/fips.yml'
if os.path.isfile(path) :
with open(path, 'r') as f:
dic = yaml.load(f)
if not dic :
dic = {}
return dic
#-------------------------------------------------------------------------------
def lookup_target_cwd(proj_dir, target) :
"""lookup optional working directory for target from fips.yml,
return None if no cwd has been specified for this target in fips.yml
:param proj_dir: absolute project directory
:param target: target name
:returns: working directory or None
"""
target_cwd = None
dic = load_fips_yml(proj_dir)
if 'run' in dic :
if target in dic['run'] :
if 'cwd' in dic['run'][target] :
target_cwd = proj_dir + '/' + dic['run'][target]['cwd']
return target_cwd
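# Example fips.yml snippet (target name is illustrative) that makes
# lookup_target_cwd(proj_dir, 'my-app') return proj_dir + '/data':
#
#   run:
#     my-app:
#       cwd: data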
#-------------------------------------------------------------------------------
def is_valid_project_dir(proj_dir) :
"""test if the provided directory is a valid fips project (has a
fips.yml file)
:param proj_dir: absolute project directory to check
:returns: True if a valid fips project
"""
if os.path.isdir(proj_dir) :
if not os.path.isfile(proj_dir + '/fips.yml') :
return False
return True
else :
return False
#-------------------------------------------------------------------------------
def ensure_valid_project_dir(proj_dir) :
"""test if project dir is valid, if not, dump error and abort
:param proj_dir: absolute project directory to check
"""
if not is_valid_project_dir(proj_dir) :
log.error("'{}' is not a valid project directory".format(proj_dir))
#-------------------------------------------------------------------------------
def is_git_url(url) :
"""check if 'url' is a valid git url
:param url: url string
:returns: True if a valid url
"""
# we simply check whether the 'naked' url ends with '.git'
url = get_giturl_from_url(url)
return url[-4:] == '.git'
#-------------------------------------------------------------------------------
def confirm(question) :
"""ask user to confirm (y/N)
:param question: the question to confirm
:return: True: user pressed 'y', False: user pressed 'n'
"""
validAnswers={'': False, 'yes': True, 'ye': True, 'y': True, 'no': False, 'n': False }
while True :
sys.stdout.write(question + ' [y/N]: ')
if sys.version_info[0] >= 3:
choice = str(input()).lower()
else:
choice = raw_input().lower()
if choice in validAnswers :
return validAnswers[choice]
else :
log.info("please respond with 'y', 'yes', 'n' or 'no'")
#-------------------------------------------------------------------------------
def url_download_hook(count, block_size, total_size) :
"""a download progress hook for urllib"""
percent = int(count * block_size * 100 / total_size)
sys.stdout.write('\r{}%'.format(percent))
#-------------------------------------------------------------------------------
def get_host_platform() :
"""get the current host platform name (osx, linux or win)
:returns: platform name (osx, linux, win)
"""
plat = platform.system()
if "CYGWIN_NT" in plat:
return host_platforms['Linux']
return host_platforms[platform.system()]
#-------------------------------------------------------------------------------
def get_cfg_target_list(fips_dir, proj_dir, cfg):
proj_name = get_project_name_from_dir(proj_dir)
build_dir = get_build_dir(fips_dir, proj_name, cfg)
targets_path = build_dir + '/fips_targets.yml'
if os.path.isfile(targets_path) :
targets = []
with open(targets_path) as f :
targets = yaml.load(f)
return True, targets
else :
return False, []
#-------------------------------------------------------------------------------
def get_cfg_headersdirs_by_target(fips_dir, proj_dir, cfg):
proj_name = get_project_name_from_dir(proj_dir)
build_dir = get_build_dir(fips_dir, proj_name, cfg)
path = build_dir + '/fips_headerdirs.yml'
if os.path.isfile(path):
headerdirs = {}
with open(path) as f:
headerdirs = yaml.load(f)
return True, headerdirs
else:
return False,{}
#-------------------------------------------------------------------------------
def get_cfg_defines_by_target(fips_dir, proj_dir, cfg):
proj_name = get_project_name_from_dir(proj_dir)
build_dir = get_build_dir(fips_dir, proj_name, cfg)
path = build_dir + '/fips_defines.yml'
if os.path.isfile(path):
defines = {}
with open(path) as f:
defines = yaml.load(f)
return True,defines
else:
return False,{}
#-------------------------------------------------------------------------------
def get_num_cpucores():
try :
return multiprocessing.cpu_count()
except NotImplementedError :
return 2
|
|
from __future__ import unicode_literals
import os
import sys
import json
from copy import deepcopy
from itertools import count
from threading import RLock, Event
from datetime import datetime, timedelta
from collections import Mapping, MutableMapping
import six
from ws4py.client.threadedclient import WebSocketClient
import sideboard.lib
from sideboard.lib import log, config, stopped, on_startup, on_shutdown, DaemonTask, Caller
class _WebSocketClientDispatcher(WebSocketClient):
def __init__(self, dispatcher, url, ssl_opts=None):
self.connected = False
self.dispatcher = dispatcher
WebSocketClient.__init__(self, url, ssl_options=ssl_opts)
def pre_connect(self):
pass
def connect(self, *args, **kwargs):
self.pre_connect()
WebSocketClient.connect(self, *args, **kwargs)
self.connected = True
def close(self, code=1000, reason=''):
try:
WebSocketClient.close(self, code=code, reason=reason)
except:
pass
try:
WebSocketClient.close_connection(self)
except:
pass
self.connected = False
def send(self, data):
log.debug('sending {!r}', data)
assert self.connected, 'tried to send data on closed websocket {!r}'.format(self.url)
if isinstance(data, Mapping):
data = json.dumps(data)
return WebSocketClient.send(self, data)
def received_message(self, message):
message = message.data if isinstance(message.data, six.text_type) else message.data.decode('utf-8')
log.debug('received {!r}', message)
try:
message = json.loads(message)
except:
log.debug('failed to parse incoming message', exc_info=True)
finally:
self.dispatcher.defer(message)
class _Subscriber(object):
def __init__(self, method, src_client, dst_client, src_ws, dest_ws):
self.method, self.src_ws, self.dest_ws, self.src_client, self.dst_client = method, src_ws, dest_ws, src_client, dst_client
def unsubscribe(self):
self.dest_ws.unsubscribe(self.dst_client)
def callback(self, data):
self.src_ws.send(data=data, client=self.src_client)
def errback(self, error):
self.src_ws.send(error=error, client=self.src_client)
def __call__(self, *args, **kwargs):
self.dest_ws.subscribe({
'client': self.dst_client,
'callback': self.callback,
'errback': self.errback
}, self.method, *args, **kwargs)
return self.src_ws.NO_RESPONSE
def __del__(self):
self.unsubscribe()
class WebSocket(object):
"""
Utility class for making websocket connections. This improves on the ws4py
websocket client classes mainly by adding several features:
- automatically detecting dead connections and re-connecting
- utility methods for making synchronous rpc calls and for making
asynchronous subscription calls with callbacks
- adding locking to make sending messages thread-safe
"""
poll_method = 'sideboard.poll'
WebSocketDispatcher = _WebSocketClientDispatcher
def __init__(self, url=None, ssl_opts=None, connect_immediately=True, max_wait=2):
self.ws = None
self.url = url or 'ws://127.0.0.1:{}/wsrpc'.format(config['cherrypy']['server.socket_port'])
self._lock = RLock()
self._callbacks = {}
self._counter = count()
self.ssl_opts = ssl_opts
self._reconnect_attempts = 0
self._last_poll, self._last_reconnect_attempt = None, None
self._dispatcher = Caller(self._dispatch, threads=1)
self._checker = DaemonTask(self._check, interval=1)
if connect_immediately:
self.connect(max_wait=max_wait)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def preprocess(self, method, params):
"""
Each message we send has its parameters passed to this function and
the actual parameters sent are whatever this function returns. By
default this just returns the message unmodified, but plugins can
override this to add whatever logic is needed. We pass the method
name in its full "service.method" form in case the logic depends on
the service being invoked.
"""
return params
@property
def _should_reconnect(self):
interval = min(config['ws.reconnect_interval'], 2 ** self._reconnect_attempts)
cutoff = datetime.now() - timedelta(seconds=interval)
return not self.connected and (self._reconnect_attempts == 0 or self._last_reconnect_attempt < cutoff)
@property
def _should_poll(self):
cutoff = datetime.now() - timedelta(seconds=config['ws.poll_interval'])
return self.connected and (self._last_poll is None or self._last_poll < cutoff)
def _check(self):
if self._should_reconnect:
self._reconnect()
if self._should_poll:
self._poll()
def _poll(self):
assert self.ws and self.ws.connected, 'cannot poll while websocket is not connected'
try:
self.call(self.poll_method)
except:
log.warning('no poll response received from {!r}, closing connection, will attempt to reconnect', self.url, exc_info=True)
self.ws.close()
else:
self._last_poll = datetime.now()
def _refire_subscriptions(self):
try:
for cb in self._callbacks.values():
if 'client' in cb:
params = cb['paramback']() if 'paramback' in cb else cb['params']
self._send(method=cb['method'], params=params, client=cb['client'])
except:
pass # self._send() already closes and logs on error
def _reconnect(self):
with self._lock:
assert not self.connected, 'connection is still active'
try:
self.ws = self.WebSocketDispatcher(self._dispatcher, self.url, ssl_opts=self.ssl_opts)
self.ws.connect()
except Exception as e:
log.warn('failed to connect to {}: {}', self.url, str(e))
self._last_reconnect_attempt = datetime.now()
self._reconnect_attempts += 1
else:
self._reconnect_attempts = 0
self._refire_subscriptions()
def _next_id(self, prefix):
return '{}-{}'.format(prefix, next(self._counter))
def _send(self, **kwargs):
log.debug('sending {}', kwargs)
with self._lock:
assert self.connected, 'tried to send data on closed websocket {!r}'.format(self.url)
try:
return self.ws.send(kwargs)
except:
log.warn('failed to send {!r} on {!r}, closing websocket and will attempt to reconnect', kwargs, self.url)
self.ws.close()
raise
def _dispatch(self, message):
log.debug('dispatching {}', message)
try:
assert isinstance(message, Mapping), 'incoming message is not a dictionary'
assert 'client' in message or 'callback' in message, 'no callback or client in message {}'.format(message)
id = message.get('client') or message.get('callback')
assert id in self._callbacks, 'unknown dispatchee {}'.format(id)
except AssertionError:
self.fallback(message)
else:
if 'error' in message:
self._callbacks[id]['errback'](message['error'])
else:
self._callbacks[id]['callback'](message.get('data'))
def fallback(self, message):
"""
Handler method which is called for incoming websocket messages which
aren't valid responses to an outstanding call or subscription. By
default this just logs an error message. You can override this by
        subclassing this class, or just by assigning a handler method, e.g.
>>> ws = WebSocket()
>>> ws.fallback = some_handler_function
>>> ws.connect()
"""
_, exc, _ = sys.exc_info()
log.error('no callback registered for message {!r}, message ignored: {}', message, exc)
@property
def connected(self):
"""boolean indicating whether or not this connection is currently active"""
return bool(self.ws) and self.ws.connected
def connect(self, max_wait=0):
"""
Start the background threads which connect this websocket and handle RPC
dispatching. This method is safe to call even if the websocket is already
connected. You may optionally pass a max_wait parameter if you want to
wait for up to that amount of time for the connection to go through; if
that amount of time elapses without successfully connecting, a warning
message is logged.
"""
self._checker.start()
self._dispatcher.start()
for i in range(10 * max_wait):
if not self.connected:
stopped.wait(0.1)
else:
break
else:
if max_wait:
log.warn('websocket {!r} not connected after {} seconds', self.url, max_wait)
def close(self):
"""
Closes the underlying websocket connection and stops background tasks.
This method is always safe to call; exceptions will be swallowed and
logged, and calling close on an already-closed websocket is a no-op.
"""
self._checker.stop()
self._dispatcher.stop()
if self.ws:
self.ws.close()
def subscribe(self, callback, method, *args, **kwargs):
"""
Send a websocket request which you expect to subscribe you to a channel
with a callback which will be called every time there is new data, and
return the client id which uniquely identifies this subscription.
Callback may be either a function or a dictionary in the form
{
'callback': <function>,
'errback': <function>, # optional
            'paramback': <function>, # optional
'client': <string> # optional
}
Both callback and errback take a single argument; for callback, this is
the return value of the method, for errback it is the error message
returning. If no errback is specified, we will log errors at the ERROR
level and do nothing further.
The paramback function exists for subscriptions where we might want to
pass different parameters every time we reconnect. This might be used
for e.g. time-based parameters. This function takes no arguments and
returns the parameters which should be passed every time we connect
and fire (or re-fire) all of our subscriptions.
The client id is automatically generated if omitted, and you should not
set this yourself unless you really know what you're doing.
The positional and keyword arguments passed to this function will be
used as the arguments to the remote method, unless paramback is passed,
in which case that will be used to generate the params, and args/kwargs
will be ignored.
"""
client = self._next_id('client')
if isinstance(callback, Mapping):
assert 'callback' in callback, 'callback is required'
client = callback.setdefault('client', client)
self._callbacks[client] = callback
else:
self._callbacks[client] = {
'client': client,
'callback': callback
}
paramback = self._callbacks[client].get('paramback')
params = self.preprocess(method, paramback() if paramback else (args or kwargs))
self._callbacks[client].setdefault('errback', lambda result: log.error('{}(*{}, **{}) returned an error: {!r}', method, args, kwargs, result))
self._callbacks[client].update({
'method': method,
'params': params
})
try:
self._send(method=method, params=params, client=client)
except:
log.warn('initial subscription to {} at {!r} failed, will retry on reconnect', method, self.url)
return client
def unsubscribe(self, client):
"""
Cancel the websocket subscription identified by the specified client id.
This id is returned from the subscribe() method, e.g.
>>> client = ws.subscribe(some_callback, 'foo.some_function')
>>> ws.unsubscribe(client)
"""
self._callbacks.pop(client, None)
try:
self._send(action='unsubscribe', client=client)
except:
pass
def call(self, method, *args, **kwargs):
"""
Send a websocket rpc method call, then wait for and return the eventual
response, or raise an exception if we get back an error. This method
will raise an AssertionError after 10 seconds if no response of any
kind was received. The positional and keyword arguments to this method
are used as the arguments to the rpc function call.
"""
finished = Event()
result, error = [], []
callback = self._next_id('callback')
self._callbacks[callback] = {
'callback': lambda response: (result.append(response), finished.set()),
'errback': lambda response: (error.append(response), finished.set())
}
params = self.preprocess(method, args or kwargs)
try:
self._send(method=method, params=params, callback=callback)
except:
self._callbacks.pop(callback, None)
raise
wait_until = datetime.now() + timedelta(seconds=config['ws.call_timeout'])
while datetime.now() < wait_until:
finished.wait(0.1)
if stopped.is_set() or result or error:
break
self._callbacks.pop(callback, None)
assert not stopped.is_set(), 'websocket closed before response was received'
assert result, error[0] if error else 'no response received for 10 seconds'
return result[0]
def make_caller(self, method):
"""
Returns a function which calls the specified method; useful for creating
callbacks, e.g.
>>> authenticate = ws.make_caller('auth.authenticate')
>>> authenticate('username', 'password')
True
Sideboard supports "passthrough subscriptions", e.g.
-> a browser makes a subscription for the "foo.bar" method
-> the server has "foo" registered as a remote service
-> the server creates its own subscription to "foo.bar" on the remote
service and passes all results back to the client as they arrive
This method implements that by checking whether it was called from a
thread with an active websocket as part of a subscription request. If
so then in addition to returning a callable, it also registers the
new subscription with the client websocket so it can be cleaned up when
the client websocket closes and/or when its subscription is canceled.
"""
client = sideboard.lib.threadlocal.get_client()
originating_ws = sideboard.lib.threadlocal.get('websocket')
if client and originating_ws:
sub = originating_ws.passthru_subscriptions.get(client)
if not sub:
sub = _Subscriber(method=method, src_client=client, dst_client=self._next_id('client'), src_ws=originating_ws, dest_ws=self)
originating_ws.passthru_subscriptions[client] = sub
return sub
else:
return lambda *args, **kwargs: self.call(method, *args, **kwargs)
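# Hypothetical usage sketch of WebSocket (url and method names are
# illustrative, not part of this module):
#   ws = WebSocket('ws://localhost:8282/wsrpc')
#   users = ws.call('auth.list_users')                  # synchronous rpc call
#   client = ws.subscribe(print, 'auth.list_users')     # async subscription
#   ws.unsubscribe(client)
#   ws.close()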
class Model(MutableMapping):
"""
Utility class for representing database objects found in the databases of
other Sideboard plugins. Instances of this class can have their values accessed
as either attributes or dictionary keys.
"""
_prefix = None
_unpromoted = ()
_defaults = None
def __init__(self, data, prefix=None, unpromoted=None, defaults=None):
assert prefix or self._prefix
object.__setattr__(self, '_data', deepcopy(data))
object.__setattr__(self, '_orig_data', deepcopy(data))
object.__setattr__(self, '_prefix', (prefix or self._prefix) + '_')
object.__setattr__(self, '_project_key', self._prefix + 'data')
object.__setattr__(self, '_unpromoted', self._unpromoted if unpromoted is None else unpromoted)
object.__setattr__(self, '_defaults', defaults or self._defaults or {})
@property
def query(self):
assert self.id, 'id was not set'
assert self._model, '_model was not set'
return {'_model': self._model, 'field': 'id', 'value': self.id}
@property
def dirty(self):
return {k: v for k, v in self._data.items() if v != self._orig_data.get(k)}
def to_dict(self):
data = deepcopy(self._data)
serialized = {k: v for k, v in data.pop(self._project_key, {}).items()}
for k in list(data.get('extra_data', {}).keys()):
if k.startswith(self._prefix):
serialized[k[len(self._prefix):]] = data['extra_data'].pop(k)
elif k in self._unpromoted:
serialized[k] = data['extra_data'].pop(k)
serialized.update(data)
return serialized
@property
def _extra_data(self):
return self._data.setdefault('extra_data', {})
def _extra_data_key(self, key):
return ('' if key in self._unpromoted else self._prefix) + key
def __len__(self):
return len(self._data) + len(self._extra_data) + len(self._data.get(self._project_key, {}))
def __setitem__(self, key, value):
assert key != 'id' or value == self.id, 'id is not settable'
if key in self._data:
self._data[key] = value
elif self._project_key in self._data:
self._extra_data.pop(self._prefix + key, None)
self._data[self._project_key][key] = value
else:
self._extra_data[self._extra_data_key(key)] = value
def __getitem__(self, key):
if key in self._data:
return self._data[key]
elif key in self._data.get(self._project_key, {}):
return self._data[self._project_key][key]
else:
return self._extra_data.get(self._extra_data_key(key), self._defaults.get(key))
def __delitem__(self, key):
if key in self._data:
del self._data[key]
elif key in self._data.get(self._project_key, {}):
del self._data[self._project_key][key]
else:
self._extra_data.pop(self._extra_data_key(key), None)
def __iter__(self):
return iter(k for k in self.to_dict() if k != 'extra_data')
def __repr__(self):
return repr(dict(self.items()))
def __getattr__(self, name):
return self.__getitem__(name)
def __setattr__(self, name, value):
return self.__setitem__(name, value)
def __delattr__(self, name):
self.__delitem__(name)
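# Hypothetical usage sketch of Model (field names are illustrative): keys that
# are not part of the base data are namespaced into extra_data under the
# plugin prefix, and reads fall back to the declared defaults:
#   m = Model({'id': '123', 'extra_data': {'myplugin_nickname': 'Bob'}},
#             prefix='myplugin')
#   m.nickname            # -> 'Bob'
#   m.nickname = 'Rob'    # stored as extra_data['myplugin_nickname']
#   m.dirty               # -> top-level keys whose values changed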
class Subscription(object):
"""
Utility class for opening a websocket to a given destination, subscribing to an rpc call,
and processing the response.
>>> logged_in_users = Subscription('admin.get_logged_in_users')
>>> logged_in_users.result # this will always be the latest return value of your rpc method
If you want to do postprocessing on the results, you can override the "callback" method:
>>> class UserList(Subscription):
... def __init__(self):
... self.usernames = []
... Subscription.__init__(self, 'admin.get_logged_in_users')
...
... def callback(self, users):
... self.usernames = [user['username'] for user in users]
...
>>> users = UserList()
The above code gives you a "users" object with a "usernames" attribute; when Sideboard
starts, it opens a websocket connection to whichever remote server defines the "admin"
service (as defined in the rpc_services config section), then subscribes to the
"admin.get_logged_in_users" method and calls the "callback" method on every response.
"""
def __init__(self, rpc_method, *args, **kwargs):
self.result = None
connect_immediately = kwargs.pop('connect_immediately', False)
self.method, self.args, self.kwargs = rpc_method, args, kwargs
self.ws = sideboard.lib.services.get_websocket(rpc_method.split('.')[0])
on_startup(self._subscribe)
on_shutdown(self._unsubscribe)
if connect_immediately:
self.ws.connect(max_wait=2)
self._subscribe()
def _subscribe(self):
self._client_id = self.ws.subscribe(self._callback, self.method, *self.args, **self.kwargs)
def _unsubscribe(self):
self.ws.unsubscribe(self._client_id)
def refresh(self):
"""
re-fire your subscription method and invoke the callback method with
the response; this will manually check for changes if you are
subscribed to a method which by design doesn't re-fire on every change
"""
assert self.ws.connected, 'cannot refresh {}: websocket not connected'.format(self.method)
self._callback(self.ws.call(self.method, *self.args, **self.kwargs))
def _callback(self, response_data):
self.result = response_data
self.callback(response_data)
def callback(self, response_data):
"""override this to define what to do with your rpc method return values"""
class MultiSubscription(object):
"""
A version of the Subscription utility class which subscribes to an arbitrary
number of remote servers and aggregates the results from each. You invoke
this similarly to Subscription class, with two main differences:
1) The first parameter is a list of hostnames to which we should connect.
Each hostname will have a websocket registered for it if one does not
already exist, using the standard config options under [rpc_services].
2) Unlike the Subscription class, we do not support the connect_immediately
parameter. Because this class looks in the [rpc_services] config section
of every plugin to find the client cert settings, we need to wait for all
plugins to be loaded before trying to connect.
Like the Subscription class, you can instantiate this class directly, e.g.
>>> logged_in_users = MultiSubscription(['host1', 'host2'], 'admin.get_logged_in_users')
>>> logged_in_users.results # this will always be the latest return values of your rpc method
The "results" attribute is a dictionary whose keys are the websocket objects
used to connect to each host, and whose values are the latest return values
from each of those websockets. Hosts for which we have not yet received a
response will have no key/value pair in the "results" dictionary.
If you want to do postprocessing on the results, you can subclass this and
override the "callback" method, e.g.
>>> class UserList(MultiSubscription):
... def __init__(self):
... self.usernames = set()
... MultiSubscription.__init__(self, ['host1', 'host2'], 'admin.get_logged_in_users')
...
... def callback(self, users, ws):
... self.usernames.update(user['username'] for user in users)
...
>>> users = UserList()
The above code gives you a "users" object with a "usernames" attribute; when Sideboard
starts, it opens websocket connections to 'host1' and 'host2', then subscribes to the
"admin.get_logged_in_users" method and calls the "callback" method on every response.
"""
def __init__(self, hostnames, rpc_method, *args, **kwargs):
from sideboard.lib import listify
self.hostnames, self.method, self.args, self.kwargs = listify(hostnames), rpc_method, args, kwargs
self.results, self.websockets, self._client_ids = {}, {}, {}
on_startup(self._subscribe)
on_shutdown(self._unsubscribe)
def _websocket(self, url, ssl_opts):
from sideboard.lib import services
return services._register_websocket(url, ssl_opts=ssl_opts)
def _subscribe(self):
from sideboard.lib._services import _ws_url, _rpc_opts, _ssl_opts
for hostname in self.hostnames:
rpc_opts = _rpc_opts(hostname)
self.websockets[hostname] = self._websocket(_ws_url(hostname, rpc_opts), _ssl_opts(rpc_opts))
for ws in self.websockets.values():
self._client_ids[ws] = ws.subscribe(self._make_callback(ws), self.method, *self.args, **self.kwargs)
def _unsubscribe(self):
for ws in self.websockets.values():
ws.unsubscribe(self._client_ids.get(ws))
def _make_callback(self, ws):
return lambda result_data: self._callback(result_data, ws)
def _callback(self, response_data, ws):
self.results[ws] = response_data
self.callback(response_data, ws)
def callback(self, result_data, ws):
"""override this to define what to do with your rpc method return values"""
def refresh(self):
"""
Sometimes we want to manually re-fire all of our subscription methods to
get the latest data. This is useful in cases where the remote server
isn't necessarily programmed to always push the latest data as soon as
it's available, usually for performance reasons. This method allows the
client to get the latest data more often than the server is programmed
to provide it.
"""
for ws in self.websockets.values():
try:
                self._callback(ws.call(self.method, *self.args, **self.kwargs), ws)
except:
log.warn('failed to fetch latest data from {} on {}', self.method, ws.url)
|
|
#!/usr/bin/python
#---------------------------------------------------------------------------------------------------
# Show the TA assignments in the database that have been made for a specific semester.
#
#---------------------------------------------------------------------------------------------------
import sys,re,os
import MySQLdb
import Database
def termString(period):
    # convert the period code (e.g. F2013) into a human-readable term string
    ts = period
    if period[0] == "I":
        ts = "IAP %s"%(period[1:])
    elif period[0] == "S":
        ts = "Spring %s"%(period[1:])
    elif period[0] == "F":
        ts = "Fall %s"%(period[1:])
    return ts
debug = False #debug = True
check = False
dataDir = os.getenv('TAPAS_TOOLS_DATA','./')
os.chdir(dataDir)
usage = " usage: showAssignment.py <semesterId> <taskType> [ <printEmail> ]\n\n"
usage += " semesterId identification string for a specific semster\n"
usage += " ex. F2013 (Fall 2013), I2013 (IAP 2013), S2013 (Spring 2013)\n"
usage += " taskType description of the type of assignment\n"
usage += " 'full' (fulltime, includes 2xhalf), 'part' (10%,20%IAP)\n\n"
usage += " printEmail generate emails and print email linux commands\n"
usage += " def = '', activate with 'email'\n\n"
if len(sys.argv) < 3:
print "\n ERROR - need to specify the semester id and the relevant task type.\n"
print usage
sys.exit(0)
period = sys.argv[1]
taskType = sys.argv[2]
printEmail = ''
if len(sys.argv) > 3:
printEmail = sys.argv[3]
if taskType == "part":
taskType = "part"
else:
taskType = "full"
# set term string
termString = termString(period)
# Open database connection
db = Database.DatabaseHandle()
# Prepare a cursor object using cursor() method
cursor = db.getCursor()
# Make a new container of all courses
courses = Database.Container()
rc = courses.fillWithCourses(db.handle,debug)
if rc != 0:
print " ERROR - filling courses."
# disconnect from server
db.disco()
sys.exit()
# Make a new container of all teachers
teachers = Database.Container()
rc = teachers.fillWithTeachers(db.handle)
if rc != 0:
print " ERROR - filling teachers."
# disconnect from server
db.disco()
sys.exit()
# Make a new container of all students
students = Database.Container()
rc = students.fillWithStudents(db.handle)
if rc != 0:
print " ERROR - filling students."
# disconnect from server
db.disco()
sys.exit()
# Remember active courses, teachers and students
activeCourses = Database.Container()
activeTeachers = Database.Container()
activeStudents = Database.Container()
#---------------------------------------------------------------------------------------------------
# Make a complete list of all assignments
#---------------------------------------------------------------------------------------------------
assignments = {}
# Prepare SQL query to select a record from the database.
sql = "select * from Assignments where Term = '" + period + "';"
try:
    # Execute the SQL command with a bound parameter (avoids manual quoting)
    cursor.execute(sql, (period,))
# Fetch results
results = cursor.fetchall()
for row in results:
term = row[0]
task = row[1]
email = row[2]
# deal with empty assignments first
        if email is None or email == '':
print ' WARNING - empty assignment for task: ' + task
continue
if debug:
print " TASK : %s EMAIL: %s"%(task,email)
# decode the course number
number = task.split('-')[1]
# flags to test whether person is part of the database
isTeacher = False
isStudent = False
# find corresponding course in our courses list
try:
course = courses.retrieveElement(number);
#print "%-20s"%(number)
activeCourses.addElement(number,course)
except:
print " ERROR - course is not in our master table (%s)."%(number)
# disconnect from server
db.disco()
sys.exit()
        # check whether this person is a teacher in our teachers list
try:
teacher = teachers.retrieveElement(email);
activeTeachers.addElement(email,teacher)
isTeacher = True
# Add teaching teacher to the course
if task.split('-')[2] == 'Lec': ## and task.split('-')[3] == '1':
course = activeCourses.retrieveElement(number);
course.setTeacher(email)
if debug:
course.show()
elif task.split('-')[2] == 'Adm':
course = activeCourses.retrieveElement(number);
course.setAdmin(email)
except:
#print " Not a teacher (%s)"%(email)
teacher = 0
# find the student in our students list
try:
student = students.retrieveElement(email)
activeStudents.addElement(email,student)
isStudent = True
except:
#print " Not a student (%s)"%(email)
student = 0
# store assignment
try:
tmp = assignments[email]
assignments[email] = tmp + ',' + task
except:
assignments[email] = task
# did we find the person in the database
if isStudent or isTeacher:
if check:
print " Found the person."
else:
print " ERROR -- Did not find the person: " + email + " (task: " + task + ")"
#db.disco()
#sys.exit(0)
except Exception as e:
    print " ERROR - unable to complete ACTIVE elements loop: %s"%(e)
# disconnect from server
db.disco()
#---------------------------------------------------------------------------------------------------
# Prepare the assignment emails
#---------------------------------------------------------------------------------------------------
# prepare unique list of students that get assignments (there could be several)
departmentEmail = os.getenv('TAPAS_TOOLS_DEPEML','[email protected]')
preAssignment = [ ]
preEmails = ''
teachersEmails = ''
with open("%s/eml/%s/distributor.csv"%(dataDir,period),"w") as f:
f.write("TERM+EMAIL+FIRST_NAME+LAST_NAME+CC+COURSE+PS\n")
for key, assignment in assignments.iteritems():
if debug:
print "\n\n# NEXT # Key: " + key + ' --> ' + assignment
# try:
if True:
try:
student = activeStudents.retrieveElement(key)
except:
if debug:
print ' Not a student (%s) ... moving on to next entry.'%(key)
# make sure to add up all the teachers emails
if re.search('-Lec-',assignment):
if teachersEmails == '':
teachersEmails = key
else:
teachersEmails += "," + key
continue
if debug:
print "\n Assignment for %s %s (%s)"%(student.firstName,student.lastName,key)
if preEmails == '':
preEmails = key
else:
preEmails += "," + key
# filename for the email
filename = period + "_" + taskType + "_" + student.firstName + "_" + student.lastName
# reset the assignment string
assignString = ""
# construct the '*visors' email
additionalCc = student.advisorEmail
if student.supervisorEmail != "?":
additionalCc += ',' + student.supervisorEmail
for task in assignment.split(','):
if assignString != "":
assignString += "\\n"
term = task.split('-')[0]
number = task.split('-')[1]
type = task.split('-')[2]
filename += "_" + number
if debug:
print " FileName: %s"%(filename)
if ((taskType == 'full' and (re.search('TaF',type) or re.search('TaH',type))) or \
(taskType == 'part' and re.search('TaP',type)) ):
if debug:
print " Number: %s"%(number)
try:
course = activeCourses.retrieveElement(number)
except:
print '\n ERROR - Not a registered course (%s). EXIT!\n'%(number)
sys.exit(1)
if debug:
print " Admin: %s"%(course.admin)
try:
teacher = activeTeachers.retrieveElement(course.admin)
except:
print '\n ERROR - Not a registered teacher (%s). EXIT!\n'%(course.admin)
sys.exit(1)
if debug:
print " Course: " + number + " Teacher: " + course.admin
psString = "PS: Please add 12 units of 8.399 to your registration."
if type[2:4] == "PU":
tmp = "%-14s, %-14s TA (U) - %-6s %-40s %s %s (%s)"%\
(student.lastName,student.firstName,course.number,course.name, \
teacher.firstName,teacher.lastName,teacher.eMail)
preAssignment.append(tmp)
psString = ""
assignString += " Part-time Utility TA in course " + course.number + \
" (" + course.name + ") administered by " + \
teacher.firstName + " " + teacher.lastName + \
" (" + teacher.eMail + ")"
elif type[3] == "U":
tmp = "%-14s, %-14s TA (U) - %-6s %-40s %s %s (%s)"%\
(student.lastName,student.firstName,course.number,course.name, \
teacher.firstName,teacher.lastName,teacher.eMail)
preAssignment.append(tmp)
assignString += " Utility TA in course " + course.number + \
" (" + course.name + ") administered by " + \
teacher.firstName + " " + teacher.lastName + \
" (" + teacher.eMail + ")"
elif type[3] == "R" or type[3] == "L":
tmp = "%-14s, %-14s TA (R) - %-6s %-40s %s %s (%s)"%\
(student.lastName,student.firstName,course.number,course.name, \
teacher.firstName,teacher.lastName,teacher.eMail)
preAssignment.append(tmp)
assignString += " Recitation TA in course " + course.number + \
" (" + course.name + ") administered by " + \
teacher.firstName + " " + teacher.lastName + \
" (" + teacher.eMail + ")"
else:
assignString += " ERROR - Unknown TA type found: " + type[3]
                    # add the additional teacher to be copied
##additionalCc += "," + teacher.eMail
if course.admin != '[email protected]' and course.admin != 'EMPTY':
additionalCc += "," + course.admin
if debug:
print assignString
if assignString == "":
if debug:
print "No type match %s %s %s %s\n"% \
(student.firstName,student.lastName,key,assignment)
continue
filename += ".eml"
print "\n" + term + " " + student.firstName + " " + student.lastName + "\n" \
+ assignString
cc = "%s,%s"%(additionalCc,departmentEmail)
f.write("%s+%s+%s+%s+%s+%s+%s\n"\
%(termString,student.eMail,student.firstName,student.lastName,cc,assignString,psString))
if printEmail == "email":
cmd = "generateEmail.sh '" + term + "' \"" + student.firstName + " " \
+ student.lastName + "\" '" + assignString +"' \"" + filename + "\" " + taskType
if debug:
print " CMD: " + cmd
os.system(cmd)
print " mail -S [email protected] " + "-c " + additionalCc + "," + departmentEmail \
+ " -s \'TA Assignment " + term + " (" + student.firstName + " " \
+ student.lastName + ")\' " + student.eMail + " < " + dataDir + "/spool/" + filename
# except:
# student = 0
#
#---------------------------------------------------------------------------------------------------
# Print out (pre-)assignment Summary
#---------------------------------------------------------------------------------------------------
print "\nEMAIL TEXT for pre-assignments"
preAssignment.sort();
for task in preAssignment:
print task
print "\nEMAIL ADDRESS for pre-assignments"
print preEmails
print "\nEMAIL ADDRESS for feedback"
print teachersEmails
sys.exit(0)
|
|
#!/usr/bin/env python
"""
This application presents a 'console' prompt to the user asking for Who-Is
commands, which create and broadcast the related APDUs; then, for each I-Am
response that is returned, it reads the object name of the responding device
object (often called simply the device name).
"""
import sys
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ConfigArgumentParser
from bacpypes.consolecmd import ConsoleCmd
from bacpypes.core import run, enable_sleeping
from bacpypes.iocb import IOCB
from bacpypes.pdu import Address, GlobalBroadcast
from bacpypes.apdu import WhoIsRequest, ReadPropertyRequest, ReadPropertyACK
from bacpypes.primitivedata import CharacterString
from bacpypes.errors import MissingRequiredParameter
from bacpypes.app import BIPForeignApplication
from bacpypes.local.device import LocalDeviceObject
# some debugging
_debug = 1
_log = ModuleLogger(globals())
# globals
this_device = None
this_application = None
#
# DiscoveryApplication
#
@bacpypes_debugging
class DiscoveryApplication(BIPForeignApplication):
def __init__(self, *args):
if _debug: DiscoveryApplication._debug("__init__ %r", args)
BIPForeignApplication.__init__(self, *args)
# keep track of requests to line up responses
self.who_is_request = None
def request(self, apdu):
"""Sniff for Who-Is requests going downstream."""
if _debug: DiscoveryApplication._debug("request %r", apdu)
# save a copy of just the Who-Is request
if isinstance(apdu, WhoIsRequest):
self.who_is_request = apdu
# forward it along
BIPForeignApplication.request(self, apdu)
def do_IAmRequest(self, apdu):
"""Do something with incoming I-Am requests."""
if _debug: DiscoveryApplication._debug("do_IAmRequest %r", apdu)
# check for required parameters
if apdu.iAmDeviceIdentifier is None:
raise MissingRequiredParameter("iAmDeviceIdentifier required")
if apdu.maxAPDULengthAccepted is None:
raise MissingRequiredParameter("maxAPDULengthAccepted required")
if apdu.segmentationSupported is None:
raise MissingRequiredParameter("segmentationSupported required")
if apdu.vendorID is None:
raise MissingRequiredParameter("vendorID required")
# extract the device instance number
device_instance = apdu.iAmDeviceIdentifier[1]
if _debug: DiscoveryApplication._debug(" - device_instance: %r", device_instance)
# extract the source address
device_address = apdu.pduSource
if _debug: DiscoveryApplication._debug(" - device_address: %r", device_address)
# we didn't request anything yet
if not self.who_is_request:
return
if (self.who_is_request.deviceInstanceRangeLowLimit is not None) and \
(device_instance < self.who_is_request.deviceInstanceRangeLowLimit):
pass
elif (self.who_is_request.deviceInstanceRangeHighLimit is not None) and \
(device_instance > self.who_is_request.deviceInstanceRangeHighLimit):
pass
else:
# build a request for the object name
request = ReadPropertyRequest(
destination=apdu.pduSource,
objectIdentifier=apdu.iAmDeviceIdentifier,
propertyIdentifier='objectName',
)
# make an IOCB
iocb = IOCB(request)
if _debug: DiscoveryApplication._debug(" - iocb: %r", iocb)
            # let us know when it's complete
iocb.add_callback(self.device_discovered)
# give it to the application
self.request_io(iocb)
def device_discovered(self, iocb):
if _debug: DiscoveryApplication._debug("device_discovered %r", iocb)
# do something for error/reject/abort
if iocb.ioError:
sys.stdout.write(str(iocb.ioError) + '\n')
# do something for success
elif iocb.ioResponse:
apdu = iocb.ioResponse
# should be an ack
if not isinstance(apdu, ReadPropertyACK):
if _debug: DiscoveryApplication._debug(" - not an ack")
return
# pull out the name
device_name = apdu.propertyValue.cast_out(CharacterString)
if _debug: DiscoveryApplication._debug(" - device_name: %r", device_name)
# print out the response
sys.stdout.write("%s is at %s named %r\n" % (apdu.objectIdentifier[1], apdu.pduSource, device_name))
# do something with nothing?
else:
if _debug: DiscoveryApplication._debug(" - ioError or ioResponse expected")
#
# DiscoveryConsoleCmd
#
@bacpypes_debugging
class DiscoveryConsoleCmd(ConsoleCmd):
def do_whois(self, args):
"""whois [ <addr> ] [ <lolimit> <hilimit> ]"""
args = args.split()
if _debug: DiscoveryConsoleCmd._debug("do_whois %r", args)
try:
# gather the parameters
if (len(args) == 1) or (len(args) == 3):
addr = Address(args[0])
del args[0]
else:
addr = GlobalBroadcast()
if len(args) == 2:
lolimit = int(args[0])
hilimit = int(args[1])
else:
lolimit = hilimit = None
# code lives in the device service
this_application.who_is(lolimit, hilimit, addr)
except Exception as error:
DiscoveryConsoleCmd._exception("exception: %r", error)
def do_rtn(self, args):
"""rtn <addr> <net> ... """
args = args.split()
if _debug: DiscoveryConsoleCmd._debug("do_rtn %r", args)
# provide the address and a list of network numbers
router_address = Address(args[0])
network_list = [int(arg) for arg in args[1:]]
# pass along to the service access point
this_application.nsap.update_router_references(None, router_address, network_list)
#
# __main__
#
def main():
global this_device
global this_application
# parse the command line arguments
args = ConfigArgumentParser(description=__doc__).parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
# make a device object
this_device = LocalDeviceObject(ini=args.ini)
if _debug: _log.debug(" - this_device: %r", this_device)
# make a simple application
this_application = DiscoveryApplication(
this_device, args.ini.address,
Address(args.ini.foreignbbmd),
int(args.ini.foreignttl),
)
# make a console
this_console = DiscoveryConsoleCmd()
if _debug: _log.debug(" - this_console: %r", this_console)
# enable sleeping will help with threads
enable_sleeping()
_log.debug("running")
run()
_log.debug("fini")
if __name__ == "__main__":
main()
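#
# A minimal sketch of the INI file this script expects (passed via --ini). The key
# names follow the standard BACpypes sample configuration; every address and
# identifier below is a placeholder assumption, not a real value.
#
#   [BACpypes]
#   objectName: DiscoveryConsole
#   address: 192.168.1.10/24
#   objectIdentifier: 599
#   maxApduLengthAccepted: 1024
#   segmentationSupported: segmentedBoth
#   vendorIdentifier: 15
#   foreignBBMD: 192.168.1.1
#   foreignTTL: 30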
|
|
"""
Basic sorting algorithms and their variations
"""
import random
from Numerical import merge_sorted_lists
def insert_sort_inplace(arr):
"""
Sort all the elements in a list in place.
As the elements are picked off from one end
of the list, they are put in sorted order,
at the other end.
    RUNTIME: Best - O(n^2), Avg - O(n^2), Worst - O(n^2) (no early exit)
"""
arrlen = len(arr)
for i in range(1, arrlen):
for j in range(0, i):
if arr[i] < arr[j]:
arr[i], arr[j] = arr[j], arr[i]
return arr
def insert_sort_inplace_variation(arr):
"""
Same as 'insert_sort_inplace'. Difference
being that, newly inserted elements are
inserted from the end of the sorted part of
the list.
RUNTIME: Best - O(n), Avg - O(n^2), Worst - O(n^2)
"""
arrlen = len(arr)
for i in range(1, arrlen):
for j in range(i-1, -1, -1):
if arr[i] < arr[j]:
arr[i], arr[j] = arr[j], arr[i]
                i = j  # track the element as it moves left; the outer
                       # loop re-initializes 'i' on its next iteration
else:
break
return arr
#A useless algorithm.
#Not as bad as bogosort, bozosort.
def bubble_sort_inplace(arr):
"""
Traverse the list as many times as the
number of elements. In each traversal,
the largest element bubbles up to the top.
    RUNTIME: Best - O(n^2), Avg - O(n^2), Worst - O(n^2) (no swapped-flag early exit)
"""
arrlen = len(arr)
for i in range(arrlen-1, 0, -1):
for j in range(0, i):
if arr[j] > arr[j+1]:
arr[j], arr[j+1] = arr[j+1], arr[j]
return arr
#Utterly useless
def selection_sort_inplace(arr):
"""
Every iteration through the list, find
the smallest element and swap it with
the last index in the sorted part of the
list.
RUNTIME: Best - O(n^2), Avg - O(n^2), Worst - O(n^2)
"""
arrlen = len(arr)
for i in range(0, arrlen - 1):
cm = i
for j in range(i+1, arrlen):
if arr[j] < arr[cm]:
cm = j
arr[i], arr[cm] = arr[cm], arr[i]
return arr
def pivot_partition(arr, lefti, righti, pivoti):
"""
Partition algorithm used by median sort to split
items based on pivot. Left side of pivot are items
smaller than pivot. Right side are items greater
than pivot.
    RUNTIME: O(n) per call
"""
arr[righti], arr[pivoti] = arr[pivoti], arr[righti]
correct_pivoti = lefti
for i in range(lefti, righti):
if arr[i] <= arr[righti]:
arr[correct_pivoti], arr[i] = arr[i], arr[correct_pivoti]
correct_pivoti += 1
arr[correct_pivoti], arr[righti] = arr[righti], arr[correct_pivoti]
return correct_pivoti
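# Worked example (illustrative values): partitioning [3, 1, 4, 1, 5] around the
# pivot at index 2 (value 4) over the whole range:
#   >>> a = [3, 1, 4, 1, 5]
#   >>> pivot_partition(a, 0, 4, 2)
#   3
#   >>> a
#   [3, 1, 1, 4, 5]
# The pivot value 4 ends up at index 3; everything left of it is <= 4 and
# everything right of it is > 4.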
def median_element(arr, ki, lefti, righti):
"""
This finds the actual median element by partitioning
the array multiple times until median element is found
    RUNTIME: Avg - O(n), Worst - O(n^2) (randomized selection)
"""
while True:
pivoti = random.randint(lefti, righti)
newp = pivot_partition(arr, lefti, righti, pivoti)
if newp == ki:
break
elif ki < newp:
righti = newp - 1
else:
lefti = newp + 1
return arr
def median_sort_inplace(arr, lefti, righti):
#TODO: Runtime needs confirmation
"""
Sorts the elements by recursively finding the
median of the arr, their sub-arrays and so on.
RUNTIME: Best - O(n log n), Avg - O(n log n),
Worst - O(n^2)
"""
if righti <= lefti:
return
ki = lefti + (righti - lefti + 1)/2
median_element(arr, ki, lefti, righti)
median_sort_inplace(arr, lefti, ki-1)
median_sort_inplace(arr, ki+1, righti)
return arr
def merge_sort_inplace(arr, lefti, righti):
"""
Sorts the array by iteratively splitting the array
into smaller and smaller array until, the array
cannot be split anymore at which point they are
joined together using the algorithm to merge 2
sorted lists. The merges take place from inside out.
RUNTIME: Best - O(n log n), Avg - O(n log n), Worst - O(n log n)
"""
size = righti - lefti + 1
if size < 2:
return
half = size/2 + lefti - 1
merge_sort_inplace(arr, lefti, half)
merge_sort_inplace(arr, half+1, righti)
merge_sorted_lists(arr, lefti, half, half+1, righti)
return arr
def quicksort_middle_pivot(arr):
"""
Divide and Conquer algorithm. List broken down
into smaller and smaller lists and then sorted.
This is not inplace. Every recursive call
creates 2 extra lists. Middle element is used
as pivot
RUNTIME: Best - O(n log n), Avg - O(n log n), Worst - O(n^2)
"""
lenarr = len(arr)
if lenarr <= 1:
return arr
pivot = lenarr/2
lessp = list()
largerp = list()
for v in range(pivot):
if arr[v] <= arr[pivot]:
lessp.append(arr[v])
else:
largerp.append(arr[v])
for v in range(pivot+1, lenarr):
if arr[v] <= arr[pivot]:
lessp.append(arr[v])
else:
largerp.append(arr[v])
sleft = quicksort_middle_pivot(lessp)
sright = quicksort_middle_pivot(largerp)
sleft.append(arr[pivot])
sleft.extend(sright)
return sleft
def radix_sort(arr, minradix, maxradix):
#TODO: Take another parameter that gives the max number of
# digits/chars for any single element in array
"""
An implementation of MSD radix sort. Sorts elements
based on the individual digits in the number or
characters in a string. The runtime depends on the
size of the array and the number of digits or characters
in the biggest number or string respectively.
    RUNTIME: O(nk)
"""
if minradix >= maxradix:
return arr
buckets = dict()
for v in arr:
val_index = int(str(v)[minradix])
l = buckets.get(val_index, [])
l.append(v)
buckets[val_index] = l
for i in buckets.iterkeys():
if len(buckets[i])>1:
buckets[i] = radix_sort(buckets[i], minradix+1, maxradix)
l = list()
#There will always be only a maximum of 10 keys to sort.
#So 'k' is the number of times the keys of bucket will be sorted.
#'k' being the number of digits in largest number
for i in sorted(buckets.keys()):
l.extend(buckets[i])
return l
def bucket_sort():
#TODO:
pass
def quicksort_random_pivot():
#TODO:
pass
def heap_sort_arr(heap):
    """
    Sorts a list by first building a max-heap with the helpers
    from arr_heap, then repeatedly extracting the maximum and
    finally reversing the collected results.
    RUNTIME: O(n log n) with standard heap operations
    """
    from arr_heap import heapify, extract_max
    lheap = len(heap)
    heap = heapify(heap, lheap, cmp)
l = list()
while heap:
heap, mx = extract_max(heap, lheap)
lheap -= 1
l.append(mx)
return list(reversed(l))
def counting_sort(arr, mx):
"""
Used for sorting a list of elements N where
each element is in the range [0, k). Counting
sort is a good algorithm in situations where
N is much greater than k.
    RUNTIME: Best - O(n + k), Avg - O(n + k), Worst - O(n + k), where k = mx
"""
mx += 1 #This is to compensate for the fact that counting sort
#can only sort from [0, k).
s = [0] * mx
for v in arr:
s[v] += 1
idx = 0
for i in range(mx):
while s[i] > 0:
arr[idx] = i
idx += 1
s[i] -= 1
return arr
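# Small usage sketch (illustrative values): every element must lie in [0, mx],
# where mx is the largest value passed in.
#   >>> counting_sort([3, 0, 2, 3, 1], 3)
#   [0, 1, 2, 3, 3]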
def sorting_test(fn, rndlist, *args):
"""
This is a test function that takes a function
as input and calls the function with a random
list of numbers. Verifies that the returned
list is sorted.
"""
    rndlist = fn(rndlist, *args)
for i in range(1, len(rndlist)):
if rndlist[i] < rndlist[i-1]:
print rndlist
print "Error at Index: ", i
return False
return True
def mk_rnd_ls(mn, mx, totalnums):
l = [0] * totalnums
for i in range(totalnums):
l[i] = random.randint(mn, mx)
return l
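# A minimal self-test sketch using only this module's own helpers; the sample
# sizes and value ranges are arbitrary. Algorithms that need external modules
# (merge_sort_inplace, heap_sort_arr) are intentionally left out here.
if __name__ == '__main__':
    sample = mk_rnd_ls(0, 99, 50)
    for sorter in (insert_sort_inplace, insert_sort_inplace_variation,
                   bubble_sort_inplace, selection_sort_inplace,
                   quicksort_middle_pivot):
        assert sorting_test(sorter, list(sample)), sorter.__name__
    assert sorting_test(median_sort_inplace, list(sample), 0, len(sample) - 1)
    assert sorting_test(counting_sort, list(sample), max(sample))
    # radix_sort as written assumes fixed-width keys, so use 3-digit numbers
    assert sorting_test(radix_sort, [random.randint(100, 999) for _ in range(50)], 0, 3)
    print "all sorting self-tests passed"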
|
|
__all__ = ['Distribution']
import re
import os
import warnings
import numbers
import distutils.log
import distutils.core
import distutils.cmd
import distutils.dist
from distutils.errors import (DistutilsOptionError, DistutilsPlatformError,
DistutilsSetupError)
from distutils.util import rfc822_escape
from setuptools.extern import six
from setuptools.extern.six.moves import map
from pkg_resources.extern import packaging
from setuptools.depends import Require
from setuptools import windows_support
from setuptools.monkey import get_unpatched
import pkg_resources
def _get_unpatched(cls):
warnings.warn("Do not call this function", DeprecationWarning)
return get_unpatched(cls)
# Based on Python 3.5 version
def write_pkg_file(self, file):
"""Write the PKG-INFO format data to a file object.
"""
version = '1.0'
if (self.provides or self.requires or self.obsoletes or
self.classifiers or self.download_url):
version = '1.1'
# Setuptools specific for PEP 345
if hasattr(self, 'python_requires'):
version = '1.2'
file.write('Metadata-Version: %s\n' % version)
file.write('Name: %s\n' % self.get_name())
file.write('Version: %s\n' % self.get_version())
file.write('Summary: %s\n' % self.get_description())
file.write('Home-page: %s\n' % self.get_url())
file.write('Author: %s\n' % self.get_contact())
file.write('Author-email: %s\n' % self.get_contact_email())
file.write('License: %s\n' % self.get_license())
if self.download_url:
file.write('Download-URL: %s\n' % self.download_url)
long_desc = rfc822_escape(self.get_long_description())
file.write('Description: %s\n' % long_desc)
keywords = ','.join(self.get_keywords())
if keywords:
file.write('Keywords: %s\n' % keywords)
self._write_list(file, 'Platform', self.get_platforms())
self._write_list(file, 'Classifier', self.get_classifiers())
# PEP 314
self._write_list(file, 'Requires', self.get_requires())
self._write_list(file, 'Provides', self.get_provides())
self._write_list(file, 'Obsoletes', self.get_obsoletes())
# Setuptools specific for PEP 345
if hasattr(self, 'python_requires'):
file.write('Requires-Python: %s\n' % self.python_requires)
# from Python 3.4
def write_pkg_info(self, base_dir):
"""Write the PKG-INFO file into the release tree.
"""
with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
encoding='UTF-8') as pkg_info:
self.write_pkg_file(pkg_info)
sequence = tuple, list
def check_importable(dist, attr, value):
try:
ep = pkg_resources.EntryPoint.parse('x=' + value)
assert not ep.extras
except (TypeError, ValueError, AttributeError, AssertionError):
raise DistutilsSetupError(
"%r must be importable 'module:attrs' string (got %r)"
% (attr, value)
)
def assert_string_list(dist, attr, value):
"""Verify that value is a string list or None"""
try:
assert ''.join(value) != value
except (TypeError, ValueError, AttributeError, AssertionError):
raise DistutilsSetupError(
"%r must be a list of strings (got %r)" % (attr, value)
)
def check_nsp(dist, attr, value):
"""Verify that namespace packages are valid"""
assert_string_list(dist, attr, value)
for nsp in value:
if not dist.has_contents_for(nsp):
raise DistutilsSetupError(
"Distribution contains no modules or packages for " +
"namespace package %r" % nsp
)
if '.' in nsp:
parent = '.'.join(nsp.split('.')[:-1])
if parent not in value:
distutils.log.warn(
"WARNING: %r is declared as a package namespace, but %r"
" is not: please correct this in setup.py", nsp, parent
)
def check_extras(dist, attr, value):
"""Verify that extras_require mapping is valid"""
try:
for k, v in value.items():
if ':' in k:
k, m = k.split(':', 1)
if pkg_resources.invalid_marker(m):
raise DistutilsSetupError("Invalid environment marker: " + m)
list(pkg_resources.parse_requirements(v))
except (TypeError, ValueError, AttributeError):
raise DistutilsSetupError(
"'extras_require' must be a dictionary whose values are "
"strings or lists of strings containing valid project/version "
"requirement specifiers."
)
def assert_bool(dist, attr, value):
"""Verify that value is True, False, 0, or 1"""
if bool(value) != value:
tmpl = "{attr!r} must be a boolean value (got {value!r})"
raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
def check_requirements(dist, attr, value):
"""Verify that install_requires is a valid requirements list"""
try:
list(pkg_resources.parse_requirements(value))
except (TypeError, ValueError) as error:
tmpl = (
"{attr!r} must be a string or list of strings "
"containing valid project/version requirement specifiers; {error}"
)
raise DistutilsSetupError(tmpl.format(attr=attr, error=error))
def check_specifier(dist, attr, value):
"""Verify that value is a valid version specifier"""
try:
packaging.specifiers.SpecifierSet(value)
except packaging.specifiers.InvalidSpecifier as error:
tmpl = (
"{attr!r} must be a string or list of strings "
"containing valid version specifiers; {error}"
)
raise DistutilsSetupError(tmpl.format(attr=attr, error=error))
def check_entry_points(dist, attr, value):
"""Verify that entry_points map is parseable"""
try:
pkg_resources.EntryPoint.parse_map(value)
except ValueError as e:
raise DistutilsSetupError(e)
def check_test_suite(dist, attr, value):
if not isinstance(value, six.string_types):
raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
"""Verify that value is a dictionary of package names to glob lists"""
if isinstance(value, dict):
for k, v in value.items():
if not isinstance(k, str):
break
try:
iter(v)
except TypeError:
break
else:
return
raise DistutilsSetupError(
attr + " must be a dictionary mapping package names to lists of "
"wildcard patterns"
)
def check_packages(dist, attr, value):
for pkgname in value:
if not re.match(r'\w+(\.\w+)*', pkgname):
distutils.log.warn(
"WARNING: %r not a valid package name; please use only "
".-separated package names in setup.py", pkgname
)
_Distribution = get_unpatched(distutils.core.Distribution)
class Distribution(_Distribution):
"""Distribution with support for features, tests, and package data
This is an enhanced version of 'distutils.dist.Distribution' that
effectively adds the following new optional keyword arguments to 'setup()':
'install_requires' -- a string or sequence of strings specifying project
versions that the distribution requires when installed, in the format
used by 'pkg_resources.require()'. They will be installed
automatically when the package is installed. If you wish to use
packages that are not available in PyPI, or want to give your users an
alternate download location, you can add a 'find_links' option to the
'[easy_install]' section of your project's 'setup.cfg' file, and then
setuptools will scan the listed web pages for links that satisfy the
requirements.
'extras_require' -- a dictionary mapping names of optional "extras" to the
additional requirement(s) that using those extras incurs. For example,
this::
extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
indicates that the distribution can optionally provide an extra
capability called "reST", but it can only be used if docutils and
reSTedit are installed. If the user installs your package using
EasyInstall and requests one of your extras, the corresponding
additional requirements will be installed if needed.
'features' **deprecated** -- a dictionary mapping option names to
'setuptools.Feature'
objects. Features are a portion of the distribution that can be
included or excluded based on user options, inter-feature dependencies,
and availability on the current system. Excluded features are omitted
from all setup commands, including source and binary distributions, so
you can create multiple distributions from the same source tree.
Feature names should be valid Python identifiers, except that they may
contain the '-' (minus) sign. Features can be included or excluded
via the command line options '--with-X' and '--without-X', where 'X' is
the name of the feature. Whether a feature is included by default, and
whether you are allowed to control this from the command line, is
determined by the Feature object. See the 'Feature' class for more
information.
'test_suite' -- the name of a test suite to run for the 'test' command.
If the user runs 'python setup.py test', the package will be installed,
and the named test suite will be run. The format is the same as
would be used on a 'unittest.py' command line. That is, it is the
dotted name of an object to import and call to generate a test suite.
'package_data' -- a dictionary mapping package names to lists of filenames
or globs to use to find data files contained in the named packages.
If the dictionary has filenames or globs listed under '""' (the empty
string), those names will be searched for in every package, in addition
to any names for the specific package. Data files found using these
names/globs will be installed along with the package, in the same
location as the package. Note that globs are allowed to reference
the contents of non-package subdirectories, as long as you use '/' as
a path separator. (Globs are automatically converted to
platform-specific paths at runtime.)
In addition to these new keywords, this class also has several new methods
for manipulating the distribution's contents. For example, the 'include()'
and 'exclude()' methods can be thought of as in-place add and subtract
commands that add or remove packages, modules, extensions, and so on from
the distribution. They are used by the feature subsystem to configure the
distribution for the included and excluded features.
"""
_patched_dist = None
def patch_missing_pkg_info(self, attrs):
# Fake up a replacement for the data that would normally come from
# PKG-INFO, but which might not yet be built if this is a fresh
# checkout.
#
if not attrs or 'name' not in attrs or 'version' not in attrs:
return
key = pkg_resources.safe_name(str(attrs['name'])).lower()
dist = pkg_resources.working_set.by_key.get(key)
if dist is not None and not dist.has_metadata('PKG-INFO'):
dist._version = pkg_resources.safe_version(str(attrs['version']))
self._patched_dist = dist
def __init__(self, attrs=None):
have_package_data = hasattr(self, "package_data")
if not have_package_data:
self.package_data = {}
_attrs_dict = attrs or {}
if 'features' in _attrs_dict or 'require_features' in _attrs_dict:
Feature.warn_deprecated()
self.require_features = []
self.features = {}
self.dist_files = []
self.src_root = attrs and attrs.pop("src_root", None)
self.patch_missing_pkg_info(attrs)
# Make sure we have any eggs needed to interpret 'attrs'
if attrs is not None:
self.dependency_links = attrs.pop('dependency_links', [])
assert_string_list(self, 'dependency_links', self.dependency_links)
if attrs and 'setup_requires' in attrs:
self.fetch_build_eggs(attrs['setup_requires'])
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
vars(self).setdefault(ep.name, None)
_Distribution.__init__(self, attrs)
if isinstance(self.metadata.version, numbers.Number):
# Some people apparently take "version number" too literally :)
self.metadata.version = str(self.metadata.version)
if self.metadata.version is not None:
try:
ver = packaging.version.Version(self.metadata.version)
normalized_version = str(ver)
if self.metadata.version != normalized_version:
warnings.warn(
"Normalizing '%s' to '%s'" % (
self.metadata.version,
normalized_version,
)
)
self.metadata.version = normalized_version
except (packaging.version.InvalidVersion, TypeError):
warnings.warn(
"The version specified (%r) is an invalid version, this "
"may not work as expected with newer versions of "
"setuptools, pip, and PyPI. Please see PEP 440 for more "
"details." % self.metadata.version
)
if getattr(self, 'python_requires', None):
self.metadata.python_requires = self.python_requires
def parse_command_line(self):
"""Process features after parsing command line options"""
result = _Distribution.parse_command_line(self)
if self.features:
self._finalize_features()
return result
def _feature_attrname(self, name):
"""Convert feature name to corresponding option attribute name"""
return 'with_' + name.replace('-', '_')
def fetch_build_eggs(self, requires):
"""Resolve pre-setup requirements"""
resolved_dists = pkg_resources.working_set.resolve(
pkg_resources.parse_requirements(requires),
installer=self.fetch_build_egg,
replace_conflicting=True,
)
for dist in resolved_dists:
pkg_resources.working_set.add(dist, replace=True)
def finalize_options(self):
_Distribution.finalize_options(self)
if self.features:
self._set_global_opts_from_features()
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
value = getattr(self, ep.name, None)
if value is not None:
ep.require(installer=self.fetch_build_egg)
ep.load()(self, ep.name, value)
if getattr(self, 'convert_2to3_doctests', None):
# XXX may convert to set here when we can rely on set being builtin
self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests]
else:
self.convert_2to3_doctests = []
def get_egg_cache_dir(self):
egg_cache_dir = os.path.join(os.curdir, '.eggs')
if not os.path.exists(egg_cache_dir):
os.mkdir(egg_cache_dir)
windows_support.hide_file(egg_cache_dir)
readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
with open(readme_txt_filename, 'w') as f:
f.write('This directory contains eggs that were downloaded '
'by setuptools to build, test, and run plug-ins.\n\n')
f.write('This directory caches those eggs to prevent '
'repeated downloads.\n\n')
f.write('However, it is safe to delete this directory.\n\n')
return egg_cache_dir
def fetch_build_egg(self, req):
"""Fetch an egg needed for building"""
try:
cmd = self._egg_fetcher
cmd.package_index.to_scan = []
except AttributeError:
from setuptools.command.easy_install import easy_install
dist = self.__class__({'script_args': ['easy_install']})
dist.parse_config_files()
opts = dist.get_option_dict('easy_install')
keep = (
                'find_links', 'site_dirs', 'index_url', 'optimize',
                'allow_hosts'
)
for key in list(opts):
if key not in keep:
del opts[key] # don't use any other settings
if self.dependency_links:
links = self.dependency_links[:]
if 'find_links' in opts:
links = opts['find_links'][1].split() + links
opts['find_links'] = ('setup', links)
install_dir = self.get_egg_cache_dir()
cmd = easy_install(
dist, args=["x"], install_dir=install_dir, exclude_scripts=True,
always_copy=False, build_directory=None, editable=False,
upgrade=False, multi_version=True, no_report=True, user=False
)
cmd.ensure_finalized()
self._egg_fetcher = cmd
return cmd.easy_install(req)
def _set_global_opts_from_features(self):
"""Add --with-X/--without-X options based on optional features"""
go = []
no = self.negative_opt.copy()
for name, feature in self.features.items():
self._set_feature(name, None)
feature.validate(self)
if feature.optional:
descr = feature.description
incdef = ' (default)'
excdef = ''
if not feature.include_by_default():
excdef, incdef = incdef, excdef
go.append(('with-' + name, None, 'include ' + descr + incdef))
go.append(('without-' + name, None, 'exclude ' + descr + excdef))
no['without-' + name] = 'with-' + name
self.global_options = self.feature_options = go + self.global_options
self.negative_opt = self.feature_negopt = no
def _finalize_features(self):
"""Add/remove features and resolve dependencies between them"""
# First, flag all the enabled items (and thus their dependencies)
for name, feature in self.features.items():
enabled = self.feature_is_included(name)
if enabled or (enabled is None and feature.include_by_default()):
feature.include_in(self)
self._set_feature(name, 1)
# Then disable the rest, so that off-by-default features don't
# get flagged as errors when they're required by an enabled feature
for name, feature in self.features.items():
if not self.feature_is_included(name):
feature.exclude_from(self)
self._set_feature(name, 0)
def get_command_class(self, command):
"""Pluggable version of get_command_class()"""
if command in self.cmdclass:
return self.cmdclass[command]
for ep in pkg_resources.iter_entry_points('distutils.commands', command):
ep.require(installer=self.fetch_build_egg)
self.cmdclass[command] = cmdclass = ep.load()
return cmdclass
else:
return _Distribution.get_command_class(self, command)
def print_commands(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.print_commands(self)
def get_command_list(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.get_command_list(self)
def _set_feature(self, name, status):
"""Set feature's inclusion status"""
setattr(self, self._feature_attrname(name), status)
def feature_is_included(self, name):
"""Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
return getattr(self, self._feature_attrname(name))
def include_feature(self, name):
"""Request inclusion of feature named 'name'"""
if self.feature_is_included(name) == 0:
descr = self.features[name].description
raise DistutilsOptionError(
descr + " is required, but was excluded or is not available"
)
self.features[name].include_in(self)
self._set_feature(name, 1)
def include(self, **attrs):
"""Add items to distribution that are named in keyword arguments
        For example, 'dist.include(py_modules=["x"])' would add 'x' to
the distribution's 'py_modules' attribute, if it was not already
there.
Currently, this method only supports inclusion for attributes that are
lists or tuples. If you need to add support for adding to other
attributes in this or a subclass, you can add an '_include_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
will try to call 'dist._include_foo({"bar":"baz"})', which can then
handle whatever special inclusion logic is needed.
"""
for k, v in attrs.items():
include = getattr(self, '_include_' + k, None)
if include:
include(v)
else:
self._include_misc(k, v)
def exclude_package(self, package):
"""Remove packages, modules, and extensions in named package"""
pfx = package + '.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
]
def has_contents_for(self, package):
"""Return true if 'exclude_package(package)' would do something"""
pfx = package + '.'
for p in self.iter_distribution_names():
if p == package or p.startswith(pfx):
return True
def _exclude_misc(self, name, value):
"""Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
raise DistutilsSetupError(
"%s: setting must be a list or tuple (%r)" % (name, value)
)
try:
old = getattr(self, name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is not None and not isinstance(old, sequence):
raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
)
elif old:
setattr(self, name, [item for item in old if item not in value])
def _include_misc(self, name, value):
"""Handle 'include()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
raise DistutilsSetupError(
"%s: setting must be a list (%r)" % (name, value)
)
try:
old = getattr(self, name)
except AttributeError:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
)
if old is None:
setattr(self, name, value)
elif not isinstance(old, sequence):
raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
)
else:
setattr(self, name, old + [item for item in value if item not in old])
def exclude(self, **attrs):
"""Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed.
"""
for k, v in attrs.items():
exclude = getattr(self, '_exclude_' + k, None)
if exclude:
exclude(v)
else:
self._exclude_misc(k, v)
def _exclude_packages(self, packages):
if not isinstance(packages, sequence):
raise DistutilsSetupError(
"packages: setting must be a list or tuple (%r)" % (packages,)
)
list(map(self.exclude_package, packages))
def _parse_command_opts(self, parser, args):
# Remove --with-X/--without-X options when processing command args
self.global_options = self.__class__.global_options
self.negative_opt = self.__class__.negative_opt
# First, expand any aliases
command = args[0]
aliases = self.get_option_dict('aliases')
while command in aliases:
src, alias = aliases[command]
del aliases[command] # ensure each alias can expand only once!
import shlex
args[:1] = shlex.split(alias, True)
command = args[0]
nargs = _Distribution._parse_command_opts(self, parser, args)
# Handle commands that want to consume all remaining arguments
cmd_class = self.get_command_class(command)
if getattr(cmd_class, 'command_consumes_arguments', None):
self.get_option_dict(command)['args'] = ("command line", nargs)
if nargs is not None:
return []
return nargs
def get_cmdline_options(self):
"""Return a '{cmd: {opt:val}}' map of all command-line options
Option names are all long, but do not include the leading '--', and
contain dashes rather than underscores. If the option doesn't take
an argument (e.g. '--quiet'), the 'val' is 'None'.
Note that options provided by config files are intentionally excluded.
"""
d = {}
for cmd, opts in self.command_options.items():
for opt, (src, val) in opts.items():
if src != "command line":
continue
opt = opt.replace('_', '-')
if val == 0:
cmdobj = self.get_command_obj(cmd)
neg_opt = self.negative_opt.copy()
neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
for neg, pos in neg_opt.items():
if pos == opt:
opt = neg
val = None
break
else:
raise AssertionError("Shouldn't be able to get here")
elif val == 1:
val = None
d.setdefault(cmd, {})[opt] = val
return d
def iter_distribution_names(self):
"""Yield all packages, modules, and extension names in distribution"""
for pkg in self.packages or ():
yield pkg
for module in self.py_modules or ():
yield module
for ext in self.ext_modules or ():
if isinstance(ext, tuple):
name, buildinfo = ext
else:
name = ext.name
if name.endswith('module'):
name = name[:-6]
yield name
def handle_display_options(self, option_order):
"""If there were any non-global "display-only" options
(--help-commands or the metadata display options) on the command
line, display the requested info and return true; else return
false.
"""
import sys
if six.PY2 or self.help_commands:
return _Distribution.handle_display_options(self, option_order)
# Stdout may be StringIO (e.g. in tests)
import io
if not isinstance(sys.stdout, io.TextIOWrapper):
return _Distribution.handle_display_options(self, option_order)
# Don't wrap stdout if utf-8 is already the encoding. Provides
# workaround for #334.
if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
return _Distribution.handle_display_options(self, option_order)
# Print metadata in UTF-8 no matter the platform
encoding = sys.stdout.encoding
errors = sys.stdout.errors
newline = sys.platform != 'win32' and '\n' or None
line_buffering = sys.stdout.line_buffering
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
try:
return _Distribution.handle_display_options(self, option_order)
finally:
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), encoding, errors, newline, line_buffering)
class Feature:
"""
**deprecated** -- The `Feature` facility was never completely implemented
or supported, `has reported issues
<https://github.com/pypa/setuptools/issues/58>`_ and will be removed in
a future version.
A subset of the distribution that can be excluded if unneeded/wanted
Features are created using these keyword arguments:
'description' -- a short, human readable description of the feature, to
be used in error messages, and option help messages.
'standard' -- if true, the feature is included by default if it is
available on the current system. Otherwise, the feature is only
included if requested via a command line '--with-X' option, or if
another included feature requires it. The default setting is 'False'.
'available' -- if true, the feature is available for installation on the
current system. The default setting is 'True'.
'optional' -- if true, the feature's inclusion can be controlled from the
command line, using the '--with-X' or '--without-X' options. If
false, the feature's inclusion status is determined automatically,
        based on 'available', 'standard', and whether any other feature
requires it. The default setting is 'True'.
'require_features' -- a string or sequence of strings naming features
that should also be included if this feature is included. Defaults to
empty list. May also contain 'Require' objects that should be
added/removed from the distribution.
'remove' -- a string or list of strings naming packages to be removed
from the distribution if this feature is *not* included. If the
feature *is* included, this argument is ignored. This argument exists
to support removing features that "crosscut" a distribution, such as
defining a 'tests' feature that removes all the 'tests' subpackages
provided by other features. The default for this argument is an empty
list. (Note: the named package(s) or modules must exist in the base
distribution when the 'setup()' function is initially called.)
other keywords -- any other keyword arguments are saved, and passed to
the distribution's 'include()' and 'exclude()' methods when the
feature is included or excluded, respectively. So, for example, you
could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
added or removed from the distribution as appropriate.
A feature must include at least one 'requires', 'remove', or other
keyword argument. Otherwise, it can't affect the distribution in any way.
Note also that you can subclass 'Feature' to create your own specialized
feature types that modify the distribution in other ways when included or
excluded. See the docstrings for the various methods here for more detail.
Aside from the methods, the only feature attributes that distributions look
at are 'description' and 'optional'.
"""
@staticmethod
def warn_deprecated():
warnings.warn(
"Features are deprecated and will be removed in a future "
"version. See https://github.com/pypa/setuptools/issues/65.",
DeprecationWarning,
stacklevel=3,
)
def __init__(self, description, standard=False, available=True,
optional=True, require_features=(), remove=(), **extras):
self.warn_deprecated()
self.description = description
self.standard = standard
self.available = available
self.optional = optional
if isinstance(require_features, (str, Require)):
require_features = require_features,
self.require_features = [
r for r in require_features if isinstance(r, str)
]
er = [r for r in require_features if not isinstance(r, str)]
if er:
extras['require_features'] = er
if isinstance(remove, str):
remove = remove,
self.remove = remove
self.extras = extras
if not remove and not require_features and not extras:
raise DistutilsSetupError(
"Feature %s: must define 'require_features', 'remove', or at least one"
" of 'packages', 'py_modules', etc."
)
def include_by_default(self):
"""Should this feature be included by default?"""
return self.available and self.standard
def include_in(self, dist):
"""Ensure feature and its requirements are included in distribution
You may override this in a subclass to perform additional operations on
the distribution. Note that this method may be called more than once
per feature, and so should be idempotent.
"""
if not self.available:
raise DistutilsPlatformError(
self.description + " is required, "
"but is not available on this platform"
)
dist.include(**self.extras)
for f in self.require_features:
dist.include_feature(f)
def exclude_from(self, dist):
"""Ensure feature is excluded from distribution
You may override this in a subclass to perform additional operations on
the distribution. This method will be called at most once per
feature, and only after all included features have been asked to
include themselves.
"""
dist.exclude(**self.extras)
if self.remove:
for item in self.remove:
dist.exclude_package(item)
def validate(self, dist):
"""Verify that feature makes sense in context of distribution
This method is called by the distribution just before it parses its
command line. It checks to ensure that the 'remove' attribute, if any,
contains only valid package/module names that are present in the base
distribution when 'setup()' is called. You may override it in a
subclass to perform any other required validation of the feature
against a target distribution.
"""
for item in self.remove:
if not dist.has_contents_for(item):
raise DistutilsSetupError(
"%s wants to be able to remove %s, but the distribution"
" doesn't contain any packages or modules under %s"
% (self.description, item, item)
)
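# Illustrative sketch only (not part of setuptools): a setup() call exercising the
# keywords documented on the Distribution class above. The project name, version
# pins, and file globs are made-up placeholders.
#
#   from setuptools import setup, find_packages
#
#   setup(
#       name='example_project',
#       version='0.1',
#       packages=find_packages(),
#       install_requires=['docutils>=0.3'],              # checked by check_requirements()
#       extras_require={'reST': ['docutils>=0.3']},      # checked by check_extras()
#       package_data={'': ['*.txt'],
#                     'example_project': ['data/*.dat']},# checked by check_package_data()
#       test_suite='example_project.tests',              # checked by check_test_suite()
#       python_requires='>=2.7',                         # checked by check_specifier()
#   )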
|
|
# ==============================================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Upgrade script to move from pre-release schema to new schema.
Usage examples:
bazel run tensorflow/contrib/lite/schema/upgrade_schema -- in.json out.json
bazel run tensorflow/contrib/lite/schema/upgrade_schema -- in.bin out.bin
bazel run tensorflow/contrib/lite/schema/upgrade_schema -- in.bin out.json
bazel run tensorflow/contrib/lite/schema/upgrade_schema -- in.json out.bin
bazel run tensorflow/contrib/lite/schema/upgrade_schema -- in.tflite out.tflite
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import contextlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import tensorflow as tf
from tensorflow.python.platform import resource_loader
parser = argparse.ArgumentParser(
description="Script to move TFLite models from pre-release schema to"
" new schema.")
parser.add_argument(
"input",
type=str,
help="Input TensorFlow lite file in `.json`, `.bin` or `.tflite` format.")
parser.add_argument(
"output",
type=str,
help="Output json or bin TensorFlow lite model compliant with"
"the new schema. Extension must be `.json`, `.bin` or `.tflite`.")
# RAII Temporary Directory, because flatc doesn't allow direct use of tempfiles.
@contextlib.contextmanager
def TemporaryDirectoryResource():
temporary = tempfile.mkdtemp()
try:
yield temporary
finally:
shutil.rmtree(temporary)
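# Illustrative sketch (hypothetical use): the context manager above yields a
# scratch directory and always removes it, even if the body raises:
#
#   with TemporaryDirectoryResource() as tempdir:
#     open(os.path.join(tempdir, "scratch.json"), "w").close()
#   # tempdir no longer exists here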
class Converter(object):
"""Converts TensorFlow flatbuffer models from old to new version of schema.
  This can convert from any version to the latest version. It uses
an incremental upgrade strategy to go from version to version.
Usage:
converter = Converter()
converter.Convert("a.tflite", "a.json")
converter.Convert("b.json", "b.tflite")
"""
def __init__(self):
# TODO(aselle): make this work in the open source version with better
# path.
paths_to_try = [
"../../../../flatbuffers/flatc", # not bazel
"../../../../external/flatbuffers/flatc" # bazel
]
for p in paths_to_try:
self._flatc_path = resource_loader.get_path_to_datafile(p)
if os.path.exists(self._flatc_path): break
def FindSchema(base_name):
return resource_loader.get_path_to_datafile("%s" % base_name)
# Supported schemas for upgrade.
self._schemas = [
(0, FindSchema("schema_v0.fbs"), True, self._Upgrade0To1),
(1, FindSchema("schema_v1.fbs"), True, self._Upgrade1To2),
(2, FindSchema("schema_v2.fbs"), True, self._Upgrade2To3),
(3, FindSchema("schema_v3.fbs"), False, None) # Non-callable by design.
]
# Ensure schemas are sorted, and extract latest version and upgrade
# dispatch function table.
self._schemas.sort()
self._new_version, self._new_schema = self._schemas[-1][:2]
self._upgrade_dispatch = dict(
(version, dispatch)
for version, unused1, unused2, dispatch in self._schemas)
def _Read(self, input_file, schema, raw_binary=False):
"""Read a tflite model assuming the given flatbuffer schema.
    If `input_file` is binary (`.bin`/`.tflite`), then we must use flatc to
    convert it to json using the given schema.
Args:
input_file: a binary (flatbuffer) or json file to read from. Extension
must be `.tflite`, `.bin`, or `.json` for FlatBuffer Binary or
FlatBuffer JSON.
schema: which schema to use for reading
      raw_binary: whether to pass --raw-binary to flatc; schema versions
        prior to v3 lacked a file_identifier and require this.
    Raises:
      RuntimeError: When flatc cannot be invoked.
      ValueError: When the extension is not `.json`, `.bin`, or `.tflite`.
Returns:
A dictionary representing the read tflite model.
"""
raw_binary = ["--raw-binary"] if raw_binary else []
with TemporaryDirectoryResource() as tempdir:
basename = os.path.basename(input_file)
basename_no_extension, extension = os.path.splitext(basename)
if extension in [".bin", ".tflite"]:
# Convert to json using flatc
returncode = subprocess.call([
self._flatc_path,
"-t",
"--strict-json",
"--defaults-json",
] + raw_binary + ["-o", tempdir, schema, "--", input_file])
if returncode != 0:
raise RuntimeError("flatc failed to convert from binary to json.")
json_file = os.path.join(tempdir, basename_no_extension + ".json")
if not os.path.exists(json_file):
raise RuntimeError("Could not find %r" % json_file)
elif extension == ".json":
json_file = input_file
else:
raise ValueError("Invalid extension on input file %r" % input_file)
return json.load(open(json_file))
def _Write(self, data, output_file):
"""Output a json or bin version of the flatbuffer model.
Args:
data: Dict representing the TensorFlow Lite model to write.
output_file: filename to write the converted flatbuffer to. (json,
tflite, or bin extension is required).
Raises:
      ValueError: When the extension is not `.json`, `.bin`, or `.tflite`.
RuntimeError: When flatc fails to convert json data to binary.
"""
_, extension = os.path.splitext(output_file)
with TemporaryDirectoryResource() as tempdir:
if extension == ".json":
json.dump(data, open(output_file, "w"), sort_keys=True, indent=2)
elif extension in [".tflite", ".bin"]:
input_json = os.path.join(tempdir, "temp.json")
with open(input_json, "w") as fp:
json.dump(data, fp, sort_keys=True, indent=2)
returncode = subprocess.call([
self._flatc_path, "-b", "--defaults-json", "--strict-json", "-o",
tempdir, self._new_schema, input_json
])
if returncode != 0:
raise RuntimeError("flatc failed to convert upgraded json to binary.")
shutil.copy(os.path.join(tempdir, "temp.tflite"), output_file)
else:
raise ValueError("Invalid extension on output file %r" % output_file)
def _Upgrade0To1(self, data):
"""Upgrade data from Version 0 to Version 1.
    Changes: Added subgraphs (which contain a subset of formerly global
    entries).
Args:
data: Dictionary representing the TensorFlow lite data to be upgraded.
This will be modified in-place to be an upgraded version.
"""
subgraph = {}
for key_to_promote in ["tensors", "operators", "inputs", "outputs"]:
subgraph[key_to_promote] = data[key_to_promote]
del data[key_to_promote]
data["subgraphs"] = [subgraph]
def _Upgrade1To2(self, data):
"""Upgrade data from Version 1 to Version 2.
    Changes: Renamed operators to conform to the NN API.
Args:
data: Dictionary representing the TensorFlow lite data to be upgraded.
This will be modified in-place to be an upgraded version.
Raises:
ValueError: Throws when model builtins are numeric rather than symbols.
"""
def RemapOperator(opcode_name):
"""Go from old schema op name to new schema op name.
Args:
opcode_name: String representing the ops (see :schema.fbs).
Returns:
Converted opcode_name from V1 to V2.
"""
old_name_to_new_name = {
"CONVOLUTION": "CONV_2D",
"DEPTHWISE_CONVOLUTION": "DEPTHWISE_CONV_2D",
"AVERAGE_POOL": "AVERAGE_POOL_2D",
"MAX_POOL": "MAX_POOL_2D",
"L2_POOL": "L2_POOL_2D",
"SIGMOID": "LOGISTIC",
"L2NORM": "L2_NORMALIZATION",
"LOCAL_RESPONSE_NORM": "LOCAL_RESPONSE_NORMALIZATION",
"Basic_RNN": "RNN",
}
return (old_name_to_new_name[opcode_name]
if opcode_name in old_name_to_new_name else opcode_name)
def RemapOperatorType(operator_type):
"""Remap operator structs from old names to new names.
Args:
operator_type: String representing the builtin operator data type
string.
(see :schema.fbs).
Returns:
Upgraded builtin operator data type as a string.
"""
old_to_new = {
"PoolOptions": "Pool2DOptions",
"DepthwiseConvolutionOptions": "DepthwiseConv2DOptions",
"ConvolutionOptions": "Conv2DOptions",
"LocalResponseNormOptions": "LocalResponseNormalizationOptions",
"BasicRNNOptions": "RNNOptions",
}
return (old_to_new[operator_type]
if operator_type in old_to_new else operator_type)
for subgraph in data["subgraphs"]:
for ops in subgraph["operators"]:
ops["builtin_options_type"] = RemapOperatorType(
ops["builtin_options_type"])
# Upgrade the operator codes
for operator_code in data["operator_codes"]:
      # Check that builtin_code is the appropriate string type
      # (use type(u"") instead of str or unicode for py2-and-py3 compatibility)
      if not isinstance(operator_code["builtin_code"], type(u"")):
        raise ValueError("builtin_code %r is non-string. This usually means "
                         "your model has consistency problems." %
                         (operator_code["builtin_code"]))
operator_code["builtin_code"] = (RemapOperator(
operator_code["builtin_code"]))
def _Upgrade2To3(self, data):
"""Upgrade data from Version 2 to Version 3.
Changed actual read-only tensor data to be in a buffers table instead
of inline with the tensor.
Args:
data: Dictionary representing the TensorFlow lite data to be upgraded.
This will be modified in-place to be an upgraded version.
"""
buffers = [{"data": []}] # Start with 1 empty buffer
for subgraph in data["subgraphs"]:
if "tensors" not in subgraph:
continue
for tensor in subgraph["tensors"]:
if "data_buffer" not in tensor:
tensor["buffer"] = 0
else:
if tensor["data_buffer"]:
tensor[u"buffer"] = len(buffers)
buffers.append({"data": tensor["data_buffer"]})
else:
tensor["buffer"] = 0
del tensor["data_buffer"]
data["buffers"] = buffers
def _PerformUpgrade(self, data):
"""Manipulate the `data` (parsed JSON) based on changes in format.
    This will incrementally upgrade `data` from version to version.
Args:
data: Dictionary representing the TensorFlow data. This will be upgraded
in place.
"""
while data["version"] < self._new_version:
self._upgrade_dispatch[data["version"]](data)
data["version"] += 1
def Convert(self, input_file, output_file):
"""Perform schema conversion from input_file to output_file.
Args:
      input_file: Filename of TensorFlow Lite data to convert from. The
        extension must be `.json`, `.bin`, or `.tflite`, for the JSON or
        binary (FlatBuffer) forms of the TensorFlow Lite schema.
      output_file: Filename to write to. Extension also must be `.json`,
        `.bin`, or `.tflite`.
Raises:
      RuntimeError: Generated when none of the schemas supported by the
        upgrader matches the `input_file` data.
"""
# Read data in each schema (since they are incompatible). Version is
# always present. Use the read data that matches the version of the
# schema.
for version, schema, raw_binary, _ in self._schemas:
try:
data_candidate = self._Read(input_file, schema, raw_binary)
except RuntimeError:
continue # Skip and hope another schema works
if "version" not in data_candidate: # Assume version 1 if not present.
data_candidate["version"] = 1
elif data_candidate["version"] == 0: # Version 0 doesn't exist in wild.
data_candidate["version"] = 1
if data_candidate["version"] == version:
self._PerformUpgrade(data_candidate)
self._Write(data_candidate, output_file)
return
raise RuntimeError("No schema that the converter understands worked with "
"the data file you provided.")
def main(argv):
del argv
Converter().Convert(FLAGS.input, FLAGS.output)
if __name__ == "__main__":
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
|
import re
import os
import sys
import logging
import argparse
import multiprocessing as mp
import sqlalchemy as sa
import db
title_pattern = re.compile(r"#\*([^\r\n]*)")
author_pattern = re.compile(r"#@([^\r\n]*)")
year_pattern = re.compile(r"#year([0-9]*)")
venue_pattern = re.compile(r"#conf([^\r\n]*)")
citation_pattern = re.compile(r"#citation([^\r\n]*)")
id_pattern = re.compile(r"#index([^\r\n]*)")
arnetid_pattern = re.compile(r"#arnetid([^\r\n]*)")
refs_pattern = re.compile(r"#%([^\r\n]*)")
abstract_pattern = re.compile(r"#!([^\r\n]*)")
class Record(object):
__slots__ = ['id', 'title', 'authors', 'venue', 'refs', 'abstract', 'year']
def __init__(self, id, title, authors, venue, refs, abstract, year):
self.id = int(id)
self.title = title
self.venue = venue
self.refs = [int(ref) for ref in refs]
self.abstract = abstract if abstract else None
self.year = int(year) if year else None
self.authors = [a for a in authors.split(',') if a]
def match(line, pattern):
m = pattern.match(line)
return m.groups()[0].decode('utf-8').strip() if m else None
def fmatch(f, pattern):
return match(f.readline(), pattern)
def nextrecord(f):
"""Assume file pos is at beginning of record and read to end. Returns all
components as a Record. Assume components are listed in the following order:
title
authors
year
    venue
    citation count
    id
arnetid
references (0 or more lines)
abstract (0 or 1 line)
"""
title = fmatch(f, title_pattern)
if title is None:
return None
# truncate for varchar(255) field
if len(title) > 255:
title = title[0:255]
authors = fmatch(f, author_pattern)
year = fmatch(f, year_pattern)
venue = fmatch(f, venue_pattern)
citation_num = fmatch(f, citation_pattern)
paperid = fmatch(f, id_pattern)
arnetid = fmatch(f, arnetid_pattern)
# process reference list
refs = []
line = f.readline()
m = match(line, refs_pattern)
while m is not None:
if m:
refs.append(m)
line = f.readline()
m = match(line, refs_pattern)
# once refs have been processed, all that remains is optional abstract
abstract = match(line, abstract_pattern)
if line != '\n':
f.readline() # consume blank line
return Record(
id=paperid,
title=title,
authors=authors,
year=year,
venue=venue,
refs=refs,
abstract=abstract
)
def castrecord(record):
record['id'] = int(record['id'])
record['refs'] = [int(ref) for ref in record['refs']]
abstract = record['abstract']
record['abstract'] = abstract if abstract else None
year = record['year']
record['year'] = int(year) if year else None
author = record['authors']
if ',' in author:
record['authors'] = [a for a in author.split(',') if a]
else:
record['authors'] = [author]
return record
def iterrecords(fpath):
with open(fpath) as f:
num_records = f.readline().strip()
logging.info('processing %s records' % num_records)
record = nextrecord(f)
while record is not None:
yield record
record = nextrecord(f)
def insert(conn, ins):
"""Attempt to run an insertion statement; return results, None if error."""
try:
ins_res = conn.execute(ins)
except sa.exc.IntegrityError as err:
# a paper already exists with this id
logging.error(str(err))
return None
except Exception as e:
logging.error('unexpected exception\n%s', str(e))
return None
else:
return ins_res
def person_insert(conn, name):
sel = sa.sql.text("SELECT id FROM person WHERE LOWER(name)=LOWER(:n)")
res = conn.execute(sel, n=name)
p = res.first()
if p is not None:
return p['id']
ins = db.person.insert().values(name=name)
try:
res = conn.execute(ins)
except sa.exc.IntegrityError: # concurrency issue
res = conn.execute(sel, n=name)
p = res.first()
if p is None:
raise
else:
return p['id']
return res.inserted_primary_key[0]
def process_record(record):
"""Update the database with the contents of the record."""
    logging.debug('processing record\n%s', record)
conn = db.engine.connect()
paper_id = record.id
ins = db.papers.insert().\
values(id=paper_id, title=record.title,
venue=record.venue, year=record.year,
abstract=record.abstract)
# attempt to insert a new paper into the db
result = insert(conn, ins)
if result is None:
# since ids come from data, we've already processed this record
conn.close()
return False
# make new records for each author
for author in record.authors:
person_id = person_insert(conn, author)
ins = db.authors.insert().values(paper=paper_id, person=person_id)
insert(conn, ins) # may fail, but we don't really care
for ref in record.refs:
ins = db.refs.insert().values(paper=paper_id, ref=ref)
insert(conn, ins)
conn.close()
return True # success
def process_records(fpath):
"""Process all records in data file."""
processed = 0
successful = 0
for record in iterrecords(fpath):
try:
success = process_record(record)
except Exception as e:
logging.info('unexpected exception in `process_record`')
logging.error(str(e))
success = False
processed += 1
if success:
successful += 1
if processed % 20 == 0:
logging.info('processed: %d records' % processed)
logging.info('successful: %d' % successful)
def make_parser():
parser = argparse.ArgumentParser(
description="parse dblp data")
parser.add_argument(
'fpath', action='store',
help='file to parse data from')
parser.add_argument(
'-v', '--verbose', action='store_true',
help="turn on verbose logging")
parser.add_argument(
'-vv', '--very-verbose', action='store_true',
help='turn on very verbose logging')
return parser
if __name__ == "__main__":
parser = make_parser()
args = parser.parse_args()
if args.verbose:
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s][%(levelname)s]: %(message)s')
elif args.very_verbose:
logging.basicConfig(
level=logging.DEBUG,
format='[%(asctime)s][%(levelname)s]: %(message)s')
db.engine.echo = True
else:
logging.basicConfig(level=logging.CRITICAL)
db.engine.echo = False
# f = open(args.fpath)
# r = nextrecord(f)
# c = castrecord(r)
try:
process_records(args.fpath)
except Exception as err:
        logging.info('ERROR OCCURRED IN `process_records`')
logging.error(str(err))
sys.exit(-1)
sys.exit(0)
|
|
# This script is going to create the skeleton of our LTER
# database and begin to populate it with raw data.
# Note this is the newer version of the schema
# that was based on the meeting that took place on
# December 21, 2015 with Aldo, Tom, and myself.
# In addition, rather than using psycopg2 directly as a module
# to populate the database, we are strictly using
# sqlalchemy with the psycopg2
# driver as our means of talking to postgresql.
# THIS ASSUMES YOU HAVE THE DATABASE ALREADY CREATED
# IN POSTGRESQL
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import *
from sqlalchemy.dialects.postgresql import *
import pandas as pd
import sys, os
if sys.platform == "darwin":
rootpath = (
"/Users/bibsian/Desktop/git/database-development/")
end = "/"
elif sys.platform == "win32":
rootpath = (
"C:\\Users\MillerLab\\Desktop\\database-development\\")
end = "\\"
lterex = pd.read_csv(
rootpath + 'db' + end + 'lter_table_test.csv')
ltertablename = 'lter_table'
# Here we are using the package sqlalchemy,
# connecting to the lter database
# by specifying the title of the database
# and the user name we will be working under.
# Note, postgres is the super user and can do
# everything possible (CREATE, INSERT, MANIPULATE, etc.)
#create_engine = create_engine(
# 'postgresql+psycopg2:///',
# echo=True)
#conn = create_engine.connect()
#conn.execute("commit")
#conn.execute("CREATE DATABASE popler_3")
#conn.close()
#create_engine.dispose()
engine = create_engine(
'postgresql+psycopg2://--/popler_test',
echo=True)
# Note that the relationships in the database (i.e. the entity-
# relationship or ER diagram) can be visualized after all the tables
# have been created. So, the first step to setting up the skeleton
# of our LTER database
# is going to be to create all tables, and the attributes within each,
# and then use our open source database manager (DBeaver)
# to visualize the layout
########################
# Table creation
#########################
# Now we're going to begin to create the tables and
# specify their attributes/attribute classes
# The first step of this process is to create a database
# metadata catalog. With the object titled 'metadata', we
# can create all the tables, their columns, primary, and foreign
# keys from the 'Table' command and use the 'metadata' object
# to compile all the information. Then it can be written
# to the postgresql database with a special method called
# 'create_all'
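# A minimal sketch (hypothetical table, not part of this schema) of the
# MetaData/Table/create_all pattern described above:
#
#   md = MetaData()
#   example = Table('example_table', md,
#                   Column('id', Integer, primary_key=True),
#                   Column('name', TEXT))
#   md.create_all(some_engine)  # emits CREATE TABLE for every table in md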
metadata = MetaData()
# Raw climate station data
climate_raw_table = Table(
'climate_raw_table', metadata,
Column('metarecordid_', Integer, primary_key=True),
Column('title', TEXT),
Column('stationid', None, ForeignKey(
'climate_station_table.stationid', ondelete="CASCADE")),
Column('year', NUMERIC),
Column('month', NUMERIC),
Column('day', NUMERIC),
    # Terrestrial environmental variables
Column('avetempobs', NUMERIC),
Column('avetempmeasure', VARCHAR(50)),
Column('mintempobs', NUMERIC),
Column('mintempmeasure', VARCHAR(50)),
Column('maxtempobs', NUMERIC),
Column('maxtempmeasure', VARCHAR(50)),
Column('aveprecipobs', NUMERIC),
Column('aveprecipmeasure', VARCHAR(50)),
Column('minprecipobs', NUMERIC),
Column('minprecipmeasure', VARCHAR(50)),
Column('maxprecipobs', NUMERIC),
Column('maxprecipmeasure', NUMERIC),
Column('avewindobs', NUMERIC),
Column('avewindmeasure', VARCHAR(50)),
Column('minwindobs', NUMERIC),
Column('minwindmeasure', VARCHAR(50)),
Column('maxwindobs', NUMERIC),
Column('maxwindmeasure', NUMERIC),
Column('avelightobs', NUMERIC),
Column('avelightmeasure', VARCHAR(50)),
Column('minlightobs', NUMERIC),
Column('minlightmeasure', VARCHAR(50)),
Column('maxlightobs', NUMERIC),
Column('maxlightmeasure', NUMERIC),
    # Aquatic environmental variables
Column('avewatertempobs', NUMERIC),
Column('avewatertempmeasure', VARCHAR(50)),
Column('minwatertempobs', NUMERIC),
Column('minwatertempmeasure', VARCHAR(50)),
Column('maxwatertempobs', NUMERIC),
Column('maxwatertempmeasure', VARCHAR(50)),
Column('avephobs', NUMERIC),
Column('avephmeasure', VARCHAR(50)),
Column('minphobs', NUMERIC),
Column('minphmeasure', VARCHAR(50)),
Column('maxphobs', NUMERIC),
Column('maxphmeasure', NUMERIC),
Column('avecondobs', NUMERIC),
Column('avecondmeasure', VARCHAR(50)),
Column('mincondobs', NUMERIC),
Column('mincondmeasure', VARCHAR(50)),
Column('maxcondobs', NUMERIC),
Column('maxcondmeasure', VARCHAR(50)),
Column('aveturbidityobs', NUMERIC),
Column('aveturbiditymeasure', VARCHAR(50)),
Column('minturbidityobs', NUMERIC),
Column('minturbiditymeasure', VARCHAR(50)),
Column('maxturbidityobs', NUMERIC),
Column('maxturbiditymeasure', VARCHAR(50)),
Column('covariates', TEXT),
Column('knbid_', VARCHAR(200)),
Column('metalink_', VARCHAR(200)),
Column('authors_', VARCHAR(200)),
Column('authors_contact_', VARCHAR(200)))
# climate_site_table: This is the initial table
# that will contain information
# regarding the LTER sites themselves. Column names
# should be self explanatory.
climate_station_table = Table(
'climate_station_table', metadata,
Column('stationid', VARCHAR(200), primary_key=True),
Column('lterid', None,
ForeignKey('lter_table.lterid')),
Column('lat_climate', NUMERIC),
Column('lng_climate', NUMERIC),
Column('descript', TEXT))
# lter_table will be the link from climate data to study data
lter_table = Table(
'lter_table', metadata,
Column('lterid', VARCHAR(10), primary_key=True),
Column('lter_name', TEXT),
Column('lat_lter', NUMERIC),
Column('lng_lter', NUMERIC),
Column('currently_funded', VARCHAR(50)),
Column('current_principle_investigator', VARCHAR(200)),
Column('current_contact_email', VARCHAR(200)),
Column('alt_contact_email', VARCHAR(200)),
Column('homepage', VARCHAR(200)))
# site_info: Table regarding site information within each
# individual study. The table will be related to lter_info and
# the 'foreign key' = lterid/'lterid'
# (i.e. no entries are allowed in this table unless the site
# information originates at a given lter_id)
study_site_table = Table(
'study_site_table', metadata,
Column('study_site_key', VARCHAR(200), primary_key=True),
Column('lter_table_fkey', VARCHAR(10),
ForeignKey('lter_table.lterid')),
Column('lat_study_site', NUMERIC),
Column('lng_study_site', NUMERIC),
Column('descript', TEXT))
project_table = Table(
'project_table', metadata,
# This column is the unique index that we created
# in order to keep track of all the datasets that
# will be uploaded
Column('proj_metadata_key', INTEGER, primary_key=True),
Column('lter_project_fkey', VARCHAR(10),
ForeignKey('lter_table.lterid')),
Column('title', TEXT),
    # META: This column specifies the type of information
    # about the sampling organism's life stage,
    # i.e. adult, juvenile, size, etc.
Column('samplingunits', VARCHAR(50)),
# META: This column specifies the type of data that was
# collected (i.e. count, biomass, percent cover, etc.)
Column('datatype', VARCHAR(50)),
    # META: This column specifies the type of information
    # about the sampling organism's life stage,
    # i.e. size, age, life-stage
Column('structured_type_1', VARCHAR(50)),
Column('structured_type_1_units', VARCHAR(50)),
Column('structured_type_2', VARCHAR(50)),
Column('structured_type_2_units', VARCHAR(50)),
Column('structured_type_3', VARCHAR(50)),
Column('structured_type_3_units', VARCHAR(50)),
Column('structured_type_4', VARCHAR(50)),
Column('structured_type_4_units', VARCHAR(50)),
Column('studystartyr', NUMERIC),
Column('studyendyr', NUMERIC),
Column('duration_years', Integer),
# META: This column relates to the frequency of sampling
# i.e. seasonal, monthly, month:yr, season:yr, daily, etc.
Column('samplefreq', TEXT),
    # META: This column lists whether the study was observational
    # or experimental (which includes historic experimental
    # events)
    Column('studytype', VARCHAR(50)),
    # META: This column indicates whether the study contained
    # community level data (i.e. data over multiple
    # taxonomic groups)
    Column('community', VARCHAR(50)),
    # Spatial replicate information
    # META:
    # sp_repX_ext: column describes the
    # spatial extent sampled at that level of spatial
    # replication
    # sp_repX_ext_units: column describes the unit
    # of measurement corresponding to that level of spatial
    # replication
    # sp_repX_label: describes the labeling scheme used
    # by the study
    # Derived:
    # sp_repX_uniquelevels: count of the number of unique
    # levels within that replicate level for a given site;
    # encompassing all time and taxa units.
Column('spatial_replication_level_1_extent', NUMERIC),
Column('spatial_replication_level_1_extent_units', VARCHAR(200)),
Column('spatial_replication_level_1_label', VARCHAR(200)),
Column('spatial_replication_level_1_number_of_unique_reps', INTEGER),
Column('spatial_replication_level_2_extent', NUMERIC),
Column('spatial_replication_level_2_extent_units', VARCHAR(200)),
Column('spatial_replication_level_2_label', VARCHAR(200)),
Column('spatial_replication_level_2_number_of_unique_reps', INTEGER),
Column('spatial_replication_level_3_extent', NUMERIC),
Column('spatial_replication_level_3_extent_units', VARCHAR(200)),
Column('spatial_replication_level_3_label', VARCHAR(200)),
Column('spatial_replication_level_3_number_of_unique_reps', INTEGER),
Column('spatial_replication_level_4_extent', NUMERIC),
Column('spatial_replication_level_4_extent_units', VARCHAR(200)),
Column('spatial_replication_level_4_label', VARCHAR(200)),
Column('spatial_replication_level_4_number_of_unique_reps', INTEGER),
Column('spatial_replication_level_5_extent', NUMERIC),
Column('spatial_replication_level_5_extent_units', VARCHAR(200)),
Column('spatial_replication_level_5_label', VARCHAR(200)),
Column('spatial_replication_level_5_number_of_unique_reps', INTEGER),
# Columns regarding treatments
Column('treatment_type_1', VARCHAR(200)),
Column('treatment_type_2', VARCHAR(200)),
Column('treatment_type_3', VARCHAR(200)),
Column('control_group', VARCHAR(200)),
Column('derived', VARCHAR(200)),
# Columns relating to author, metadata, other sources
Column('authors', TEXT),
Column('authors_contact', VARCHAR(200)),
Column('metalink', TEXT),
Column('knbid', VARCHAR(200)))
# main: Table describing the raw data that was collected
# for each individual project
# 'foreign key' = 'siteid'
# This is in case there is a project that does not give a specific
# 'siteid' that can be used in the schema, and to ensure that
# any site data entered comes from a known study site.
site_in_project_table = Table(
'site_in_project_table', metadata,
Column(
'site_in_project_key',
Integer, primary_key=True),
Column('study_site_table_fkey', None,
ForeignKey(
'study_site_table.study_site_key')),
Column('project_table_fkey', None,
ForeignKey('project_table.proj_metadata_key')),
# DERIVED: start year of data collection for
# a particular site
Column('sitestartyr', NUMERIC),
# DERIVED: end year of data collection for
# a particular site
Column('siteendyr', NUMERIC),
    # DERIVED: This will be the total observations
    # related to this project. This includes
    # all temporal and spatial levels and
    # all taxa units
Column('totalobs', NUMERIC),
# DERIVED: calculates the number of unique
# taxonomic units from raw data
Column('uniquetaxaunits', NUMERIC))
# taxa: Table regarding taxonomic information. Change from
# last time involves the foreign key and the addition of
# a column for species code (in case the raw table information does
# not contain a key for translation).
# 'foreign key' = site_info/'siteid'
taxa_table = Table(
'taxa_table', metadata,
Column('taxa_table_key', Integer, primary_key=True),
Column('site_in_project_taxa_key', None, ForeignKey(
'site_in_project_table.site_in_project_key', ondelete="CASCADE")),
Column('sppcode', VARCHAR(100)),
Column('kingdom', VARCHAR(100)),
Column('subkingdom', VARCHAR(100)),
Column('infrakingdom', VARCHAR(100)),
Column('superdivision', VARCHAR(100)),
Column('division', VARCHAR(100)),
Column('subdivision', VARCHAR(100)),
Column('superphylum', VARCHAR(100)),
Column('phylum', VARCHAR(100)),
Column('subphylum', VARCHAR(100)),
Column('clss', VARCHAR(100)),
Column('subclass', VARCHAR(100)),
Column('ordr', VARCHAR(100)),
Column('family', VARCHAR(100)),
Column('genus', VARCHAR(100)),
Column('species', VARCHAR(100)),
Column('common_name', VARCHAR(100)),
Column('authority', VARCHAR(100)),
Column('metadata_taxa_key', Integer))
taxa_accepted_table = Table(
'taxa_accepted_table', metadata,
Column('taxa_accepted_table_key', Integer, primary_key=True),
Column('taxa_original_fkey', None, ForeignKey(
'taxa_table.taxa_table_key', ondelete="CASCADE")),
Column('sppcode', VARCHAR(100)),
Column('kingdom_accepted', VARCHAR(100)),
Column('subkingdom_accepted', VARCHAR(100)),
Column('infrakingdom_accepted', VARCHAR(100)),
Column('superdivision_accepted', VARCHAR(100)),
Column('division_accepted', VARCHAR(100)),
Column('superphylum_accepted', VARCHAR(100)),
Column('phylum_accepted', VARCHAR(100)),
Column('subphylum_accepted', VARCHAR(100)),
Column('subdivision_accepted', VARCHAR(100)),
Column('clss_accepted', VARCHAR(100)),
Column('subclass_accepted', VARCHAR(100)),
Column('ordr_accepted', VARCHAR(100)),
Column('family_accepted', VARCHAR(100)),
Column('genus_accepted', VARCHAR(100)),
Column('species_accepted', VARCHAR(100)),
Column('common_name_accepted', VARCHAR(100)),
Column('authority', VARCHAR(100)))
# Count table
count_table = Table(
'count_table', metadata,
Column('count_table_key', Integer, primary_key=True),
Column('taxa_count_fkey', None, ForeignKey(
'taxa_table.taxa_table_key', ondelete="CASCADE")),
Column('site_in_project_count_fkey', None, ForeignKey(
'site_in_project_table.site_in_project_key', ondelete="CASCADE")),
Column('year', NUMERIC),
Column('month', NUMERIC),
Column('day', NUMERIC),
Column('spatial_replication_level_1', VARCHAR(50)),
Column('spatial_replication_level_2', VARCHAR(50)),
Column('spatial_replication_level_3', VARCHAR(50)),
Column('spatial_replication_level_4', VARCHAR(50)),
Column('spatial_replication_level_5', VARCHAR(50)),
Column('treatment_type_1', VARCHAR(200)),
Column('treatment_type_2', VARCHAR(200)),
Column('treatment_type_3', VARCHAR(200)),
Column('structure_type_1', VARCHAR(200)),
Column('structure_type_2', VARCHAR(200)),
Column('structure_type_3', VARCHAR(200)),
Column('structure_type_4', VARCHAR(50)),
Column('count_observation', NUMERIC),
Column('covariates', TEXT),
Column('metadata_count_key', Integer))
# Biomass Table
biomass_table = Table(
'biomass_table', metadata,
Column('biomass_table_key', Integer, primary_key=True),
Column('taxa_biomass_fkey', None, ForeignKey(
'taxa_table.taxa_table_key', ondelete="CASCADE")),
Column('site_in_project_biomass_fkey', None, ForeignKey(
'site_in_project_table.site_in_project_key', ondelete="CASCADE")),
Column('year', NUMERIC),
Column('month', NUMERIC),
Column('day', NUMERIC),
Column('spatial_replication_level_1', VARCHAR(50)),
Column('spatial_replication_level_2', VARCHAR(50)),
Column('spatial_replication_level_3', VARCHAR(50)),
Column('spatial_replication_level_4', VARCHAR(50)),
Column('spatial_replication_level_5', VARCHAR(50)),
Column('treatment_type_1', VARCHAR(200)),
Column('treatment_type_2', VARCHAR(200)),
Column('treatment_type_3', VARCHAR(200)),
Column('structure_type_1', VARCHAR(200)),
Column('structure_type_2', VARCHAR(200)),
Column('structure_type_3', VARCHAR(200)),
Column('structure_type_4', VARCHAR(50)),
Column('biomass_observation', NUMERIC),
Column('covariates', TEXT),
Column('metadata_biomass_key', Integer))
# Density Table
density_table = Table(
'density_table', metadata,
Column('density_table_key', Integer, primary_key=True),
Column('taxa_density_fkey', None, ForeignKey(
'taxa_table.taxa_table_key', ondelete="CASCADE")),
Column('site_in_project_density_fkey', None, ForeignKey(
'site_in_project_table.site_in_project_key', ondelete="CASCADE")),
Column('year', NUMERIC),
Column('month', NUMERIC),
Column('day', NUMERIC),
Column('spatial_replication_level_1', VARCHAR(50)),
Column('spatial_replication_level_2', VARCHAR(50)),
Column('spatial_replication_level_3', VARCHAR(50)),
Column('spatial_replication_level_4', VARCHAR(50)),
Column('spatial_replication_level_5', VARCHAR(50)),
Column('treatment_type_1', VARCHAR(200)),
Column('treatment_type_2', VARCHAR(200)),
Column('treatment_type_3', VARCHAR(200)),
Column('structure_type_1', VARCHAR(200)),
Column('structure_type_2', VARCHAR(200)),
Column('structure_type_3', VARCHAR(200)),
Column('structure_type_4', VARCHAR(50)),
Column('density_observation', NUMERIC),
Column('covariates', TEXT),
Column('metadata_density_key', Integer))
# Percent Cover Table
percent_cover_table = Table(
'percent_cover_table', metadata,
Column('percent_cover_table_key', Integer, primary_key=True),
Column('taxa_percent_cover_fkey', None, ForeignKey(
'taxa_table.taxa_table_key', ondelete="CASCADE")),
Column('site_in_project_percent_cover_fkey', None, ForeignKey(
'site_in_project_table.site_in_project_key', ondelete="CASCADE")),
Column('year', NUMERIC),
Column('month', NUMERIC),
Column('day', NUMERIC),
Column('spatial_replication_level_1', VARCHAR(50)),
Column('spatial_replication_level_2', VARCHAR(50)),
Column('spatial_replication_level_3', VARCHAR(50)),
Column('spatial_replication_level_4', VARCHAR(50)),
Column('spatial_replication_level_5', VARCHAR(50)),
Column('treatment_type_1', VARCHAR(200)),
Column('treatment_type_2', VARCHAR(200)),
Column('treatment_type_3', VARCHAR(200)),
Column('structure_type_1', VARCHAR(200)),
Column('structure_type_2', VARCHAR(200)),
Column('structure_type_3', VARCHAR(200)),
Column('structure_type_4', VARCHAR(50)),
Column('percent_cover_observation', NUMERIC),
Column('covariates', TEXT),
Column('metadata_percent_cover_key', Integer))
# Individual Table
individual_table = Table(
'individual_table', metadata,
Column('individual_table_key', Integer, primary_key=True),
Column('taxa_individual_fkey', None, ForeignKey(
'taxa_table.taxa_table_key', ondelete="CASCADE")),
Column('site_in_project_individual_fkey', None, ForeignKey(
'site_in_project_table.site_in_project_key', ondelete="CASCADE")),
Column('year', NUMERIC),
Column('month', NUMERIC),
Column('day', NUMERIC),
Column('spatial_replication_level_1', VARCHAR(50)),
Column('spatial_replication_level_2', VARCHAR(50)),
Column('spatial_replication_level_3', VARCHAR(50)),
Column('spatial_replication_level_4', VARCHAR(50)),
Column('spatial_replication_level_5', VARCHAR(50)),
Column('treatment_type_1', VARCHAR(200)),
Column('treatment_type_2', VARCHAR(200)),
Column('treatment_type_3', VARCHAR(200)),
Column('structure_type_1', VARCHAR(200)),
Column('structure_type_2', VARCHAR(200)),
Column('structure_type_3', VARCHAR(200)),
Column('structure_type_4', VARCHAR(50)),
Column('individual_observation', NUMERIC),
Column('covariates', TEXT),
Column('metadata_individual_key', Integer))
# This command takes all the information that was stored in the
# metadata catalog and uses it to populate the database that
# we connected to with our engine (user=postgres, database=LTER)
metadata.create_all(engine)
lterex.to_sql(
ltertablename, con=engine, if_exists="append", index=False)
|
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import sys
import json
import time
import click
import errno
import string
import random
import shutil
import logging
import tarfile
import zipfile
import tempfile
import collections
from shutil import copy
from contextlib import closing, contextmanager
from backports.shutil_get_terminal_size import get_terminal_size
import yaml
import requests
from retrying import retry
from .logger import get_logger, get_events_logger
from .exceptions import CloudifyCliError, CloudifyTimeoutError
from .constants import SUPPORTED_ARCHIVE_TYPES, DEFAULT_TIMEOUT
from .execution_events_fetcher import ExecutionEventsFetcher
from cloudify._compat import urlparse
from cloudify.models_states import BlueprintUploadState
from cloudify_rest_client.constants import VisibilityState
from cloudify_rest_client.exceptions import CloudifyClientError
WAIT_FOR_BLUEPRINT_UPLOAD_SLEEP_INTERVAL = 1
def get_deployment_environment_execution(client, deployment_id, workflow):
executions = client.executions.list(deployment_id=deployment_id,
workflow_id=workflow,
sort='created_at',
is_descending=True)
if executions and len(executions) > 0:
return executions[0]
raise RuntimeError(
'Failed to get {0} workflow execution for deployment {1}'.format(
workflow, deployment_id)
)
def dump_to_file(collection, file_path):
with open(file_path, 'a') as f:
f.write(os.linesep.join(collection))
f.write(os.linesep)
def is_virtual_env():
if hasattr(sys, 'base_prefix'):
# py3 case, with the stdlib venv
return sys.base_prefix != sys.prefix
return hasattr(sys, 'real_prefix')
# TODO: Really? Remove!
def get_cwd():
"""Allows use to patch the cwd when needed.
"""
return os.getcwd()
def remove_if_exists(path):
try:
if os.path.isfile(path):
os.remove(path)
if os.path.isdir(path):
shutil.rmtree(path)
except OSError as e:
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
raise # re-raise exception if a different error occurred
def generate_random_string(size=6,
chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def generate_suffixed_id(id):
return '{0}_{1}'.format(id, generate_random_string())
def is_archive(source):
return tarfile.is_tarfile(source) or zipfile.is_zipfile(source)
def extract_archive(source):
if tarfile.is_tarfile(source):
return untar(source)
elif zipfile.is_zipfile(source):
return unzip(source)
raise CloudifyCliError(
'Unsupported archive type provided or archive is not valid: {0}.'
' Supported archive types are: {1}'
.format(source, SUPPORTED_ARCHIVE_TYPES)
)
def tar(source, destination):
logger = get_logger()
logger.debug('Creating tgz archive: {0}...'.format(destination))
with closing(tarfile.open(destination, 'w:gz')) as tar:
tar.add(source, arcname=os.path.basename(source))
def untar(archive, destination=None):
if not destination:
destination = tempfile.mkdtemp()
logger = get_logger()
logger.debug('Extracting tar archive {0} to {1}...'
.format(archive, destination))
with closing(tarfile.open(name=archive)) as tar:
tar.extractall(path=destination, members=tar.getmembers())
return destination
def zip_files(files):
source_folder = tempfile.mkdtemp()
destination_zip = source_folder + '.zip'
for path in files:
copy(path, source_folder)
create_zip(source_folder, destination_zip, include_folder=False)
shutil.rmtree(source_folder)
return destination_zip
def create_zip(source, destination, include_folder=True):
logger = get_logger()
logger.debug('Creating zip archive: {0}...'.format(destination))
with closing(zipfile.ZipFile(destination, 'w')) as zip_file:
for root, _, files in os.walk(source):
for filename in files:
file_path = os.path.join(root, filename)
source_dir = os.path.dirname(source) if include_folder\
else source
zip_file.write(
file_path, os.path.relpath(file_path, source_dir))
return destination
def unzip(archive, destination=None):
if not destination:
destination = tempfile.mkdtemp()
logger = get_logger()
logger.debug('Extracting zip {0} to {1}...'.format(archive, destination))
with closing(zipfile.ZipFile(archive, 'r')) as zip_file:
zip_file.extractall(destination)
return destination
def download_file(url, destination=None, keep_name=False):
"""Download file.
:param url: Location of the file to download
:type url: str
:param destination:
Location where the file should be saved (autogenerated by default)
:param keep_name: use the filename from the url as destination filename
:type destination: str | None
:returns: Location where the file was saved
:rtype: str
"""
CHUNK_SIZE = 1024
logger = get_logger()
if not destination:
if keep_name:
path = urlparse(url).path
name = os.path.basename(path)
destination = os.path.join(tempfile.mkdtemp(), name)
else:
fd, destination = tempfile.mkstemp()
os.close(fd)
logger.info('Downloading {0} to {1}...'.format(url, destination))
try:
response = requests.get(url, stream=True)
except requests.exceptions.RequestException as ex:
raise CloudifyCliError(
'Failed to download {0}. ({1})'.format(url, str(ex)))
final_url = response.url
if final_url != url:
logger.debug('Redirected to {0}'.format(final_url))
try:
with open(destination, 'wb') as destination_file:
for chunk in response.iter_content(CHUNK_SIZE):
destination_file.write(chunk)
except IOError as ex:
raise CloudifyCliError(
'Failed to download {0}. ({1})'.format(url, str(ex)))
return destination
def generate_progress_handler(file_path, action='', max_bar_length=80):
"""Returns a function that prints a progress bar in the terminal
:param file_path: The name of the file being transferred
:param action: Uploading/Downloading
:param max_bar_length: Maximum allowed length of the bar. Default: 80
:return: The configured print_progress function
"""
# We want to limit the maximum line length to 80, but allow for a smaller
# terminal size. We also include the action string, and some extra chars
terminal_width = get_terminal_size().columns
# This takes care of the case where there is no terminal (e.g. unittest)
terminal_width = terminal_width or max_bar_length
bar_length = min(max_bar_length, terminal_width) - len(action) - 12
# Shorten the file name if it's too long
file_name = os.path.basename(file_path)
if len(file_name) > (bar_length // 4) + 3:
file_name = file_name[:bar_length // 4] + '...'
bar_length -= len(file_name)
def print_progress(read_bytes, total_bytes):
"""Print upload/download progress on a single line
Call this function in a loop to create a progress bar in the terminal
:param read_bytes: Number of bytes already processed
:param total_bytes: Total number of bytes in the file
"""
filled_length = min(bar_length, int(round(bar_length * read_bytes /
float(total_bytes))))
percents = min(100.00, round(
100.00 * (read_bytes / float(total_bytes)), 2))
bar = '#' * filled_length + '-' * (bar_length - filled_length)
# The \r caret makes sure the cursor moves back to the beginning of
# the line
msg = '\r{0} {1} |{2}| {3}%'.format(action, file_name, bar, percents)
click.echo(msg, nl=False)
if read_bytes >= total_bytes:
sys.stdout.write('\n')
return print_progress
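# Illustrative sketch (hypothetical file name, not from the original source):
#
#   progress = generate_progress_handler('blueprint.tar.gz', action='Uploading')
#   progress(512, 2048)  # prints something like: Uploading blueprint.tar.gz |##--...| 25.0%
#
# The returned callable is intended to be passed as a progress callback and
# invoked repeatedly with (bytes_processed, total_bytes).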
@contextmanager
def handle_client_error(status_code, message, logger):
"""Gracefully handle client errors with specific status codes
"""
try:
yield
except CloudifyClientError as e:
if e.status_code != status_code:
raise
logger.info(message)
@contextmanager
def prettify_client_error(status_codes, logger):
"""Prettify client errors with specific status codes
:param status_codes: List of status codes
:param logger: Logger for writing the error
"""
try:
yield
except CloudifyClientError as e:
if e.status_code not in status_codes:
raise
logger.error('Error: %s', e)
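# Illustrative sketch (hypothetical call, not from the original source):
#
#   with prettify_client_error([404], logger):
#       client.blueprints.get('no-such-blueprint')
#
# A CloudifyClientError whose status code is 404 would be logged instead of
# propagating; any other status code is re-raised.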
def get_visibility(private_resource,
visibility,
logger,
valid_values=VisibilityState.STATES):
# These arguments are mutually exclusive so only one can be used
if private_resource:
logger.info("The 'private_resource' argument will be deprecated soon, "
"please use the 'visibility' argument instead")
return VisibilityState.PRIVATE
validate_visibility(visibility, valid_values)
return visibility
def validate_visibility(visibility, valid_values=VisibilityState.STATES):
if visibility and visibility not in valid_values:
raise CloudifyCliError(
"Invalid visibility: `{0}`. Valid visibility's values are: "
"{1}".format(visibility, valid_values)
)
def get_local_path(source, destination=None, create_temp=False):
allowed_schemes = ['http', 'https']
if urlparse(source).scheme in allowed_schemes:
downloaded_file = download_file(source, destination, keep_name=True)
return downloaded_file
elif os.path.isfile(source):
if not destination and create_temp:
source_name = os.path.basename(source)
destination = os.path.join(tempfile.mkdtemp(), source_name)
if destination:
shutil.copy(source, destination)
return destination
else:
return source
else:
raise CloudifyCliError(
'You must provide either a path to a local file, or a remote URL '
'using one of the allowed schemes: {0}'.format(allowed_schemes))
def explicit_tenant_name_message(tenant_name, logger):
if tenant_name:
logger.info('Explicitly using tenant `{0}`'.format(tenant_name))
def deep_update_dict(dest_dict, src_dict):
for key, value in src_dict.items():
if isinstance(dest_dict, collections.MutableMapping):
if isinstance(value, collections.MutableMapping):
dest_dict[key] = deep_update_dict(dest_dict.get(key), value)
else:
dest_dict[key] = src_dict[key]
else:
dest_dict = {key: src_dict[key]}
return dest_dict
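# Illustrative sketch (hypothetical dicts) of the deep-merge behaviour above:
#
#   deep_update_dict({'a': {'b': 1}, 'c': 2}, {'a': {'d': 3}})
#   # -> {'a': {'b': 1, 'd': 3}, 'c': 2}
#
# Nested mappings are merged key by key; non-mapping values are overwritten.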
def deep_subtract_dict(dest_dict, src_dict):
for key, value in src_dict.items():
if isinstance(value, collections.MutableMapping):
deep_subtract_dict(dest_dict.get(key), value)
else:
if key not in dest_dict:
raise CloudifyCliError('Key {} does not exist'.format(key))
dest_dict.pop(key)
def insert_dotted_key_to_dict(dest_dict, key, value):
"""Insert the value into dest_dict according to the key which is in dot
hierarchy format
:param dest_dict: The dict to update
:param key: The dot hierarchy key, e.g. 'a.b.c'
:param value: The value to insert, e.g. 'd'
:return: dest_dict will include the value in the wanted location,
e.g. {a: {b: {c: d}}}
"""
key_path = key.split('.')
for item in key_path[:-1]:
dest_dict.setdefault(item, {})
dest_dict = dest_dict[item]
dest_dict.setdefault(key_path[-1], value)
def assert_one_argument(arguments):
"""Asserts exactly one argument in a dictionary of
{argument_name: argument} is not null or False, else raises an error
"""
filtered = [k for k in arguments if arguments[k]]
if len(filtered) != 1:
raise CloudifyCliError('Please provide one of the options: ' +
', '.join(arguments))
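# Illustrative sketch (hypothetical arguments, not from the original source):
#
#   assert_one_argument({'json_output': True, 'yaml_output': None})  # passes
#   assert_one_argument({'json_output': True, 'yaml_output': True})  # raises CloudifyCliError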
def load_json(input_path):
if not input_path:
return
with open(input_path) as json_file:
return json.load(json_file)
# return json_content
def print_dict(keys_dict, logger):
for key, values in keys_dict.items():
str_values = [str(value) for value in values]
logger.info('{0}: {1}'. format(key, str_values))
def get_dict_from_yaml(yaml_path):
with open(yaml_path) as f:
return yaml.load(f, yaml.Loader)
def wait_for_blueprint_upload(client, blueprint_id, logging_level):
def _handle_errors():
if blueprint['state'] in BlueprintUploadState.FAILED_STATES:
error_msg = '{error_type} blueprint: {description}.'.format(
error_type=blueprint['state'].capitalize().replace('_', ' '),
description=blueprint['error']
)
if logging_level == logging.DEBUG:
error_msg += '\nError traceback: {}'.format(
blueprint['error_traceback'])
raise CloudifyCliError(error_msg)
@retry(stop_max_attempt_number=5, wait_fixed=1000)
def _get_blueprint_and_upload_execution_id():
bp = client.blueprints.get(blueprint_id)
# upload_execution['id'] might not be available at first, hence retry
return bp, bp.upload_execution['id']
try:
blueprint, execution_id = _get_blueprint_and_upload_execution_id()
except KeyError:
raise RuntimeError(
'Failed to get upload_blueprint workflow execution for blueprint '
'{0}. That may indicate a problem with blueprint upload. Verify '
'blueprint\'s state by running command `cfy blueprints get {0}`.'
.format(blueprint_id)
)
# if blueprint upload already ended - return without waiting
if blueprint['state'] in BlueprintUploadState.END_STATES:
_handle_errors()
return blueprint
deadline = time.time() + DEFAULT_TIMEOUT
events_fetcher = ExecutionEventsFetcher(
client, execution_id=execution_id, include_logs=True)
    # Poll for execution status and execution logs, until the execution ends
    # and we receive an event whose type is in WORKFLOW_END_TYPES
upload_ended = False
events_handler = get_events_logger(None)
# Poll for blueprint upload status, until the upload ends
while True:
if time.time() > deadline:
raise CloudifyTimeoutError('Blueprint {0} upload timed '
'out'.format(blueprint.id))
timeout = deadline - time.time() # update remaining timeout
if not upload_ended:
blueprint = client.blueprints.get(blueprint.id)
upload_ended = \
blueprint['state'] in BlueprintUploadState.END_STATES
events_fetcher.fetch_and_process_events(
events_handler=events_handler, timeout=timeout)
if upload_ended:
break
time.sleep(WAIT_FOR_BLUEPRINT_UPLOAD_SLEEP_INTERVAL)
blueprint = client.blueprints.get(blueprint_id)
_handle_errors()
return blueprint
|
|
from django.contrib.auth.models import User
from django.db import models
from core.lists import (
ALIGNMENT_CHOICES,
DIRECTION_CHOICES,
DOOR_RESET_CHOICES,
DOOR_TRIGGER_TYPE_CHOICES,
SEX_CHOICES,
WEAPON_TYPE_CHOICES,
)
### Area models ###
class AreaFlag(models.Model):
"""
Flags that can be applied to an area.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
description = models.TextField()
class Area(models.Model):
"""
A new TFC Area! Yay!
"""
author = models.OneToOneField(User, blank=False)
vnum = models.PositiveIntegerField(unique=True)
name = models.TextField(blank=False, unique=True)
forum = models.URLField(blank=True)
level_low = models.PositiveSmallIntegerField(blank=False, default=1)
level_high = models.PositiveSmallIntegerField(blank=False, default=50)
flags = models.ManyToManyField(AreaFlag)
notes = models.TextField()
class AreaHelp(models.Model):
"""
A help file related to this area.
BLOCK: #HELP
"""
area = models.OneToOneField(Area, blank=False)
keywords = models.CharField(max_length=50)
level = models.SmallIntegerField(blank=False, default=0, help_text="Lowest level that can read this help.")
blank_line = models.BooleanField(blank=False, default=False, help_text="Include blank line at the beginning of this help?")
text = models.TextField(blank=False)
### Item models ###
# TODO: fill out the write_values() defs for each item type. use adv string formatting!
# TODO: write tests to ensure that the item type ids are correct and all t
class Spell(models.Model):
"""
Approved spells.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
class WeaponDamageType(models.Model):
"""
Damage identifiers done by certain types of weapons.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
weapon_type = models.CharField(max_length=1, choices=WEAPON_TYPE_CHOICES)
class DrinkType(models.Model):
"""
Types that a drinkable can be.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
adjective = models.CharField(max_length=50, blank=False, unique=True)
class ContainerFlag(models.Model):
"""
Flags that can be applied to a container.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
description = models.TextField()
class ItemType(models.Model):
"""
Types that an item can be.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
description = models.TextField()
class ExtraDescription(models.Model):
"""
Extra description available to add to an object.
"""
item = models.ForeignKey('Item', related_name='extra_descriptions')
TFC_id = models.PositiveIntegerField(blank=False)
keywords = models.TextField(blank=False)
description = models.TextField(blank=False)
class WearFlag(models.Model):
"""
Places an item can be worn.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
description = models.TextField()
class ResetWearFlag(models.Model):
"""
Places an item can be reset to on a mobile.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
description = models.TextField()
class ItemExtraFlag(models.Model):
"""
Extra flags available to apply to items.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
description = models.TextField()
class ItemModifier(models.Model):
"""
Stats that an item can modify in some way - these are the Item Applies flags.
If any of these are used on an item, it automatically gets the (Magical)
ItemExtraFlag.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
requires_approval = models.BooleanField(blank=False, default=False)
description = models.TextField()
### There is actually a reason for all this Item model madness. It has to do
### with properly displaying the fields for creation of item types to the
### area builder. This could all be cleaned up with some Class Factory
### magic (which would also make Item Types dynamically editable), but that
### is going to have to be on the wishlist for now.
class Item(models.Model):
"""
An item that lives in a TFC Area.
BLOCK: #OBJECTS
"""
area = models.ForeignKey(Area, blank=False)
vnum = models.PositiveIntegerField(blank=False)
# names needs to be lower()d.
names = models.TextField(help_text='A few keywords for this object.')
# short_desc needs to be lower()d.
short_desc = models.TextField(help_text='A short phrase identifying the object; e.g. "a stone hammer"')
long_desc = models.TextField(help_text='Description of an object standing alone; e.g. "A heavy stone hammer lies here."')
takeable = models.BooleanField(default=False)
# A shopkeeper will sell most Item types, but not Trash.
salable = True
wear_flags = models.ForeignKey(WearFlag)
weight = models.PositiveSmallIntegerField(default=4, help_text="Total weight including carrying weight if this is a container.")
cost = models.PositiveIntegerField(default=1000)
values = models.PositiveIntegerField(help_text="Number of coins")
flammable = models.BooleanField(blank=False, default=False)
metallic = models.BooleanField(blank=False, default=False)
two_handed = models.BooleanField(blank=False, default=False)
underwater_breath = models.BooleanField(blank=False, default=False)
total_in_game = models.PositiveSmallIntegerField(blank=False, default=1, help_text="Total number of this item allowed in the game (won't reset when max is reached)")
notes = models.TextField()
class Meta:
unique_together = ('area', 'vnum')
class Light(Item):
"""
An item of the Light type.
"""
item_type = 1
hours = models.SmallIntegerField(blank=False, default=0, help_text="Number of hours of light. Use -1 for infinite and 0 for dead.")
def write_values(self):
"""
Prints out the values needed for a zone file.
"""
print "0 0 %d 0" % (hours)
class Fountain(Item):
"""
A fountain item.
"""
item_type = 25
spell = models.ForeignKey(Spell, blank=False)
spell_level = models.PositiveSmallIntegerField(blank=False)
drink_type = models.ForeignKey(DrinkType, blank=False)
def write_values(self):
pass
class BaseWeapon(Item):
"""
Weapon base class. There are weapons and animal weapons.
"""
minimum_damage = models.PositiveSmallIntegerField(blank=False)
maximum_damage = models.PositiveSmallIntegerField(blank=False)
weapon_damage_type = models.ForeignKey(WeaponDamageType, blank=False)
class Meta:
abstract = True
class BaseArmor(Item):
"""
Base armor class. There is armor and animal armor.
"""
ac_rating = models.SmallIntegerField(blank=False)
class Meta:
abstract = True
class BaseFood(Item):
"""
Base food class. There is food and pet food.
"""
hours = models.PositiveSmallIntegerField(blank=False)
poison = models.SmallIntegerField(blank=False, default=0, help_text="0 is non-poisonous, non-zero is poisonous.")
class Meta:
abstract = True
class SimpleMagicalItem(Item):
"""
A magical item that just has one charge of up to three spells.
"""
spells = models.ManyToManyField(Spell, blank=False, help_text="Choose up to three spells. Only three will be exported, randomly if more exist.")
spell_level = models.PositiveSmallIntegerField(blank=False)
class Meta:
abstract = True
class ChargedMagicalItem(Item):
"""
A magical item with charges.
"""
spell = models.ForeignKey(Spell, blank=False)
spell_level = models.PositiveSmallIntegerField(blank=False)
max_charges = models.PositiveSmallIntegerField(blank=False)
remaining_charges = models.PositiveSmallIntegerField(blank=False)
class Meta:
abstract = True
class Weapon(BaseWeapon):
item_type = 5
def write_values(self):
pass
class AnimalWeapon(BaseWeapon):
"""
    A weapon derived from an animal (e.g. a scorpion's stinger).
"""
item_type = 6
def write_values(self):
pass
class Meta:
verbose_name = 'animal-based weapon'
class Armor(BaseArmor):
item_type = 9
def write_values(self):
pass
class Meta:
verbose_name = 'armor item'
class AnimalArmor(BaseArmor):
"""
Armor derived from an animal. Mobs that have no_wear_armor
can still wear animal armor.
"""
item_type = 14
def write_values(self):
pass
class Meta:
verbose_name = 'animal-based armor item'
class Food(BaseFood):
item_type = 19
def write_values(self):
pass
class Meta:
verbose_name = 'food item'
class PetFood(BaseFood):
item_type = 11
def write_values(self):
pass
class Meta:
verbose_name = 'pet food item'
class Scroll(SimpleMagicalItem):
item_type = 2
def write_values(self):
pass
class Potion(SimpleMagicalItem):
item_type = 10
def write_values(self):
pass
class Pill(SimpleMagicalItem):
item_type = 26
def write_values(self):
pass
class Wand(ChargedMagicalItem):
item_type = 3
def write_values(self):
pass
class Staff(ChargedMagicalItem):
item_type = 4
def write_values(self):
pass
class Meta:
verbose_name_plural = 'staves'
class Fetish(ChargedMagicalItem):
item_type = 7
def write_values(self):
pass
class Meta:
verbose_name_plural = 'fetishes'
class Ring(ChargedMagicalItem):
item_type = 29
def write_values(self):
pass
class Relic(ChargedMagicalItem):
item_type = 33
def write_values(self):
pass
class NonMagicalItem(Item):
class Meta:
abstract = True
class Treasure(NonMagicalItem):
item_type = 8
def write_values(self):
pass
class Meta:
verbose_name = 'treasure item'
class Furniture(NonMagicalItem):
item_type = 12
def write_values(self):
pass
class Meta:
verbose_name = 'furniture item'
class Trash(NonMagicalItem):
item_type = 13
# Shopkeepers don't sell Trash. Sorry.
salable = False
def write_values(self):
pass
class Meta:
verbose_name = 'trash item'
class Key(NonMagicalItem):
item_type = 18
def write_values(self):
pass
class Boat(NonMagicalItem):
item_type = 22
def write_values(self):
pass
class Decoration(NonMagicalItem):
item_type = 27
def write_values(self):
pass
class Jewelry(NonMagicalItem):
item_type = 30
def write_values(self):
pass
class Meta:
verbose_name = 'jewelry item'
class DrinkContainer(Item):
"""
An item that can contain liquids.
"""
item_type = 17
capacity = models.SmallIntegerField(blank=False, help_text="Capacity of this drink container.")
remaining = models.SmallIntegerField(blank=False, help_text="Amount remaining in the container.")
drink_type = models.ForeignKey(DrinkType, blank=False)
poison = models.SmallIntegerField(blank=False, default=0, help_text="0 is non-poisonous, non-zero is poisonous.")
def write_values(self):
pass
class Container(Item):
"""
An item that can contain other non-liquid items.
"""
item_type = 15
key = models.ForeignKey(Key, blank=True)
flags = models.ManyToManyField('ContainerFlag')
def write_values(self):
pass
class ItemContainerReset(models.Model):
"""
AKA a Place Reset.
Loads an object into another object. This will be placed onto a Container
type object, since only containers can contain other objects.
"""
container = models.ForeignKey(Container, blank=False, related_name='item_resets')
item = models.ForeignKey(Item, blank=False, related_name='container_resets')
reset_every_cycle = models.BooleanField(blank=False, default=False, help_text="Reset this item every cycle (instead of only when zone is deserted)?")
comment = models.TextField(blank=True)
class Meta:
verbose_name = 'item-container reset'
unique_together = ('container', 'item')
class Money(Item):
item_type = 20
number_of_coins = models.SmallIntegerField(blank=False, default=1, help_text="How many coins is this money worth?")
def write_values(self):
pass
class Meta:
verbose_name = 'pile of money'
verbose_name_plural = 'piles of money'
### Mobile models
class Race(models.Model):
"""
Races available for shopkeepers.
"""
TFC_id = models.SmallIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
class KnownLanguage(models.Model):
"""
Languages that mobiles can *know*. The Language called "God" has a
different id if it is preferred vs. just known by a mob, so we have to
    have separate models here.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
class PreferredLanguage(models.Model):
"""
Languages that mobiles can *prefer*. The Language called "God" has a
different id if it is preferred vs. just known by a mob, so we have to
    have separate models here.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
class ActionFlag(models.Model):
"""
Action flags available to apply to mobiles.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
description = models.TextField(blank=False)
class AffectFlag(models.Model):
"""
Affect flags available to apply to mobiles.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
description = models.TextField()
implemented = models.BooleanField(blank=False, default=True)
class SpecialFunction(models.Model):
"""
Additional functionality for mobiles.
BLOCK: #SPECIALS
"""
TFC_id = models.CharField(max_length=50, blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
description = models.TextField()
generic = models.BooleanField(blank=False, default=True)
class Mobile(models.Model):
"""
A creature that can move around and do stuff, but isn't human.
BLOCK: #MOBILES
"""
area = models.ForeignKey(Area, blank=False)
vnum = models.PositiveIntegerField(blank=False)
names = models.TextField()
short_desc = models.TextField()
long_desc = models.TextField()
look_desc = models.TextField()
level = models.PositiveSmallIntegerField(default=1, help_text="What level is this Mob?")
alignment = models.PositiveSmallIntegerField(choices=ALIGNMENT_CHOICES)
sex = models.SmallIntegerField(choices=SEX_CHOICES)
is_animal = models.BooleanField(default=False, help_text="Is this Mob an animal?")
spell = models.ForeignKey(Spell, blank=True, help_text="What spell (if any) does this Mob know?")
affect_flags = models.ManyToManyField(AffectFlag)
action_flags = models.ManyToManyField(ActionFlag)
    no_wear = models.BooleanField(default=False, help_text="Should this Mob be prevented from wearing armor (animal armor excluded)?")
special_functions = models.ManyToManyField(SpecialFunction, blank=True)
known_languages = models.ManyToManyField(KnownLanguage, help_text="What languages does this Mob know?")
preferred_language = models.ForeignKey(PreferredLanguage, help_text="What language does this Mob prefer?")
total_in_game = models.PositiveSmallIntegerField(blank=False, default=1, help_text="Total number of this mob allowed in the game (won't reset when max is reached)")
notes = models.TextField()
class Meta:
unique_together = ('area', 'vnum')
class Shopkeeper(models.Model):
"""
A shopkeeper mob.
BLOCK: #SHOPS
"""
mobile = models.OneToOneField(Mobile, blank=False, unique=True, help_text="Which Mob is running this shop?")
race = models.ForeignKey(Race, blank=False, help_text="What race is this shopkeeper?")
will_buy = models.CommaSeparatedIntegerField(max_length=50, blank=True, help_text="Item types the mob will buy. Only the first 5 will be used.")
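    # For illustration: the integers are the item_type values defined on the
    # Item subclasses above, so e.g. "5,9,19" would have the shopkeeper buy
    # weapons (5), armor (9) and food (19).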
opens = models.SmallIntegerField(blank=False, default=6, help_text="Hour of the day this shop opens. (0-23)")
closes = models.SmallIntegerField(blank=False, default=23, help_text="Hour of the day this shop closes. (0-23)")
reset_items = models.ManyToManyField(Item, blank=True)
class MobRoomReset(models.Model):
"""
AKA a Mob Reset.
Reset definition for a Mob into a Room.
BLOCK: #RESETS
"""
mobile = models.ForeignKey(Mobile, blank=False, related_name='room_resets')
room = models.ForeignKey('Room', blank=False, related_name='mob_resets')
reset_every_cycle = models.BooleanField(blank=False, default=False, help_text="Reset this item every cycle (instead of only when zone is deserted)?")
comment = models.TextField(blank=True)
class Meta:
verbose_name = 'mob-room reset'
unique_together = ('mobile', 'room')
class MobItemReset(models.Model):
"""
AKA a Give Reset, or an Equip Reset if a wear_location is set.
Reset definition for an object into a Mob.
BLOCK: #RESETS
"""
item = models.ForeignKey(Item, blank=False, related_name='mob_resets')
mobile = models.ForeignKey(Mobile, blank=False, related_name='item_resets')
reset_every_cycle = models.BooleanField(blank=False, default=False, help_text="Reset this item every cycle (instead of only when zone is deserted)?")
wear_location = models.ForeignKey(ResetWearFlag, blank=True, help_text="Optional: Where (if anywhere) do you want the Mob to equip this item?")
comment = models.TextField(blank=True)
class Meta:
verbose_name = 'mob-item reset'
unique_together = ('item', 'mobile')
### Room and Door models ###
class RoomType(models.Model):
"""
Types of terrain that a room can have/be.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
description = models.TextField()
class RoomFlag(models.Model):
"""
Flags that can be applied to a room.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
description = models.TextField()
implemented = models.BooleanField(blank=False, default=True)
class RoomSpecialFunction(models.Model):
"""
Special functions that can be attached to a room.
BLOCK: #RSPECS
"""
TFC_id = models.CharField(max_length=50, blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
description = models.TextField()
class Room(models.Model):
"""
    A room in a TFC Area.
BLOCK: #ROOMS
"""
area = models.ForeignKey(Area, blank=False)
vnum = models.PositiveIntegerField(blank=False)
special_functions = models.ManyToManyField(RoomSpecialFunction, blank=True)
notes = models.TextField()
class Meta:
unique_together = ('area', 'vnum')
class ItemRoomReset(models.Model):
"""
AKA an Object Reset.
Resets an Item into a Room.
BLOCK: #RESETS
"""
room = models.ForeignKey(Room, blank=False, related_name='object_resets')
item = models.ForeignKey(Item, blank=False, related_name='room_resets')
reset_every_cycle = models.BooleanField(blank=False, default=False, help_text="Reset this item every cycle (instead of only when zone is deserted)?")
comment = models.TextField(blank=True)
class Meta:
verbose_name = 'item-room reset'
unique_together = ('room', 'item')
class DoorTrigger(models.Model):
"""
Door special functions - holds both Prevent and Allow Door Specfuns (Triggers).
BLOCK: #TRIGGERS
"""
door = models.ForeignKey('Door', blank=False, related_name='triggers')
TFC_id = models.CharField(max_length=50, blank=False)
trigger_type = models.CharField(max_length=1, choices=DOOR_TRIGGER_TYPE_CHOICES)
class DoorType(models.Model):
"""
Types that a door can be.
"""
TFC_id = models.PositiveIntegerField(blank=False)
name = models.CharField(max_length=50, blank=False, unique=True)
description = models.TextField()
class Door(models.Model):
"""
A door leading out of a room. A room can have up to six doors, one in each direction.
BLOCK: #ROOMS
Listed along with the Room to which they are attached.
"""
room = models.ForeignKey(Room, blank=False, related_name='exits')
name = models.CharField(max_length=50, blank=False, help_text='A one-word description of the door (e.g. "door" or "gate")')
direction = models.CharField(max_length=1, blank=False, choices=DIRECTION_CHOICES)
door_type = models.ForeignKey(DoorType, blank=False)
keywords = models.TextField(blank=False, help_text='Keywords for interacting with the door.')
description = models.TextField(blank=True)
room_to = models.ForeignKey(Room, blank=True, related_name='entrances')
reset = models.BooleanField(blank=False, default=False, help_text="Should this door be reset?")
reset_every_cycle = models.BooleanField(blank=False, default=False, help_text="Reset this door every cycle (instead of only when deserted)?")
reset_value = models.SmallIntegerField(blank=True, choices=DOOR_RESET_CHOICES)
reset_comment = models.TextField(blank=True)
notes = models.TextField()
class Meta:
unique_together = ('room', 'direction')
import hashlib
import logging
import random
import re
import time
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.contrib.sites.models import Site
from django.db import models
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from celery.task import task
from django_statsd.clients import statsd
from timezones.fields import TimeZoneField
from kitsune.lib.countries import COUNTRIES
from kitsune.search.es_utils import UnindexMeBro
from kitsune.search.models import (
SearchMappingType, SearchMixin, register_for_indexing,
register_mapping_type)
from kitsune.sumo import email_utils
from kitsune.sumo.models import ModelBase, LocaleField
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.utils import auto_delete_files, chunked
from kitsune.users.validators import TwitterValidator
log = logging.getLogger('k.users')
SHA1_RE = re.compile('^[a-f0-9]{40}$')
CONTRIBUTOR_GROUP = 'Registered as contributor'
@auto_delete_files
class Profile(ModelBase, SearchMixin):
"""Profile model for django users."""
user = models.OneToOneField(User, primary_key=True,
verbose_name=_lazy(u'User'))
name = models.CharField(max_length=255, null=True, blank=True,
verbose_name=_lazy(u'Display name'))
public_email = models.BooleanField( # show/hide email
default=False, verbose_name=_lazy(u'Make my email public'))
avatar = models.ImageField(upload_to=settings.USER_AVATAR_PATH, null=True,
blank=True, verbose_name=_lazy(u'Avatar'),
max_length=settings.MAX_FILEPATH_LENGTH)
bio = models.TextField(null=True, blank=True,
verbose_name=_lazy(u'Biography'))
website = models.URLField(max_length=255, null=True, blank=True,
verbose_name=_lazy(u'Website'))
twitter = models.CharField(max_length=15, null=True, blank=True, validators=[TwitterValidator],
verbose_name=_lazy(u'Twitter Username'))
facebook = models.URLField(max_length=255, null=True, blank=True,
verbose_name=_lazy(u'Facebook URL'))
mozillians = models.CharField(max_length=255, null=True, blank=True,
verbose_name=_lazy(u'Mozillians Username'))
irc_handle = models.CharField(max_length=255, null=True, blank=True,
verbose_name=_lazy(u'IRC nickname'))
timezone = TimeZoneField(null=True, blank=True,
verbose_name=_lazy(u'Timezone'))
country = models.CharField(max_length=2, choices=COUNTRIES, null=True,
blank=True, verbose_name=_lazy(u'Country'))
# No city validation
city = models.CharField(max_length=255, null=True, blank=True,
verbose_name=_lazy(u'City'))
locale = LocaleField(default=settings.LANGUAGE_CODE,
verbose_name=_lazy(u'Preferred language'))
first_answer_email_sent = models.BooleanField(
default=False, help_text=_lazy(u'Has been sent a first answer contribution email.'))
first_l10n_email_sent = models.BooleanField(
default=False, help_text=_lazy(u'Has been sent a first revision contribution email.'))
involved_from = models.DateField(null=True, blank=True,
verbose_name=_lazy(u'Involved with Mozilla from'))
csat_email_sent = models.DateField(null=True, blank=True,
verbose_name=_lazy(u'When the user was sent a community '
u'health survey'))
class Meta(object):
permissions = (('view_karma_points', 'Can view karma points'),
('deactivate_users', 'Can deactivate users'),
('screen_share', 'Can screen share'),)
def __unicode__(self):
try:
return unicode(self.user)
except Exception as exc:
return unicode('%d (%r)' % (self.pk, exc))
def get_absolute_url(self):
return reverse('users.profile', args=[self.user_id])
def clear(self):
"""Clears out the users profile"""
self.name = ''
self.public_email = False
self.avatar = None
self.bio = ''
self.website = ''
self.twitter = ''
self.facebook = ''
self.mozillians = ''
self.irc_handle = ''
self.city = ''
@property
def display_name(self):
return self.name if self.name else self.user.username
@property
def twitter_usernames(self):
from kitsune.customercare.models import Reply
return list(
Reply.objects.filter(user=self.user)
.values_list('twitter_username', flat=True)
.distinct())
@classmethod
def get_mapping_type(cls):
return UserMappingType
@classmethod
def get_serializer(cls, serializer_type='full'):
# Avoid circular import
from kitsune.users import api
if serializer_type == 'full':
return api.ProfileSerializer
elif serializer_type == 'fk':
return api.ProfileFKSerializer
else:
raise ValueError('Unknown serializer type "{}".'.format(serializer_type))
@property
def last_contribution_date(self):
"""Get the date of the user's last contribution."""
from kitsune.customercare.models import Reply
from kitsune.questions.models import Answer
from kitsune.wiki.models import Revision
dates = []
# Latest Army of Awesome reply:
try:
aoa_reply = Reply.objects.filter(
user=self.user).latest('created')
dates.append(aoa_reply.created)
except Reply.DoesNotExist:
pass
# Latest Support Forum answer:
try:
answer = Answer.objects.filter(
creator=self.user).latest('created')
dates.append(answer.created)
except Answer.DoesNotExist:
pass
# Latest KB Revision edited:
try:
revision = Revision.objects.filter(
creator=self.user).latest('created')
dates.append(revision.created)
except Revision.DoesNotExist:
pass
# Latest KB Revision reviewed:
try:
revision = Revision.objects.filter(
reviewer=self.user).latest('reviewed')
# Old revisions don't have the reviewed date.
dates.append(revision.reviewed or revision.created)
except Revision.DoesNotExist:
pass
if len(dates) == 0:
return None
return max(dates)
@property
def settings(self):
return self.user.settings
@property
def answer_helpfulness(self):
# Avoid circular import
from kitsune.questions.models import AnswerVote
return AnswerVote.objects.filter(answer__creator=self.user, helpful=True).count()
@register_mapping_type
class UserMappingType(SearchMappingType):
list_keys = [
'twitter_usernames',
'itwitter_usernames',
]
@classmethod
def get_model(cls):
return Profile
@classmethod
def get_index_group(cls):
return 'non-critical'
@classmethod
def get_mapping(cls):
return {
'properties': {
'id': {'type': 'long'},
'model': {'type': 'string', 'index': 'not_analyzed'},
'url': {'type': 'string', 'index': 'not_analyzed'},
'indexed_on': {'type': 'integer'},
'username': {'type': 'string', 'index': 'not_analyzed'},
'display_name': {'type': 'string', 'index': 'not_analyzed'},
'twitter_usernames': {
'type': 'string',
'index': 'not_analyzed'
},
'last_contribution_date': {'type': 'date'},
# lower-cased versions for querying:
'iusername': {'type': 'string', 'index': 'not_analyzed'},
'idisplay_name': {'type': 'string', 'analyzer': 'whitespace'},
'itwitter_usernames': {
'type': 'string',
'index': 'not_analyzed'
},
'avatar': {'type': 'string', 'index': 'not_analyzed'},
'suggest': {
'type': 'completion',
'analyzer': 'whitespace',
'payloads': True,
}
}
}
@classmethod
def extract_document(cls, obj_id, obj=None):
"""Extracts interesting thing from a Thread and its Posts"""
if obj is None:
model = cls.get_model()
obj = model.objects.select_related('user').get(pk=obj_id)
if not obj.user.is_active:
raise UnindexMeBro()
d = {}
d['id'] = obj.pk
d['model'] = cls.get_mapping_type_name()
d['url'] = obj.get_absolute_url()
d['indexed_on'] = int(time.time())
d['username'] = obj.user.username
d['display_name'] = obj.display_name
d['twitter_usernames'] = obj.twitter_usernames
d['last_contribution_date'] = obj.last_contribution_date
d['iusername'] = obj.user.username.lower()
d['idisplay_name'] = obj.display_name.lower()
d['itwitter_usernames'] = [u.lower() for u in obj.twitter_usernames]
from kitsune.users.templatetags.jinja_helpers import profile_avatar
d['avatar'] = profile_avatar(obj.user, size=120)
d['suggest'] = {
'input': [
d['iusername'],
d['idisplay_name']
],
'output': _(u'{displayname} ({username})').format(
displayname=d['display_name'], username=d['username']),
'payload': {'user_id': d['id']},
}
return d
@classmethod
def suggest_completions(cls, text):
"""Suggest completions for the text provided."""
USER_SUGGEST = 'user-suggest'
es = UserMappingType.search().get_es()
results = es.suggest(index=cls.get_index(), body={
USER_SUGGEST: {
'text': text.lower(),
'completion': {
'field': 'suggest'
}
}
})
if results[USER_SUGGEST][0]['length'] > 0:
return results[USER_SUGGEST][0]['options']
return []
register_for_indexing('users', Profile)
def get_profile(u):
try:
return Profile.objects.get(user=u)
except Profile.DoesNotExist:
return None
register_for_indexing(
'users',
User,
instance_to_indexee=get_profile)
class Setting(ModelBase):
"""User specific value per setting"""
user = models.ForeignKey(User, verbose_name=_lazy(u'User'),
related_name='settings')
name = models.CharField(max_length=100)
value = models.CharField(blank=True, max_length=60,
verbose_name=_lazy(u'Value'))
class Meta(object):
unique_together = (('user', 'name'),)
def __unicode__(self):
return u'%s %s:%s' % (self.user, self.name, self.value or u'[none]')
@classmethod
def get_for_user(cls, user, name):
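        # Example (hedged sketch): read a per-user setting, falling back to the
        # corresponding SettingsForm field's initial value, e.g.
        #   wants_email = Setting.get_for_user(request.user, 'email_private')
        # 'email_private' is a hypothetical setting name; names that are not
        # fields on SettingsForm raise KeyError (see below).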
from kitsune.users.forms import SettingsForm
form = SettingsForm()
if name not in form.fields.keys():
raise KeyError(("'{name}' is not a field in "
"user.forms.SettingsFrom()").format(name=name))
try:
setting = Setting.objects.get(user=user, name=name)
except Setting.DoesNotExist:
value = form.fields[name].initial or ''
setting = Setting.objects.create(user=user, name=name, value=value)
# Cast to the field's Python type.
return form.fields[name].to_python(setting.value)
# Activation model and manager:
# (based on http://bitbucket.org/ubernostrum/django-registration)
class ConfirmationManager(models.Manager):
"""
Custom manager for confirming keys sent by email.
The methods defined here provide shortcuts for creation of instances
and sending email confirmations.
Activation should be done in specific managers.
"""
def _send_email(self, confirmation_profile, url,
subject, text_template, html_template,
send_to, **kwargs):
"""
Send an email using a passed in confirmation profile.
Use specified url, subject, text_template, html_template and
email to send_to.
"""
current_site = Site.objects.get_current()
email_kwargs = {'activation_key': confirmation_profile.activation_key,
'domain': current_site.domain,
'activate_url': url,
'login_url': reverse('users.login'),
'reg': 'main'}
email_kwargs.update(kwargs)
# RegistrationProfile doesn't have a locale attribute. So if
# we get one of those, then we have to get the real profile
# from the user.
if hasattr(confirmation_profile, 'locale'):
locale = confirmation_profile.locale
else:
locale = confirmation_profile.user.profile.locale
@email_utils.safe_translation
def _make_mail(locale):
mail = email_utils.make_mail(
subject=subject,
text_template=text_template,
html_template=html_template,
context_vars=email_kwargs,
from_email=settings.DEFAULT_FROM_EMAIL,
to_email=send_to)
return mail
email_utils.send_messages([_make_mail(locale)])
def send_confirmation_email(self, *args, **kwargs):
"""This is meant to be overwritten."""
raise NotImplementedError
def create_profile(self, user, *args, **kwargs):
"""
Create an instance of this manager's object class for a given
``User``, and return it.
The activation key will be a SHA1 hash, generated from a combination
of the ``User``'s username and a random salt.
"""
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
activation_key = hashlib.sha1(salt + user.username).hexdigest()
return self.create(user=user, activation_key=activation_key, **kwargs)
class RegistrationManager(ConfirmationManager):
def get_user(self, activation_key):
"""Get the user for the specified activation_key."""
try:
profile = self.get(activation_key=activation_key)
return profile.user
except self.model.DoesNotExist:
return None
def activate_user(self, activation_key, request=None):
"""
Validate an activation key and activate the corresponding
``User`` if valid.
If the key is valid and has not expired, return the ``User``
after activating.
If the key is not valid or has expired, return ``False``.
"""
# Make sure the key we're trying conforms to the pattern of a
# SHA1 hash; if it doesn't, no point trying to look it up in
# the database.
if SHA1_RE.search(activation_key):
try:
profile = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
profile = None
statsd.incr('user.activate-error.does-not-exist')
reason = 'key not found'
if profile:
if not profile.activation_key_expired():
user = profile.user
user.is_active = True
user.save()
# We don't need the RegistrationProfile anymore, delete it.
profile.delete()
# If user registered as contributor, send them the
# welcome email.
if user.groups.filter(name=CONTRIBUTOR_GROUP):
self._send_email(
confirmation_profile=profile,
url=None,
subject=_('Welcome to SUMO!'),
text_template='users/email/contributor.ltxt',
html_template='users/email/contributor.html',
send_to=user.email,
contributor=user)
return user
else:
statsd.incr('user.activate-error.expired')
reason = 'key expired'
else:
statsd.incr('user.activate-error.invalid-key')
reason = 'invalid key'
log.warning(u'User activation failure ({r}): {k}'.format(
r=reason, k=activation_key))
return False
def create_inactive_user(self, username, password, email,
locale=settings.LANGUAGE_CODE,
text_template=None, html_template=None,
subject=None, email_data=None,
volunteer_interest=False, **kwargs):
"""
        Create a new, inactive ``User`` and ``Profile``, generate a
        ``RegistrationProfile``, and email its activation key to the
        ``User``, returning the new ``User``.
"""
new_user = User.objects.create_user(username, email, password)
new_user.is_active = False
new_user.save()
Profile.objects.create(user=new_user, locale=locale)
registration_profile = self.create_profile(new_user)
self.send_confirmation_email(
registration_profile,
text_template,
html_template,
subject,
email_data,
**kwargs)
if volunteer_interest:
statsd.incr('user.registered-as-contributor')
group = Group.objects.get(name=CONTRIBUTOR_GROUP)
new_user.groups.add(group)
return new_user
def send_confirmation_email(self, registration_profile,
text_template=None, html_template=None,
subject=None, email_data=None, **kwargs):
"""Send the user confirmation email."""
user_id = registration_profile.user.id
key = registration_profile.activation_key
self._send_email(
confirmation_profile=registration_profile,
url=reverse('users.activate', args=[user_id, key]),
subject=subject or _('Please confirm your email address'),
text_template=text_template or 'users/email/activate.ltxt',
html_template=html_template or 'users/email/activate.html',
send_to=registration_profile.user.email,
expiration_days=settings.ACCOUNT_ACTIVATION_DAYS,
username=registration_profile.user.username,
email_data=email_data,
**kwargs)
def delete_expired_users(self):
"""
Remove expired instances of this manager's object class.
Accounts to be deleted are identified by searching for
instances of this manager's object class with expired activation
keys, and then checking to see if their associated ``User``
instances have the field ``is_active`` set to ``False``; any
``User`` who is both inactive and has an expired activation
key will be deleted.
"""
days_valid = settings.ACCOUNT_ACTIVATION_DAYS
expired = datetime.now() - timedelta(days=days_valid)
prof_ids = self.filter(user__date_joined__lt=expired)
prof_ids = prof_ids.values_list('id', flat=True)
for chunk in chunked(prof_ids, 1000):
_delete_registration_profiles_chunk.apply_async(args=[chunk])
@task
def _delete_registration_profiles_chunk(data):
log_msg = u'Deleting {num} expired registration profiles.'
log.info(log_msg.format(num=len(data)))
qs = RegistrationProfile.objects.filter(id__in=data)
for profile in qs.select_related('user'):
user = profile.user
profile.delete()
if user and not user.is_active:
user.delete()
class EmailChangeManager(ConfirmationManager):
def send_confirmation_email(self, email_change, new_email):
"""Ask for confirmation before changing a user's email."""
self._send_email(
confirmation_profile=email_change,
url=reverse('users.confirm_email',
args=[email_change.activation_key]),
subject=_('Please confirm your email address'),
text_template='users/email/confirm_email.ltxt',
html_template='users/email/confirm_email.html',
send_to=new_email)
class RegistrationProfile(models.Model):
"""
A simple profile which stores an activation key used for
user account registration.
Generally, you will not want to interact directly with instances
of this model; the provided manager includes methods
for creating and activating new accounts.
"""
user = models.ForeignKey(User, unique=True, verbose_name=_lazy(u'user'))
activation_key = models.CharField(verbose_name=_lazy(u'activation key'),
max_length=40)
objects = RegistrationManager()
class Meta:
verbose_name = _lazy(u'registration profile')
verbose_name_plural = _lazy(u'registration profiles')
def __unicode__(self):
return u'Registration information for %s' % self.user
def activation_key_expired(self):
"""
Determine whether this ``RegistrationProfile``'s activation
key has expired, returning a boolean -- ``True`` if the key
has expired.
Key expiration is determined by:
1. The date the user signed up is incremented by
the number of days specified in the setting
``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
days after signup during which a user is allowed to
activate their account); if the result is less than or
equal to the current date, the key has expired and this
method returns ``True``.
"""
exp_date = timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
return self.user.date_joined + exp_date <= datetime.now()
activation_key_expired.boolean = True
class EmailChange(models.Model):
"""Stores email with activation key when user requests a change."""
ACTIVATED = u"ALREADY_ACTIVATED"
user = models.ForeignKey(User, unique=True, verbose_name=_lazy(u'user'))
activation_key = models.CharField(verbose_name=_lazy(u'activation key'),
max_length=40)
email = models.EmailField(db_index=True, null=True)
objects = EmailChangeManager()
def __unicode__(self):
return u'Change email request to %s for %s' % (self.email, self.user)
class Deactivation(models.Model):
"""Stores user deactivation logs."""
user = models.ForeignKey(User, verbose_name=_lazy(u'user'),
related_name='+')
moderator = models.ForeignKey(User, verbose_name=_lazy(u'moderator'),
related_name='deactivations')
date = models.DateTimeField(default=datetime.now)
def __unicode__(self):
return u'%s was deactivated by %s on %s' % (self.user, self.moderator,
self.date)
# Copyright (c) 2014 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from neutron._i18n import _, _LI, _LW
from neutron.api.v2 import attributes
from neutron.callbacks import events
from neutron.callbacks import exceptions
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as l3_const
from neutron.common import exceptions as n_exc
from neutron.common import utils as n_utils
from neutron.db import l3_agentschedulers_db as l3_sched_db
from neutron.db import l3_attrs_db
from neutron.db import l3_db
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.common import constants
from neutron.plugins.common import utils as p_utils
LOG = logging.getLogger(__name__)
router_distributed_opts = [
cfg.BoolOpt('router_distributed',
default=False,
help=_("System-wide flag to determine the type of router "
"that tenants can create. Only admin can override.")),
]
cfg.CONF.register_opts(router_distributed_opts)
class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
l3_attrs_db.ExtraAttributesMixin):
"""Mixin class to enable DVR support."""
router_device_owners = (
l3_db.L3_NAT_db_mixin.router_device_owners +
(l3_const.DEVICE_OWNER_DVR_INTERFACE,
l3_const.DEVICE_OWNER_ROUTER_SNAT,
l3_const.DEVICE_OWNER_AGENT_GW))
extra_attributes = (
l3_attrs_db.ExtraAttributesMixin.extra_attributes + [{
'name': "distributed",
'default': cfg.CONF.router_distributed
}])
def _create_router_db(self, context, router, tenant_id):
"""Create a router db object with dvr additions."""
router['distributed'] = is_distributed_router(router)
with context.session.begin(subtransactions=True):
router_db = super(
L3_NAT_with_dvr_db_mixin, self)._create_router_db(
context, router, tenant_id)
self._process_extra_attr_router_create(context, router_db, router)
return router_db
def _validate_router_migration(self, context, router_db, router_res):
"""Allow centralized -> distributed state transition only."""
if (router_db.extra_attributes.distributed and
router_res.get('distributed') is False):
LOG.info(_LI("Centralizing distributed router %s "
"is not supported"), router_db['id'])
raise n_exc.BadRequest(
resource='router',
msg=_("Migration from distributed router to centralized is "
"not supported"))
elif (not router_db.extra_attributes.distributed and
router_res.get('distributed')):
            # The router must be disabled before it can be upgraded.
if router_db.admin_state_up:
msg = _('Cannot upgrade active router to distributed. Please '
'set router admin_state_up to False prior to upgrade.')
raise n_exc.BadRequest(resource='router', msg=msg)
# Notify advanced services of the imminent state transition
# for the router.
try:
kwargs = {'context': context, 'router': router_db}
registry.notify(
resources.ROUTER, events.BEFORE_UPDATE, self, **kwargs)
except exceptions.CallbackFailure as e:
with excutils.save_and_reraise_exception():
# NOTE(armax): preserve old check's behavior
if len(e.errors) == 1:
raise e.errors[0].error
raise l3.RouterInUse(router_id=router_db['id'],
reason=e)
def _update_distributed_attr(
self, context, router_id, router_db, data):
"""Update the model to support the dvr case of a router."""
if data.get('distributed'):
old_owner = l3_const.DEVICE_OWNER_ROUTER_INTF
new_owner = l3_const.DEVICE_OWNER_DVR_INTERFACE
for rp in router_db.attached_ports.filter_by(port_type=old_owner):
rp.port_type = new_owner
rp.port.device_owner = new_owner
def _update_router_db(self, context, router_id, data):
with context.session.begin(subtransactions=True):
router_db = super(
L3_NAT_with_dvr_db_mixin, self)._update_router_db(
context, router_id, data)
migrating_to_distributed = (
not router_db.extra_attributes.distributed and
data.get('distributed') is True)
self._validate_router_migration(context, router_db, data)
router_db.extra_attributes.update(data)
self._update_distributed_attr(
context, router_id, router_db, data)
if migrating_to_distributed:
if router_db['gw_port_id']:
# If the Legacy router is getting migrated to a DVR
# router, make sure to create corresponding
# snat interface ports that are to be consumed by
# the Service Node.
if not self._create_snat_intf_ports_if_not_exists(
context.elevated(), router_db):
LOG.debug("SNAT interface ports not created: %s",
router_db['id'])
cur_agents = self.list_l3_agents_hosting_router(
context, router_db['id'])['agents']
for agent in cur_agents:
self._unbind_router(context, router_db['id'],
agent['id'])
return router_db
def _delete_current_gw_port(self, context, router_id, router, new_network):
"""
        Overridden here to handle deletion of dvr internal ports.
If there is a valid router update with gateway port to be deleted,
then go ahead and delete the csnat ports and the floatingip
agent gateway port associated with the dvr router.
"""
gw_ext_net_id = (
router.gw_port['network_id'] if router.gw_port else None)
super(L3_NAT_with_dvr_db_mixin,
self)._delete_current_gw_port(context, router_id,
router, new_network)
if (is_distributed_router(router) and
gw_ext_net_id != new_network and gw_ext_net_id is not None):
self.delete_csnat_router_interface_ports(
context.elevated(), router)
# NOTE(Swami): Delete the Floatingip agent gateway port
# on all hosts when it is the last gateway port in the
# given external network.
filters = {'network_id': [gw_ext_net_id],
'device_owner': [l3_const.DEVICE_OWNER_ROUTER_GW]}
ext_net_gw_ports = self._core_plugin.get_ports(
context.elevated(), filters)
if not ext_net_gw_ports:
self.delete_floatingip_agent_gateway_port(
context.elevated(), None, gw_ext_net_id)
# Send the information to all the L3 Agent hosts
# to clean up the fip namespace as it is no longer required.
self.l3_rpc_notifier.delete_fipnamespace_for_ext_net(
context, gw_ext_net_id)
def _create_gw_port(self, context, router_id, router, new_network,
ext_ips):
super(L3_NAT_with_dvr_db_mixin,
self)._create_gw_port(context, router_id, router, new_network,
ext_ips)
# Make sure that the gateway port exists before creating the
# snat interface ports for distributed router.
if router.extra_attributes.distributed and router.gw_port:
snat_p_list = self._create_snat_intf_ports_if_not_exists(
context.elevated(), router)
if not snat_p_list:
LOG.debug("SNAT interface ports not created: %s", snat_p_list)
def _get_device_owner(self, context, router=None):
"""Get device_owner for the specified router."""
router_is_uuid = isinstance(router, six.string_types)
if router_is_uuid:
router = self._get_router(context, router)
if is_distributed_router(router):
return l3_const.DEVICE_OWNER_DVR_INTERFACE
return super(L3_NAT_with_dvr_db_mixin,
self)._get_device_owner(context, router)
def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
"""Override to create floating agent gw port for DVR.
Floating IP Agent gateway port will be created when a
floatingIP association happens.
"""
fip_port = fip.get('port_id')
super(L3_NAT_with_dvr_db_mixin, self)._update_fip_assoc(
context, fip, floatingip_db, external_port)
associate_fip = fip_port and floatingip_db['id']
if associate_fip and floatingip_db.get('router_id'):
admin_ctx = context.elevated()
router_dict = self.get_router(
admin_ctx, floatingip_db['router_id'])
# Check if distributed router and then create the
# FloatingIP agent gateway port
if router_dict.get('distributed'):
hostid = self._get_dvr_service_port_hostid(
context, fip_port)
if hostid:
# FIXME (Swami): This FIP Agent Gateway port should be
# created only once and there should not be a duplicate
# for the same host. Until we find a good solution for
# augmenting multiple server requests we should use the
# existing flow.
fip_agent_port = (
self.create_fip_agent_gw_port_if_not_exists(
admin_ctx, external_port['network_id'],
hostid))
LOG.debug("FIP Agent gateway port: %s", fip_agent_port)
def _get_floatingip_on_port(self, context, port_id=None):
"""Helper function to retrieve the fip associated with port."""
fip_qry = context.session.query(l3_db.FloatingIP)
floating_ip = fip_qry.filter_by(fixed_port_id=port_id)
return floating_ip.first()
def add_router_interface(self, context, router_id, interface_info):
add_by_port, add_by_sub = self._validate_interface_info(interface_info)
router = self._get_router(context, router_id)
device_owner = self._get_device_owner(context, router)
# This should be True unless adding an IPv6 prefix to an existing port
new_port = True
if add_by_port:
port, subnets = self._add_interface_by_port(
context, router, interface_info['port_id'], device_owner)
elif add_by_sub:
port, subnets, new_port = self._add_interface_by_subnet(
context, router, interface_info['subnet_id'], device_owner)
subnet = subnets[0]
if new_port:
if router.extra_attributes.distributed and router.gw_port:
try:
admin_context = context.elevated()
self._add_csnat_router_interface_port(
admin_context, router, port['network_id'],
port['fixed_ips'][-1]['subnet_id'])
except Exception:
with excutils.save_and_reraise_exception():
                        # we need to preserve the original state prior to
                        # the request by rolling back the port creation
                        # that led to new_port=True
self._core_plugin.delete_port(
admin_context, port['id'])
with context.session.begin(subtransactions=True):
router_port = l3_db.RouterPort(
port_id=port['id'],
router_id=router.id,
port_type=device_owner
)
context.session.add(router_port)
        # NOTE: For additional IPv6 subnets added to the same network, we
        # need to update the CSNAT port with the respective IPv6 subnet.
elif subnet and port:
fixed_ip = {'subnet_id': subnet['id']}
if subnet['ip_version'] == 6:
# Add new prefix to an existing ipv6 csnat port with the
# same network id if one exists
cs_port = self._find_router_port_by_network_and_device_owner(
router, subnet['network_id'],
l3_const.DEVICE_OWNER_ROUTER_SNAT)
if cs_port:
fixed_ips = list(cs_port['port']['fixed_ips'])
fixed_ips.append(fixed_ip)
updated_port = self._core_plugin.update_port(
context.elevated(),
cs_port['port_id'], {'port': {'fixed_ips': fixed_ips}})
LOG.debug("CSNAT port updated for IPv6 subnet: "
"%s", updated_port)
router_interface_info = self._make_router_interface_info(
router_id, port['tenant_id'], port['id'], port['network_id'],
subnet['id'], [subnet['id']])
self.notify_router_interface_action(
context, router_interface_info, 'add')
return router_interface_info
def _port_has_ipv6_address(self, port, csnat_port_check=True):
"""Overridden to return False if DVR SNAT port."""
if csnat_port_check:
if port['device_owner'] == l3_const.DEVICE_OWNER_ROUTER_SNAT:
return False
return super(L3_NAT_with_dvr_db_mixin,
self)._port_has_ipv6_address(port)
def _find_router_port_by_network_and_device_owner(
self, router, net_id, device_owner):
for port in router.attached_ports:
p = port['port']
if (p['network_id'] == net_id and
p['device_owner'] == device_owner and
self._port_has_ipv6_address(p, csnat_port_check=False)):
return port
def _check_for_multiprefix_csnat_port_and_update(
self, context, router, network_id, subnet_id):
"""Checks if the csnat port contains multiple ipv6 prefixes.
If the csnat port contains multiple ipv6 prefixes for the given
network when a router interface is deleted, make sure we don't
delete the port when a single subnet is deleted and just update
it with the right fixed_ip.
This function returns true if it is a multiprefix port.
"""
if router.gw_port:
# If router has a gateway port, check if it has IPV6 subnet
cs_port = (
self._find_router_port_by_network_and_device_owner(
router, network_id, l3_const.DEVICE_OWNER_ROUTER_SNAT))
if cs_port:
fixed_ips = (
[fixedip for fixedip in
cs_port['port']['fixed_ips']
if fixedip['subnet_id'] != subnet_id])
if fixed_ips:
# multiple prefix port - delete prefix from port
self._core_plugin.update_port(
context.elevated(),
cs_port['port_id'], {'port': {'fixed_ips': fixed_ips}})
return True
return False
def remove_router_interface(self, context, router_id, interface_info):
router = self._get_router(context, router_id)
if not router.extra_attributes.distributed:
return super(
L3_NAT_with_dvr_db_mixin, self).remove_router_interface(
context, router_id, interface_info)
plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
router_hosts_before = plugin._get_dvr_hosts_for_router(
context, router_id)
interface_info = super(
L3_NAT_with_dvr_db_mixin, self).remove_router_interface(
context, router_id, interface_info)
router_hosts_after = plugin._get_dvr_hosts_for_router(
context, router_id)
removed_hosts = set(router_hosts_before) - set(router_hosts_after)
if removed_hosts:
agents = plugin.get_l3_agents(context,
filters={'host': removed_hosts})
binding_table = l3_sched_db.RouterL3AgentBinding
snat_binding = context.session.query(binding_table).filter_by(
router_id=router_id).first()
for agent in agents:
is_this_snat_agent = (
snat_binding and snat_binding.l3_agent_id == agent['id'])
if not is_this_snat_agent:
self.l3_rpc_notifier.router_removed_from_agent(
context, router_id, agent['host'])
is_multiple_prefix_csport = (
self._check_for_multiprefix_csnat_port_and_update(
context, router, interface_info['network_id'],
interface_info['subnet_id']))
if not is_multiple_prefix_csport:
# Single prefix port - go ahead and delete the port
self.delete_csnat_router_interface_ports(
context.elevated(), router,
subnet_id=interface_info['subnet_id'])
return interface_info
def _get_snat_sync_interfaces(self, context, router_ids):
"""Query router interfaces that relate to list of router_ids."""
if not router_ids:
return []
qry = context.session.query(l3_db.RouterPort)
qry = qry.filter(
l3_db.RouterPort.router_id.in_(router_ids),
l3_db.RouterPort.port_type == l3_const.DEVICE_OWNER_ROUTER_SNAT
)
interfaces = collections.defaultdict(list)
for rp in qry:
interfaces[rp.router_id].append(
self._core_plugin._make_port_dict(rp.port, None))
LOG.debug("Return the SNAT ports: %s", interfaces)
return interfaces
def _build_routers_list(self, context, routers, gw_ports):
# Perform a single query up front for all routers
if not routers:
return []
router_ids = [r['id'] for r in routers]
snat_binding = l3_sched_db.RouterL3AgentBinding
query = (context.session.query(snat_binding).
filter(snat_binding.router_id.in_(router_ids))).all()
bindings = dict((b.router_id, b) for b in query)
for rtr in routers:
gw_port_id = rtr['gw_port_id']
# Collect gw ports only if available
if gw_port_id and gw_ports.get(gw_port_id):
rtr['gw_port'] = gw_ports[gw_port_id]
if 'enable_snat' in rtr[l3.EXTERNAL_GW_INFO]:
rtr['enable_snat'] = (
rtr[l3.EXTERNAL_GW_INFO]['enable_snat'])
binding = bindings.get(rtr['id'])
if not binding:
rtr['gw_port_host'] = None
LOG.debug('No snat is bound to router %s', rtr['id'])
continue
rtr['gw_port_host'] = binding.l3_agent.host
return routers
def _process_routers(self, context, routers):
routers_dict = {}
snat_intfs_by_router_id = self._get_snat_sync_interfaces(
context, [r['id'] for r in routers])
for router in routers:
routers_dict[router['id']] = router
if router['gw_port_id']:
snat_router_intfs = snat_intfs_by_router_id[router['id']]
LOG.debug("SNAT ports returned: %s ", snat_router_intfs)
router[l3_const.SNAT_ROUTER_INTF_KEY] = snat_router_intfs
return routers_dict
def _process_floating_ips_dvr(self, context, routers_dict,
floating_ips, host, agent):
fip_sync_interfaces = None
LOG.debug("FIP Agent : %s ", agent.id)
for floating_ip in floating_ips:
router = routers_dict.get(floating_ip['router_id'])
if router:
router_floatingips = router.get(l3_const.FLOATINGIP_KEY, [])
if router['distributed']:
if floating_ip.get('host', None) != host:
continue
LOG.debug("Floating IP host: %s", floating_ip['host'])
router_floatingips.append(floating_ip)
router[l3_const.FLOATINGIP_KEY] = router_floatingips
if not fip_sync_interfaces:
fip_sync_interfaces = self._get_fip_sync_interfaces(
context, agent.id)
LOG.debug("FIP Agent ports: %s", fip_sync_interfaces)
router[l3_const.FLOATINGIP_AGENT_INTF_KEY] = (
fip_sync_interfaces)
def _get_fip_sync_interfaces(self, context, fip_agent_id):
"""Query router interfaces that relate to list of router_ids."""
if not fip_agent_id:
return []
filters = {'device_id': [fip_agent_id],
'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]}
interfaces = self._core_plugin.get_ports(context.elevated(), filters)
LOG.debug("Return the FIP ports: %s ", interfaces)
return interfaces
def _get_dvr_sync_data(self, context, host, agent, router_ids=None,
active=None):
routers, interfaces, floating_ips = self._get_router_info_list(
context, router_ids=router_ids, active=active,
device_owners=l3_const.ROUTER_INTERFACE_OWNERS)
dvr_router_ids = set(router['id'] for router in routers
if is_distributed_router(router))
floating_ip_port_ids = [fip['port_id'] for fip in floating_ips
if fip['router_id'] in dvr_router_ids]
if floating_ip_port_ids:
port_filter = {portbindings.HOST_ID: [host],
'id': floating_ip_port_ids}
ports = self._core_plugin.get_ports(context, port_filter)
port_dict = dict((port['id'], port) for port in ports)
# Add the port binding host to the floatingip dictionary
for fip in floating_ips:
vm_port = port_dict.get(fip['port_id'], None)
if vm_port:
fip['host'] = self._get_dvr_service_port_hostid(
context, fip['port_id'], port=vm_port)
routers_dict = self._process_routers(context, routers)
self._process_floating_ips_dvr(context, routers_dict,
floating_ips, host, agent)
ports_to_populate = []
for router in routers_dict.values():
if router.get('gw_port'):
ports_to_populate.append(router['gw_port'])
if router.get(l3_const.FLOATINGIP_AGENT_INTF_KEY):
ports_to_populate += router[l3_const.FLOATINGIP_AGENT_INTF_KEY]
if router.get(l3_const.SNAT_ROUTER_INTF_KEY):
ports_to_populate += router[l3_const.SNAT_ROUTER_INTF_KEY]
ports_to_populate += interfaces
self._populate_mtu_and_subnets_for_ports(context, ports_to_populate)
self._process_interfaces(routers_dict, interfaces)
return list(routers_dict.values())
def _get_dvr_service_port_hostid(self, context, port_id, port=None):
"""Returns the portbinding host_id for dvr service port."""
port_db = port or self._core_plugin.get_port(context, port_id)
device_owner = port_db['device_owner'] if port_db else ""
if (n_utils.is_dvr_serviced(device_owner) or
device_owner == l3_const.DEVICE_OWNER_AGENT_GW):
return port_db[portbindings.HOST_ID]
def _get_agent_gw_ports_exist_for_network(
self, context, network_id, host, agent_id):
"""Return agent gw port if exist, or None otherwise."""
if not network_id:
LOG.debug("Network not specified")
return
filters = {
'network_id': [network_id],
'device_id': [agent_id],
'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]
}
ports = self._core_plugin.get_ports(context, filters)
if ports:
return ports[0]
def delete_floatingip_agent_gateway_port(
self, context, host_id, ext_net_id):
"""Function to delete FIP gateway port with given ext_net_id."""
# delete any fip agent gw port
device_filter = {'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW],
'network_id': [ext_net_id]}
ports = self._core_plugin.get_ports(context,
filters=device_filter)
for p in ports:
if not host_id or p[portbindings.HOST_ID] == host_id:
self._core_plugin.ipam.delete_port(context, p['id'])
if host_id:
return
def create_fip_agent_gw_port_if_not_exists(
self, context, network_id, host):
"""Function to return the FIP Agent GW port.
This function will create a FIP Agent GW port
if required. If the port already exists, it
will return the existing port and will not
create a new one.
"""
l3_agent_db = self._get_agent_by_type_and_host(
context, l3_const.AGENT_TYPE_L3, host)
if l3_agent_db:
LOG.debug("Agent ID exists: %s", l3_agent_db['id'])
f_port = self._get_agent_gw_ports_exist_for_network(
context, network_id, host, l3_agent_db['id'])
if not f_port:
LOG.info(_LI('Agent Gateway port does not exist,'
' so create one: %s'), f_port)
port_data = {'tenant_id': '',
'network_id': network_id,
'device_id': l3_agent_db['id'],
'device_owner': l3_const.DEVICE_OWNER_AGENT_GW,
portbindings.HOST_ID: host,
'admin_state_up': True,
'name': ''}
agent_port = p_utils.create_port(self._core_plugin, context,
{'port': port_data})
if agent_port:
self._populate_mtu_and_subnets_for_ports(context,
[agent_port])
return agent_port
msg = _("Unable to create the Agent Gateway Port")
raise n_exc.BadRequest(resource='router', msg=msg)
else:
self._populate_mtu_and_subnets_for_ports(context, [f_port])
return f_port
def _get_snat_interface_ports_for_router(self, context, router_id):
"""Return all existing snat_router_interface ports."""
qry = context.session.query(l3_db.RouterPort)
qry = qry.filter_by(
router_id=router_id,
port_type=l3_const.DEVICE_OWNER_ROUTER_SNAT
)
ports = [self._core_plugin._make_port_dict(rp.port, None)
for rp in qry]
return ports
def _add_csnat_router_interface_port(
self, context, router, network_id, subnet_id, do_pop=True):
"""Add SNAT interface to the specified router and subnet."""
port_data = {'tenant_id': '',
'network_id': network_id,
'fixed_ips': [{'subnet_id': subnet_id}],
'device_id': router.id,
'device_owner': l3_const.DEVICE_OWNER_ROUTER_SNAT,
'admin_state_up': True,
'name': ''}
snat_port = p_utils.create_port(self._core_plugin, context,
{'port': port_data})
if not snat_port:
msg = _("Unable to create the SNAT Interface Port")
raise n_exc.BadRequest(resource='router', msg=msg)
with context.session.begin(subtransactions=True):
router_port = l3_db.RouterPort(
port_id=snat_port['id'],
router_id=router.id,
port_type=l3_const.DEVICE_OWNER_ROUTER_SNAT
)
context.session.add(router_port)
if do_pop:
return self._populate_mtu_and_subnets_for_ports(context,
[snat_port])
return snat_port
def _create_snat_intf_ports_if_not_exists(self, context, router):
"""Function to return the snat interface port list.
This function will return the snat interface port list
if it exists. If the port does not exist it will create
new ports and then return the list.
"""
port_list = self._get_snat_interface_ports_for_router(
context, router.id)
if port_list:
self._populate_mtu_and_subnets_for_ports(context, port_list)
return port_list
port_list = []
int_ports = (
rp.port for rp in
router.attached_ports.filter_by(
port_type=l3_const.DEVICE_OWNER_DVR_INTERFACE
)
)
LOG.info(_LI('SNAT interface port list does not exist,'
' so create one: %s'), port_list)
for intf in int_ports:
if intf.fixed_ips:
                # Pass the subnet for the port to make sure the IPs are
                # assigned on the right subnet if multiple subnets exist.
snat_port = self._add_csnat_router_interface_port(
context, router, intf['network_id'],
intf['fixed_ips'][0]['subnet_id'], do_pop=False)
port_list.append(snat_port)
if port_list:
self._populate_mtu_and_subnets_for_ports(context, port_list)
return port_list
def _generate_arp_table_and_notify_agent(
self, context, fixed_ip, mac_address, notifier):
"""Generates the arp table entry and notifies the l3 agent."""
ip_address = fixed_ip['ip_address']
subnet = fixed_ip['subnet_id']
filters = {'fixed_ips': {'subnet_id': [subnet]},
'device_owner': [l3_const.DEVICE_OWNER_DVR_INTERFACE]}
ports = self._core_plugin.get_ports(context, filters=filters)
router_id = next((port['device_id'] for port in ports), None)
if not router_id:
return
arp_table = {'ip_address': ip_address,
'mac_address': mac_address,
'subnet_id': subnet}
notifier(context, router_id, arp_table)
def _should_update_arp_entry_for_dvr_service_port(self, port_dict):
# Check this is a valid VM or service port
return (n_utils.is_dvr_serviced(port_dict['device_owner']) and
port_dict['fixed_ips'])
def update_arp_entry_for_dvr_service_port(self, context, port_dict):
"""Notify L3 agents of ARP table entry for dvr service port.
When a dvr service port goes up, look for the DVR router on
the port's subnet, and send the ARP details to all
L3 agents hosting the router to add it.
"""
if not self._should_update_arp_entry_for_dvr_service_port(port_dict):
return
changed_fixed_ips = port_dict['fixed_ips']
for fixed_ip in changed_fixed_ips:
self._generate_arp_table_and_notify_agent(
context, fixed_ip, port_dict['mac_address'],
self.l3_rpc_notifier.add_arp_entry)
def delete_arp_entry_for_dvr_service_port(self, context, port_dict):
"""Notify L3 agents of ARP table entry for dvr service port.
When a dvr service port goes down, look for the DVR
router on the port's subnet, and send the ARP details to all
L3 agents hosting the router to delete it.
"""
if not self._should_update_arp_entry_for_dvr_service_port(port_dict):
return
changed_fixed_ips = port_dict['fixed_ips']
for fixed_ip in changed_fixed_ips:
self._generate_arp_table_and_notify_agent(
context, fixed_ip, port_dict['mac_address'],
self.l3_rpc_notifier.del_arp_entry)
def delete_csnat_router_interface_ports(self, context,
router, subnet_id=None):
# Each csnat router interface port is associated
# with a subnet, so we need to pass the subnet id to
# delete the right ports.
# TODO(markmcclain): This is suboptimal but was left to reduce
# changeset size since it is late in cycle
ports = [
rp.port.id for rp in
router.attached_ports.filter_by(
port_type=l3_const.DEVICE_OWNER_ROUTER_SNAT)
if rp.port
]
c_snat_ports = self._core_plugin.get_ports(
context,
filters={'id': ports}
)
for p in c_snat_ports:
if subnet_id is None:
self._core_plugin.delete_port(context,
p['id'],
l3_port_check=False)
else:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
LOG.debug("Subnet matches: %s", subnet_id)
self._core_plugin.delete_port(context,
p['id'],
l3_port_check=False)
def create_floatingip(self, context, floatingip,
initial_status=l3_const.FLOATINGIP_STATUS_ACTIVE):
floating_ip = self._create_floatingip(
context, floatingip, initial_status)
self._notify_floating_ip_change(context, floating_ip)
return floating_ip
def _notify_floating_ip_change(self, context, floating_ip):
router_id = floating_ip['router_id']
fixed_port_id = floating_ip['port_id']
# we need to notify agents only in case Floating IP is associated
if not router_id or not fixed_port_id:
return
try:
# using admin context as router may belong to admin tenant
router = self._get_router(context.elevated(), router_id)
except l3.RouterNotFound:
LOG.warning(_LW("Router %s was not found. "
"Skipping agent notification."),
router_id)
return
if is_distributed_router(router):
host = self._get_dvr_service_port_hostid(context, fixed_port_id)
self.l3_rpc_notifier.routers_updated_on_host(
context, [router_id], host)
else:
self.notify_router_updated(context, router_id)
def update_floatingip(self, context, id, floatingip):
old_floatingip, floatingip = self._update_floatingip(
context, id, floatingip)
self._notify_floating_ip_change(context, old_floatingip)
if (floatingip['router_id'] != old_floatingip['router_id'] or
floatingip['port_id'] != old_floatingip['port_id']):
self._notify_floating_ip_change(context, floatingip)
return floatingip
def delete_floatingip(self, context, id):
floating_ip = self._delete_floatingip(context, id)
self._notify_floating_ip_change(context, floating_ip)
def is_distributed_router(router):
"""Return True if router to be handled is distributed."""
try:
# See if router is a DB object first
requested_router_type = router.extra_attributes.distributed
except AttributeError:
# if not, try to see if it is a request body
requested_router_type = router.get('distributed')
if attributes.is_attr_set(requested_router_type):
return requested_router_type
return cfg.CONF.router_distributed
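# Example (illustrative): a request body such as {'distributed': True} has the
# attribute set, so the explicit value is returned; a body without the key falls
# through to the router_distributed option in the configuration.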
|
|
# Copyright (c) 2012-2014 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy import stats,special
import scipy as sp
import link_functions
from ..util.misc import chain_1, chain_2, chain_3
from scipy.integrate import quad
import warnings
from ..core.parameterization import Parameterized
class Likelihood(Parameterized):
"""
    Likelihood base class, used to define p(y|f).
All instances use _inverse_ link functions, which can be swapped out. It is
expected that inheriting classes define a default inverse link function
To use this class, inherit and define missing functionality.
Inheriting classes *must* implement:
pdf_link : a bound method which turns the output of the link function into the pdf
logpdf_link : the logarithm of the above
To enable use with EP, inheriting classes *must* define:
TODO: a suitable derivative function for any parameters of the class
It is also desirable to define:
    moments_match_ep : a function to compute the EP moments. If this isn't defined, the moments will be computed using 1D quadrature.
To enable use with Laplace approximation, inheriting classes *must* define:
Some derivative functions *AS TODO*
For exact Gaussian inference, define *JH TODO*
"""
def __init__(self, gp_link, name):
super(Likelihood, self).__init__(name)
assert isinstance(gp_link,link_functions.GPTransformation), "gp_link is not a valid GPTransformation."
self.gp_link = gp_link
self.log_concave = False
def _gradients(self,partial):
return np.zeros(0)
def update_gradients(self, partial):
if self.size > 0:
raise NotImplementedError('Must be implemented for likelihoods with parameters to be optimized')
def _preprocess_values(self,Y):
"""
        In case it is needed, this function assesses the output values or applies any pertinent transformation to them.
        :param Y: observed output
        :type Y: Nx1 numpy.ndarray
"""
return Y
def conditional_mean(self, gp):
"""
The mean of the random variable conditioned on one value of the GP
"""
raise NotImplementedError
def conditional_variance(self, gp):
"""
The variance of the random variable conditioned on one value of the GP
"""
raise NotImplementedError
def log_predictive_density(self, y_test, mu_star, var_star):
"""
Calculation of the log predictive density
        .. math::
            p(y_{*}|D) = \\int p(y_{*}|f_{*})p(f_{*}|\\mu_{*},\\sigma^{2}_{*}) df_{*}
:param y_test: test observations (y_{*})
:type y_test: (Nx1) array
:param mu_star: predictive mean of gaussian p(f_{*}|mu_{*}, var_{*})
:type mu_star: (Nx1) array
:param var_star: predictive variance of gaussian p(f_{*}|mu_{*}, var_{*})
:type var_star: (Nx1) array
"""
assert y_test.shape==mu_star.shape
assert y_test.shape==var_star.shape
assert y_test.shape[1] == 1
def integral_generator(y, m, v):
"""Generate a function which can be integrated to give p(Y*|Y) = int p(Y*|f*)p(f*|Y) df*"""
def f(f_star):
return self.pdf(f_star, y)*np.exp(-(1./(2*v))*np.square(m-f_star))
return f
scaled_p_ystar, accuracy = zip(*[quad(integral_generator(y, m, v), -np.inf, np.inf) for y, m, v in zip(y_test.flatten(), mu_star.flatten(), var_star.flatten())])
scaled_p_ystar = np.array(scaled_p_ystar).reshape(-1,1)
p_ystar = scaled_p_ystar/np.sqrt(2*np.pi*var_star)
return np.log(p_ystar)
def _moments_match_ep(self,obs,tau,v):
"""
Calculation of moments using quadrature
:param obs: observed output
:param tau: cavity distribution 1st natural parameter (precision)
        :param v: cavity distribution 2nd natural parameter (mu*precision)
"""
#Compute first integral for zeroth moment.
        #NOTE: the constant np.sqrt(2*pi/tau) is divided out at the end of the function
mu = v/tau
def int_1(f):
return self.pdf(f, obs)*np.exp(-0.5*tau*np.square(mu-f))
z_scaled, accuracy = quad(int_1, -np.inf, np.inf)
#Compute second integral for first moment
def int_2(f):
return f*self.pdf(f, obs)*np.exp(-0.5*tau*np.square(mu-f))
mean, accuracy = quad(int_2, -np.inf, np.inf)
mean /= z_scaled
#Compute integral for variance
def int_3(f):
return (f**2)*self.pdf(f, obs)*np.exp(-0.5*tau*np.square(mu-f))
Ef2, accuracy = quad(int_3, -np.inf, np.inf)
Ef2 /= z_scaled
variance = Ef2 - mean**2
#Add constant to the zeroth moment
        #NOTE: this constant is not needed in the other moments because it cancels out.
z = z_scaled/np.sqrt(2*np.pi/tau)
return z, mean, variance
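    # Illustrative check (not part of the original class): for a Gaussian
    # likelihood N(y; f, s2) the quadrature above has the closed-form solution
    #     z        = N(y; mu, s2 + 1/tau)
    #     mean     = mu + (y - mu) / (tau * (s2 + 1/tau))
    #     variance = 1/tau - 1 / (tau**2 * (s2 + 1/tau))
    # which makes a convenient sanity check for _moments_match_ep.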
def variational_expectations(self, Y, m, v, gh_points=None):
"""
Use Gauss-Hermite Quadrature to compute
E_p(f) [ log p(y|f) ]
d/dm E_p(f) [ log p(y|f) ]
d/dv E_p(f) [ log p(y|f) ]
where p(f) is a Gaussian with mean m and variance v. The shapes of Y, m and v should match.
        if no gh_points are passed, we construct them using default options
"""
if gh_points is None:
gh_x, gh_w = np.polynomial.hermite.hermgauss(12)
else:
gh_x, gh_w = gh_points
shape = m.shape
m,v,Y = m.flatten(), v.flatten(), Y.flatten()
#make a grid of points
X = gh_x[None,:]*np.sqrt(2.*v[:,None]) + m[:,None]
        #evaluate the likelihood for the grid. The first axis indexes the data (and mu, var) and the second indexes the grid.
# broadcast needs to be handled carefully.
logp = self.logpdf(X,Y[:,None])
dlogp_dx = self.dlogpdf_df(X, Y[:,None])
d2logp_dx2 = self.d2logpdf_df2(X, Y[:,None])
#clipping for numerical stability
logp = np.clip(logp,-1e6,1e6)
dlogp_dx = np.clip(dlogp_dx,-1e6,1e6)
d2logp_dx2 = np.clip(d2logp_dx2,-1e6,1e6)
        #average over the grid to get derivatives of the Gaussian's parameters
F = np.dot(logp, gh_w)
dF_dm = np.dot(dlogp_dx, gh_w)
dF_dv = np.dot(d2logp_dx2, gh_w)/2.
        if np.any(np.isnan(dF_dv)) or np.any(np.isinf(dF_dv)):
            raise ValueError("variational_expectations produced NaN or inf values in dF_dv")
        if np.any(np.isnan(dF_dm)) or np.any(np.isinf(dF_dm)):
            raise ValueError("variational_expectations produced NaN or inf values in dF_dm")
return F.reshape(*shape), dF_dm.reshape(*shape), dF_dv.reshape(*shape)
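    # Note on the quadrature above: X has shape (N, degree) after broadcasting the
    # Gauss-Hermite nodes against the flattened means and variances, the logpdf
    # terms broadcast Y[:, None] over that grid, and np.dot(..., gh_w) sums over
    # the quadrature axis, leaving one value per data point.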
def predictive_mean(self, mu, variance, Y_metadata=None):
"""
Quadrature calculation of the predictive mean: E(Y_star|Y) = E( E(Y_star|f_star, Y) )
:param mu: mean of posterior
        :param variance: variance of posterior
        """
        #conditional_mean: the expected value of y given some f, under this likelihood
def int_mean(f,m,v):
p = np.exp(-(0.5/v)*np.square(f - m))
#If p is zero then conditional_mean will overflow
if p < 1e-10:
return 0.
else:
return self.conditional_mean(f)*p
scaled_mean = [quad(int_mean, -np.inf, np.inf,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)]
mean = np.array(scaled_mean)[:,None] / np.sqrt(2*np.pi*(variance))
return mean
def _conditional_mean(self, f):
"""Quadrature calculation of the conditional mean: E(Y_star|f)"""
        raise NotImplementedError("implement this function to make predictions")
def predictive_variance(self, mu,variance, predictive_mean=None, Y_metadata=None):
"""
Approximation to the predictive variance: V(Y_star)
The following variance decomposition is used:
V(Y_star) = E( V(Y_star|f_star) ) + V( E(Y_star|f_star) )
:param mu: mean of posterior
        :param variance: variance of posterior
        :param predictive_mean: output's predictive mean; if None, the predictive_mean method will be called.
"""
#sigma2 = sigma**2
normalizer = np.sqrt(2*np.pi*variance)
# E( V(Y_star|f_star) )
def int_var(f,m,v):
p = np.exp(-(0.5/v)*np.square(f - m))
#If p is zero then conditional_variance will overflow
if p < 1e-10:
return 0.
else:
return self.conditional_variance(f)*p
scaled_exp_variance = [quad(int_var, -np.inf, np.inf,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)]
exp_var = np.array(scaled_exp_variance)[:,None] / normalizer
#V( E(Y_star|f_star) ) = E( E(Y_star|f_star)**2 ) - E( E(Y_star|f_star) )**2
#E( E(Y_star|f_star) )**2
if predictive_mean is None:
predictive_mean = self.predictive_mean(mu,variance)
predictive_mean_sq = predictive_mean**2
#E( E(Y_star|f_star)**2 )
def int_pred_mean_sq(f,m,v,predictive_mean_sq):
p = np.exp(-(0.5/v)*np.square(f - m))
#If p is zero then conditional_mean**2 will overflow
if p < 1e-10:
return 0.
else:
return self.conditional_mean(f)**2*p
scaled_exp_exp2 = [quad(int_pred_mean_sq, -np.inf, np.inf,args=(mj,s2j,pm2j))[0] for mj,s2j,pm2j in zip(mu,variance,predictive_mean_sq)]
exp_exp2 = np.array(scaled_exp_exp2)[:,None] / normalizer
var_exp = exp_exp2 - predictive_mean_sq
# V(Y_star) = E[ V(Y_star|f_star) ] + V[ E(Y_star|f_star) ]
# V(Y_star) = E[ V(Y_star|f_star) ] + E(Y_star**2|f_star) - E[Y_star|f_star]**2
return exp_var + var_exp
def pdf_link(self, inv_link_f, y, Y_metadata=None):
raise NotImplementedError
def logpdf_link(self, inv_link_f, y, Y_metadata=None):
raise NotImplementedError
def dlogpdf_dlink(self, inv_link_f, y, Y_metadata=None):
raise NotImplementedError
def d2logpdf_dlink2(self, inv_link_f, y, Y_metadata=None):
raise NotImplementedError
def d3logpdf_dlink3(self, inv_link_f, y, Y_metadata=None):
raise NotImplementedError
def dlogpdf_link_dtheta(self, inv_link_f, y, Y_metadata=None):
raise NotImplementedError
def dlogpdf_dlink_dtheta(self, inv_link_f, y, Y_metadata=None):
raise NotImplementedError
def d2logpdf_dlink2_dtheta(self, inv_link_f, y, Y_metadata=None):
raise NotImplementedError
def pdf(self, f, y, Y_metadata=None):
"""
Evaluates the link function link(f) then computes the likelihood (pdf) using it
        .. math::
p(y|\\lambda(f))
:param f: latent variables f
:type f: Nx1 array
:param y: data
:type y: Nx1 array
        :param Y_metadata: metadata associated with the observations (not used by this base class)
:returns: likelihood evaluated for this point
:rtype: float
"""
inv_link_f = self.gp_link.transf(f)
return self.pdf_link(inv_link_f, y, Y_metadata=Y_metadata)
def logpdf(self, f, y, Y_metadata=None):
"""
Evaluates the link function link(f) then computes the log likelihood (log pdf) using it
        .. math::
\\log p(y|\\lambda(f))
:param f: latent variables f
:type f: Nx1 array
:param y: data
:type y: Nx1 array
        :param Y_metadata: metadata associated with the observations (not used by this base class)
:returns: log likelihood evaluated for this point
:rtype: float
"""
inv_link_f = self.gp_link.transf(f)
return self.logpdf_link(inv_link_f, y, Y_metadata=Y_metadata)
def dlogpdf_df(self, f, y, Y_metadata=None):
"""
Evaluates the link function link(f) then computes the derivative of log likelihood using it
Uses the Faa di Bruno's formula for the chain rule
.. math::
\\frac{d\\log p(y|\\lambda(f))}{df} = \\frac{d\\log p(y|\\lambda(f))}{d\\lambda(f)}\\frac{d\\lambda(f)}{df}
:param f: latent variables f
:type f: Nx1 array
:param y: data
:type y: Nx1 array
        :param Y_metadata: metadata associated with the observations (not used by this base class)
:returns: derivative of log likelihood evaluated for this point
:rtype: 1xN array
"""
inv_link_f = self.gp_link.transf(f)
dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)
dlink_df = self.gp_link.dtransf_df(f)
return chain_1(dlogpdf_dlink, dlink_df)
def d2logpdf_df2(self, f, y, Y_metadata=None):
"""
Evaluates the link function link(f) then computes the second derivative of log likelihood using it
Uses the Faa di Bruno's formula for the chain rule
.. math::
\\frac{d^{2}\\log p(y|\\lambda(f))}{df^{2}} = \\frac{d^{2}\\log p(y|\\lambda(f))}{d^{2}\\lambda(f)}\\left(\\frac{d\\lambda(f)}{df}\\right)^{2} + \\frac{d\\log p(y|\\lambda(f))}{d\\lambda(f)}\\frac{d^{2}\\lambda(f)}{df^{2}}
:param f: latent variables f
:type f: Nx1 array
:param y: data
:type y: Nx1 array
        :param Y_metadata: metadata associated with the observations (not used by this base class)
:returns: second derivative of log likelihood evaluated for this point (diagonal only)
:rtype: 1xN array
"""
inv_link_f = self.gp_link.transf(f)
d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata)
dlink_df = self.gp_link.dtransf_df(f)
dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)
d2link_df2 = self.gp_link.d2transf_df2(f)
return chain_2(d2logpdf_dlink2, dlink_df, dlogpdf_dlink, d2link_df2)
def d3logpdf_df3(self, f, y, Y_metadata=None):
"""
Evaluates the link function link(f) then computes the third derivative of log likelihood using it
Uses the Faa di Bruno's formula for the chain rule
.. math::
            \\frac{d^{3}\\log p(y|\\lambda(f))}{df^{3}} = \\frac{d^{3}\\log p(y|\\lambda(f))}{d\\lambda(f)^{3}}\\left(\\frac{d\\lambda(f)}{df}\\right)^{3} + 3\\frac{d^{2}\\log p(y|\\lambda(f))}{d\\lambda(f)^{2}}\\frac{d\\lambda(f)}{df}\\frac{d^{2}\\lambda(f)}{df^{2}} + \\frac{d\\log p(y|\\lambda(f))}{d\\lambda(f)}\\frac{d^{3}\\lambda(f)}{df^{3}}
:param f: latent variables f
:type f: Nx1 array
:param y: data
:type y: Nx1 array
        :param Y_metadata: metadata associated with the observations (not used by this base class)
:returns: third derivative of log likelihood evaluated for this point
:rtype: float
"""
inv_link_f = self.gp_link.transf(f)
d3logpdf_dlink3 = self.d3logpdf_dlink3(inv_link_f, y, Y_metadata=Y_metadata)
dlink_df = self.gp_link.dtransf_df(f)
d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata)
d2link_df2 = self.gp_link.d2transf_df2(f)
dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)
d3link_df3 = self.gp_link.d3transf_df3(f)
return chain_3(d3logpdf_dlink3, dlink_df, d2logpdf_dlink2, d2link_df2, dlogpdf_dlink, d3link_df3)
def dlogpdf_dtheta(self, f, y, Y_metadata=None):
"""
TODO: Doc strings
"""
if self.size > 0:
inv_link_f = self.gp_link.transf(f)
return self.dlogpdf_link_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
else:
# There are no parameters so return an empty array for derivatives
return np.zeros([1, 0])
def dlogpdf_df_dtheta(self, f, y, Y_metadata=None):
"""
TODO: Doc strings
"""
if self.size > 0:
inv_link_f = self.gp_link.transf(f)
dlink_df = self.gp_link.dtransf_df(f)
dlogpdf_dlink_dtheta = self.dlogpdf_dlink_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
return chain_1(dlogpdf_dlink_dtheta, dlink_df)
else:
# There are no parameters so return an empty array for derivatives
return np.zeros([f.shape[0], 0])
def d2logpdf_df2_dtheta(self, f, y, Y_metadata=None):
"""
TODO: Doc strings
"""
if self.size > 0:
inv_link_f = self.gp_link.transf(f)
dlink_df = self.gp_link.dtransf_df(f)
d2link_df2 = self.gp_link.d2transf_df2(f)
d2logpdf_dlink2_dtheta = self.d2logpdf_dlink2_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
dlogpdf_dlink_dtheta = self.dlogpdf_dlink_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
return chain_2(d2logpdf_dlink2_dtheta, dlink_df, dlogpdf_dlink_dtheta, d2link_df2)
else:
# There are no parameters so return an empty array for derivatives
return np.zeros([f.shape[0], 0])
def _laplace_gradients(self, f, y, Y_metadata=None):
dlogpdf_dtheta = self.dlogpdf_dtheta(f, y, Y_metadata=Y_metadata)
dlogpdf_df_dtheta = self.dlogpdf_df_dtheta(f, y, Y_metadata=Y_metadata)
d2logpdf_df2_dtheta = self.d2logpdf_df2_dtheta(f, y, Y_metadata=Y_metadata)
#Parameters are stacked vertically. Must be listed in same order as 'get_param_names'
# ensure we have gradients for every parameter we want to optimize
assert len(dlogpdf_dtheta) == self.size #1 x num_param array
assert dlogpdf_df_dtheta.shape[1] == self.size #f x num_param matrix
assert d2logpdf_df2_dtheta.shape[1] == self.size #f x num_param matrix
return dlogpdf_dtheta, dlogpdf_df_dtheta, d2logpdf_df2_dtheta
def predictive_values(self, mu, var, full_cov=False, Y_metadata=None):
"""
        Compute mean, variance of the predictive distribution.
:param mu: mean of the latent variable, f, of posterior
:param var: variance of the latent variable, f, of posterior
:param full_cov: whether to use the full covariance or just the diagonal
:type full_cov: Boolean
"""
pred_mean = self.predictive_mean(mu, var, Y_metadata)
pred_var = self.predictive_variance(mu, var, pred_mean, Y_metadata)
return pred_mean, pred_var
def predictive_quantiles(self, mu, var, quantiles, Y_metadata=None):
#compute the quantiles by sampling!!!
N_samp = 1000
s = np.random.randn(mu.shape[0], N_samp)*np.sqrt(var) + mu
#ss_f = s.flatten()
#ss_y = self.samples(ss_f, Y_metadata)
ss_y = self.samples(s, Y_metadata)
#ss_y = ss_y.reshape(mu.shape[0], N_samp)
return [np.percentile(ss_y ,q, axis=1)[:,None] for q in quantiles]
def samples(self, gp, Y_metadata=None):
"""
Returns a set of samples of observations based on a given value of the latent variable.
:param gp: latent variable
"""
raise NotImplementedError
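# Illustrative sketch (not part of the original module): the Gauss-Hermite rule
# that variational_expectations relies on approximates a Gaussian expectation
# E_{N(m, v)}[g(f)] by (1/sqrt(pi)) * sum_i w_i * g(sqrt(2*v)*x_i + m); note the
# weights returned by numpy's hermgauss sum to sqrt(pi), hence the normalisation.
# The helper below (a hypothetical name, for illustration only) checks the rule
# against the analytic value E[f**2] = m**2 + v.
def _gauss_hermite_expectation_demo(m=0.3, v=0.7, degree=12):
    gh_x, gh_w = np.polynomial.hermite.hermgauss(degree)
    f = np.sqrt(2.0 * v) * gh_x + m
    approx = np.dot(f ** 2, gh_w) / np.sqrt(np.pi)
    exact = m ** 2 + v
    return approx, exact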
|
|
from Classification import classification_by_name
from Node import error # noqa: I201
from kinds import lowercase_first_word # noqa: I201
class Token(object):
"""
Represents the specification for a Token in the TokenSyntax file.
"""
def __init__(self, name, kind, serialization_code, unprefixed_kind=None,
text=None, classification='None', is_keyword=False):
self.name = name
self.kind = kind
if unprefixed_kind is None:
self.unprefixed_kind = kind
else:
self.unprefixed_kind = unprefixed_kind
self.serialization_code = serialization_code
self.text = text or ""
self.classification = classification_by_name(classification)
self.is_keyword = is_keyword
def swift_kind(self):
name = lowercase_first_word(self.name)
if self.is_keyword:
return name + 'Keyword'
return name
class Keyword(Token):
"""
Represents a keyword token.
"""
def __init__(self, name, text, serialization_code,
classification='Keyword'):
Token.__init__(self, name, 'kw_' + text, serialization_code,
unprefixed_kind=text, text=text,
classification=classification, is_keyword=True)
def macro_name(self):
return "KEYWORD"
class SwiftKeyword(Keyword):
def macro_name(self):
return "SWIFT_KEYWORD"
class DeclKeyword(SwiftKeyword):
def macro_name(self):
return "DECL_KEYWORD"
class StmtKeyword(SwiftKeyword):
def macro_name(self):
return "STMT_KEYWORD"
class ExprKeyword(SwiftKeyword):
def macro_name(self):
return "EXPR_KEYWORD"
class PatternKeyword(SwiftKeyword):
def macro_name(self):
return "PAT_KEYWORD"
class SilKeyword(Keyword):
def macro_name(self):
return "SIL_KEYWORD"
class PoundKeyword(Token):
def __init__(self, name, kind, text, serialization_code,
classification='Keyword'):
Token.__init__(self, name, 'pound_' + kind, serialization_code,
unprefixed_kind=kind, text=text,
classification=classification, is_keyword=True)
def macro_name(self):
return "POUND_KEYWORD"
class PoundObjectLiteral(PoundKeyword):
def __init__(self, name, kind, text, serialization_code, description,
protocol, classification='ObjectLiteral'):
PoundKeyword.__init__(self, name, kind, text, serialization_code,
classification)
self.description = description
self.protocol = protocol
def macro_name(self):
return "POUND_OBJECT_LITERAL"
class PoundConfig(PoundKeyword):
def macro_name(self):
return "POUND_CONFIG"
class PoundDirectiveKeyword(PoundKeyword):
def __init__(self, name, kind, text, serialization_code,
classification='PoundDirectiveKeyword'):
PoundKeyword.__init__(self, name, kind, text, serialization_code,
classification)
def macro_name(self):
return "POUND_DIRECTIVE_KEYWORD"
class PoundConditionalDirectiveKeyword(PoundDirectiveKeyword):
def __init__(self, name, kind, text, serialization_code,
classification='PoundDirectiveKeyword'):
PoundKeyword.__init__(self, name, kind, text, serialization_code,
classification)
def macro_name(self):
return "POUND_COND_DIRECTIVE_KEYWORD"
class Punctuator(Token):
def macro_name(self):
return "PUNCTUATOR"
class Literal(Token):
def macro_name(self):
return "LITERAL"
class Misc(Token):
def macro_name(self):
return "MISC"
SYNTAX_TOKENS = [
# Keywords that start decls
DeclKeyword('Associatedtype', 'associatedtype', serialization_code=1),
DeclKeyword('Class', 'class', serialization_code=2),
DeclKeyword('Deinit', 'deinit', serialization_code=3),
DeclKeyword('Enum', 'enum', serialization_code=4),
DeclKeyword('Extension', 'extension', serialization_code=5),
DeclKeyword('Func', 'func', serialization_code=6),
DeclKeyword('Import', 'import', serialization_code=7),
DeclKeyword('Init', 'init', serialization_code=8),
DeclKeyword('Inout', 'inout', serialization_code=9),
DeclKeyword('Let', 'let', serialization_code=10),
DeclKeyword('Operator', 'operator', serialization_code=11),
DeclKeyword('Precedencegroup', 'precedencegroup', serialization_code=12),
DeclKeyword('Protocol', 'protocol', serialization_code=13),
DeclKeyword('Struct', 'struct', serialization_code=14),
DeclKeyword('Subscript', 'subscript', serialization_code=15),
DeclKeyword('Typealias', 'typealias', serialization_code=16),
DeclKeyword('Var', 'var', serialization_code=17),
DeclKeyword('Fileprivate', 'fileprivate', serialization_code=18),
DeclKeyword('Internal', 'internal', serialization_code=19),
DeclKeyword('Private', 'private', serialization_code=20),
DeclKeyword('Public', 'public', serialization_code=21),
DeclKeyword('Static', 'static', serialization_code=22),
# Statement keywords
StmtKeyword('Defer', 'defer', serialization_code=23),
StmtKeyword('If', 'if', serialization_code=24),
StmtKeyword('Guard', 'guard', serialization_code=25),
StmtKeyword('Do', 'do', serialization_code=26),
StmtKeyword('Repeat', 'repeat', serialization_code=27),
StmtKeyword('Else', 'else', serialization_code=28),
StmtKeyword('For', 'for', serialization_code=29),
StmtKeyword('In', 'in', serialization_code=30),
StmtKeyword('While', 'while', serialization_code=31),
StmtKeyword('Return', 'return', serialization_code=32),
StmtKeyword('Break', 'break', serialization_code=33),
StmtKeyword('Continue', 'continue', serialization_code=34),
StmtKeyword('Fallthrough', 'fallthrough', serialization_code=35),
StmtKeyword('Switch', 'switch', serialization_code=36),
StmtKeyword('Case', 'case', serialization_code=37),
StmtKeyword('Default', 'default', serialization_code=38),
StmtKeyword('Where', 'where', serialization_code=39),
StmtKeyword('Catch', 'catch', serialization_code=40),
StmtKeyword('Throw', 'throw', serialization_code=50),
# Expression keywords
ExprKeyword('As', 'as', serialization_code=41),
ExprKeyword('Any', 'Any', serialization_code=42),
ExprKeyword('False', 'false', serialization_code=43),
ExprKeyword('Is', 'is', serialization_code=44),
ExprKeyword('Nil', 'nil', serialization_code=45),
ExprKeyword('Rethrows', 'rethrows', serialization_code=46),
ExprKeyword('Super', 'super', serialization_code=47),
ExprKeyword('Self', 'self', serialization_code=48),
ExprKeyword('CapitalSelf', 'Self', serialization_code=49),
ExprKeyword('True', 'true', serialization_code=51),
ExprKeyword('Try', 'try', serialization_code=52),
ExprKeyword('Throws', 'throws', serialization_code=53),
Keyword('__FILE__', '__FILE__', serialization_code=54),
Keyword('__LINE__', '__LINE__', serialization_code=55),
Keyword('__COLUMN__', '__COLUMN__', serialization_code=56),
Keyword('__FUNCTION__', '__FUNCTION__', serialization_code=57),
Keyword('__DSO_HANDLE__', '__DSO_HANDLE__', serialization_code=58),
# Pattern keywords
PatternKeyword('Wildcard', '_', serialization_code=59),
# Punctuators
Punctuator('LeftParen', 'l_paren', text='(', serialization_code=88),
Punctuator('RightParen', 'r_paren', text=')', serialization_code=89),
Punctuator('LeftBrace', 'l_brace', text='{', serialization_code=90),
Punctuator('RightBrace', 'r_brace', text='}', serialization_code=91),
Punctuator('LeftSquareBracket', 'l_square', text='[',
serialization_code=92),
Punctuator('RightSquareBracket', 'r_square', text=']',
serialization_code=93),
Punctuator('LeftAngle', 'l_angle', text='<', serialization_code=94),
Punctuator('RightAngle', 'r_angle', text='>', serialization_code=95),
Punctuator('Period', 'period', text='.', serialization_code=85),
Punctuator('PrefixPeriod', 'period_prefix', text='.',
serialization_code=87),
Punctuator('Comma', 'comma', text=',', serialization_code=84),
Punctuator('Ellipsis', 'ellipsis', text='...', serialization_code=118),
Punctuator('Colon', 'colon', text=':', serialization_code=82),
Punctuator('Semicolon', 'semi', text=';', serialization_code=83),
Punctuator('Equal', 'equal', text='=', serialization_code=86),
Punctuator('AtSign', 'at_sign', text='@', classification='Attribute',
serialization_code=80),
Punctuator('Pound', 'pound', text='#', serialization_code=81),
Punctuator('PrefixAmpersand', 'amp_prefix', text='&',
serialization_code=96),
Punctuator('Arrow', 'arrow', text='->', serialization_code=78),
Punctuator('Backtick', 'backtick', text='`', serialization_code=79),
Punctuator('Backslash', 'backslash', text='\\\\', serialization_code=100),
Punctuator('ExclamationMark', 'exclaim_postfix', text='!',
serialization_code=99),
Punctuator('PostfixQuestionMark', 'question_postfix', text='?',
serialization_code=97),
Punctuator('InfixQuestionMark', 'question_infix', text='?',
serialization_code=98),
Punctuator('StringQuote', 'string_quote', text='\\\"',
classification='StringLiteral', serialization_code=102),
Punctuator('SingleQuote', 'single_quote', text='\\\'',
classification='StringLiteral', serialization_code=120),
Punctuator('MultilineStringQuote', 'multiline_string_quote',
text='\\\"\\\"\\\"', classification='StringLiteral',
serialization_code=103),
# Keywords prefixed with a '#'.
PoundKeyword('PoundKeyPath', 'keyPath', text='#keyPath',
serialization_code=74),
PoundKeyword('PoundLine', 'line', text='#line',
serialization_code=69),
PoundKeyword('PoundSelector', 'selector', text='#selector',
serialization_code=73),
PoundKeyword('PoundFile', 'file', text='#file',
serialization_code=68),
PoundKeyword('PoundColumn', 'column', text='#column',
serialization_code=70),
PoundKeyword('PoundFunction', 'function', text='#function',
serialization_code=72),
PoundKeyword('PoundDsohandle', 'dsohandle', text='#dsohandle',
serialization_code=71),
PoundKeyword('PoundAssert', 'assert', text='#assert',
serialization_code=117),
PoundDirectiveKeyword('PoundSourceLocation', 'sourceLocation',
text='#sourceLocation', serialization_code=65),
PoundDirectiveKeyword('PoundWarning', 'warning', text='#warning',
serialization_code=66),
PoundDirectiveKeyword('PoundError', 'error', text='#error',
serialization_code=67),
PoundConditionalDirectiveKeyword('PoundIf', 'if', text='#if',
serialization_code=64),
PoundConditionalDirectiveKeyword('PoundElse', 'else', text='#else',
serialization_code=62),
PoundConditionalDirectiveKeyword('PoundElseif', 'elseif',
text='#elseif', serialization_code=63),
PoundConditionalDirectiveKeyword('PoundEndif', 'endif',
text='#endif', serialization_code=61),
PoundConfig('PoundAvailable', 'available', text='#available',
serialization_code=60),
PoundObjectLiteral('PoundFileLiteral', 'fileLiteral',
text='#fileLiteral', serialization_code=76,
description='file reference',
protocol='ExpressibleByFileReferenceLiteral'),
PoundObjectLiteral('PoundImageLiteral', 'imageLiteral',
text='#imageLiteral', serialization_code=77,
description='image',
protocol='ExpressibleByImageLiteral'),
PoundObjectLiteral('PoundColorLiteral', 'colorLiteral',
text='#colorLiteral', serialization_code=75,
description='color',
protocol='ExpressibleByColorLiteral'),
Literal('IntegerLiteral', 'integer_literal',
classification='IntegerLiteral', serialization_code=111),
Literal('FloatingLiteral', 'floating_literal',
classification='FloatingLiteral', serialization_code=112),
Literal('StringLiteral', 'string_literal',
classification='StringLiteral', serialization_code=113),
Misc('Unknown', 'unknown', serialization_code=115),
Misc('Identifier', 'identifier', classification=None,
serialization_code=105),
Misc('UnspacedBinaryOperator', 'oper_binary_unspaced',
serialization_code=107),
Misc('SpacedBinaryOperator', 'oper_binary_spaced', serialization_code=108),
Misc('PostfixOperator', 'oper_postfix', serialization_code=110),
Misc('PrefixOperator', 'oper_prefix', serialization_code=109),
Misc('DollarIdentifier', 'dollarident', classification='DollarIdentifier',
serialization_code=106),
Misc('ContextualKeyword', 'contextual_keyword', classification='Keyword',
serialization_code=114),
Misc('RawStringDelimiter', 'raw_string_delimiter', serialization_code=119),
Misc('StringSegment', 'string_segment', classification='StringLiteral',
serialization_code=104),
Misc('StringInterpolationAnchor', 'string_interpolation_anchor',
text=')', classification='StringInterpolationAnchor',
serialization_code=101),
Misc('Yield', 'kw_yield', serialization_code=116, text='yield'),
]
SYNTAX_TOKEN_MAP = {token.name + 'Token': token for token in SYNTAX_TOKENS}
def verify_no_duplicate_serialization_codes(tokens):
used_codes = set()
for token in tokens:
if token.serialization_code in used_codes:
error("Serialization code %d used twice for tokens" %
token.serialization_code)
used_codes.add(token.serialization_code)
verify_no_duplicate_serialization_codes(SYNTAX_TOKENS)
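# Example (illustrative): SYNTAX_TOKEN_MAP is keyed by '<Name>Token', so the
# 'struct' keyword is available as SYNTAX_TOKEN_MAP['StructToken']; its
# swift_kind() is 'structKeyword' and its macro_name() is 'DECL_KEYWORD'.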
|
|
# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" UniSpeechSat model configuration"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/unispeech_sat-base-960h": "https://huggingface.co/facebook/unispeech_sat-base-960h/resolve/main/config.json",
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`UniSpeechSatModel`]. It is used to instantiate an
UniSpeechSat model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the UniSpeechSat
[facebook/unispeech_sat-base-960h](https://huggingface.co/facebook/unispeech_sat-base-960h) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32):
Vocabulary size of the UniSpeechSat model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`UniSpeechSatModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
final_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the final projection layer of [`UniSpeechSatForCTC`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
feat_extract_norm (`str`, *optional*, defaults to `"group"`):
The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
convolutional layers.
feat_proj_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for output of the feature encoder.
        feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the 1D convolutional layers of the feature
extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for quantized feature encoder states.
conv_dim (`Tuple[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
conv_stride (`Tuple[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
            of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
        conv_kernel (`Tuple[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
            length of *conv_kernel* defines the number of convolutional layers and has to match the length of
*conv_dim*.
conv_bias (`bool`, *optional*, defaults to `False`):
Whether the 1D convolutional layers have a bias.
num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
embeddings layer.
num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
Number of groups of 1D convolutional positional embeddings layer.
do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
Whether to apply *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is
True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is
False` corresponds to applying layer norm after the attention layer.
apply_spec_augment (`bool`, *optional*, defaults to `True`):
Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
[SpecAugment: A Simple Data Augmentation Method for Automatic Speech
Recognition](https://arxiv.org/abs/1904.08779).
mask_time_prob (`float`, *optional*, defaults to 0.05):
Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
            procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If
            reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
mask_time_length (`int`, *optional*, defaults to 10):
Length of vector span along the time axis.
        mask_time_min_masks (`int`, *optional*, defaults to 2):
            The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
            irrespective of `mask_time_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length <
mask_time_min_masks''
mask_feature_prob (`float`, *optional*, defaults to 0.0):
Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
            masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_feature_length'' independent masks over
            the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector
span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
True`.
mask_feature_length (`int`, *optional*, defaults to 10):
Length of vector span along the feature axis.
        mask_feature_min_masks (`int`, *optional*, defaults to 0):
            The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
            step, irrespective of `mask_feature_prob`. Only relevant if
''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks''
num_codevectors_per_group (`int`, *optional*, defaults to 320):
Number of entries in each quantization codebook (group).
num_codevector_groups (`int`, *optional*, defaults to 2):
Number of codevector groups for product codevector quantization.
contrastive_logits_temperature (`float`, *optional*, defaults to 0.1):
The temperature *kappa* in the contrastive loss.
feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for the output of the feature encoder that's used by the quantizer.
num_negatives (`int`, *optional*, defaults to 100):
Number of negative samples for the contrastive loss.
codevector_dim (`int`, *optional*, defaults to 256):
Dimensionality of the quantized feature vectors.
proj_codevector_dim (`int`, *optional*, defaults to 256):
Dimensionality of the final projection of both the quantized and the transformer features.
        diversity_loss_weight (`float`, *optional*, defaults to 0.1):
The weight of the codebook diversity loss component.
ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`):
Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
instance of [`UniSpeechSatForCTC`].
ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
of [`UniSpeechSatForCTC`].
use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
instance of [`UniSpeechSatForSequenceClassification`].
classifier_proj_size (`int`, *optional*, defaults to 256):
Dimensionality of the projection before token mean-pooling for classification.
tdnn_dim (`Tuple[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
tdnn_kernel (`Tuple[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
*XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
tdnn_dilation (`Tuple[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the
*XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
xvector_output_dim (`int`, *optional*, defaults to 512):
Dimensionality of the *XVector* embedding vectors.
Example:
```python
>>> from transformers import UniSpeechSatModel, UniSpeechSatConfig
>>> # Initializing a UniSpeechSat facebook/unispeech_sat-base-960h style configuration
>>> configuration = UniSpeechSatConfig()
>>> # Initializing a model from the facebook/unispeech_sat-base-960h style configuration
>>> model = UniSpeechSatModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "unispeech-sat"
def __init__(
self,
vocab_size=32,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout=0.1,
activation_dropout=0.1,
attention_dropout=0.1,
feat_proj_dropout=0.0,
feat_quantizer_dropout=0.0,
final_dropout=0.1,
layerdrop=0.1,
initializer_range=0.02,
layer_norm_eps=1e-5,
feat_extract_norm="group",
feat_extract_activation="gelu",
conv_dim=(512, 512, 512, 512, 512, 512, 512),
conv_stride=(5, 2, 2, 2, 2, 2, 2),
conv_kernel=(10, 3, 3, 3, 3, 2, 2),
conv_bias=False,
num_conv_pos_embeddings=128,
num_conv_pos_embedding_groups=16,
do_stable_layer_norm=False,
apply_spec_augment=True,
mask_time_prob=0.05,
mask_time_length=10,
mask_time_min_masks=2,
mask_feature_prob=0.0,
mask_feature_length=10,
mask_feature_min_masks=0,
num_codevectors_per_group=320,
num_codevector_groups=2,
contrastive_logits_temperature=0.1,
num_negatives=100,
codevector_dim=256,
proj_codevector_dim=256,
diversity_loss_weight=0.1,
ctc_loss_reduction="mean",
ctc_zero_infinity=False,
use_weighted_layer_sum=False,
classifier_proj_size=256,
tdnn_dim=(512, 512, 512, 512, 1500),
tdnn_kernel=(5, 3, 3, 1, 1),
tdnn_dilation=(1, 2, 3, 1, 1),
xvector_output_dim=512,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
num_clusters=504,
**kwargs
):
super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_activation = feat_extract_activation
self.conv_dim = list(conv_dim)
self.conv_stride = list(conv_stride)
self.conv_kernel = list(conv_kernel)
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_feat_extract_layers = len(self.conv_dim)
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_attention_heads = num_attention_heads
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.feat_proj_dropout = feat_proj_dropout
self.final_dropout = final_dropout
self.layerdrop = layerdrop
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.num_clusters = num_clusters
self.do_stable_layer_norm = do_stable_layer_norm
self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. "
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
)
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
self.apply_spec_augment = apply_spec_augment
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.mask_time_min_masks = mask_time_min_masks
self.mask_feature_prob = mask_feature_prob
self.mask_feature_length = mask_feature_length
self.mask_feature_min_masks = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
self.num_codevectors_per_group = num_codevectors_per_group
self.num_codevector_groups = num_codevector_groups
self.contrastive_logits_temperature = contrastive_logits_temperature
self.feat_quantizer_dropout = feat_quantizer_dropout
self.num_negatives = num_negatives
self.codevector_dim = codevector_dim
self.proj_codevector_dim = proj_codevector_dim
self.diversity_loss_weight = diversity_loss_weight
# ctc loss
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
self.classifier_proj_size = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
self.tdnn_dim = list(tdnn_dim)
self.tdnn_kernel = list(tdnn_kernel)
self.tdnn_dilation = list(tdnn_dilation)
self.xvector_output_dim = xvector_output_dim
@property
def inputs_to_logits_ratio(self):
return functools.reduce(operator.mul, self.conv_stride, 1)
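    # Example (illustrative): with the default conv_stride of (5, 2, 2, 2, 2, 2, 2)
    # the ratio is 5 * 2**6 = 320, i.e. one encoder frame per 320 input samples
    # (20 ms of audio at the 16 kHz sampling rate these models typically use).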
|
|
from datetime import datetime
import hashlib
import bleach
from markdown import markdown
from flask import current_app, request, url_for
from flask_login import UserMixin, AnonymousUserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from app.exceptions import ValidationError
from . import db, login_manager
class Comment(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer, primary_key =True)
body = db.Column(db.Text)
body_html = db.Column(db.Text)
timestamp = db.Column(db.DateTime, default=datetime.utcnow)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
@staticmethod
def on_changed_body(target, value, oldvalue, initiator):
allowed_tags = ['a', 'abbr', 'acronym', 'b', 'code', 'em','i','strong']
target.body_html = bleach.linkify(bleach.clean(
markdown(value, output_format = 'html'),
tags=allowed_tags, strip = True))
@staticmethod
def generate_fake(count=100):
from random import seed, randint
import forgery_py
seed()
user_count = User.query.count()
post_count = Post.query.count()
for i in range(count):
u = User.query.offset(randint(0,user_count - 1)).first()
p = Post.query.offset(randint(0,post_count - 1)).first()
c = Comment(body = forgery_py.lorem_ipsum.sentences(randint(1,5)),
timestamp = forgery_py.date.date(True),
author = u, post = p)
db.session.add(c)
db.session.commit()
class Post(db.Model):
__tablename__ = 'posts'
id = db.Column(db.Integer, primary_key = True)
body = db.Column(db.Text)
timestamp = db.Column(db.DateTime, index = True, default = datetime.utcnow)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
body_html = db.Column(db.Text)
comments = db.relationship('Comment', backref='post', lazy='dynamic')
def to_json(self):
json_post = {
'url' : url_for('api.get_post', id =self.id, _external = True),
'body': self.body,
'body_html': self.body_html,
'timestamp': self.timestamp,
            'author': url_for('api.get_user', id = self.author.id, _external = True),
'comments': url_for('api.get_post_comments', id = self.id, _external = True),
'comment_count': self.comments.count()
}
return json_post
@staticmethod
def from_json(json_post):
body = json_post.get('body')
if body is None or body =='':
raise ValidationError('post does not have a body')
return Post(body = body)
@staticmethod
def generate_fake(count=100):
from random import seed, randint
import forgery_py
seed()
user_count=User.query.count()
for i in range(count):
u=User.query.offset(randint(0,user_count-1)).first()
p = Post(body=forgery_py.lorem_ipsum.sentences(randint(1,5)),
timestamp=forgery_py.date.date(True),
author=u)
db.session.add(p)
db.session.commit()
@staticmethod
def on_changed_body(target, value, oldvalue, initiator):
allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code',
'em', 'i', 'li', 'ol','pre','strong','ul','h1','h2','h3','p','h4',
'h5','h6']
target.body_html =bleach.linkify(bleach.clean(markdown(value, output_format='html'), tags=allowed_tags, strip=True))
db.event.listen(Post.body, 'set', Post.on_changed_body)
db.event.listen(Comment.body, 'set', Comment.on_changed_body)
class Follow(db.Model):
__tablename__ = 'follows'
follower_id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key = True)
followed_id = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key = True)
timestamp = db.Column(db.DateTime, default = datetime.utcnow)
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key = True)
email = db.Column(db.String(64),unique = True, index = True)
username = db.Column(db.String(64), unique = True, index = True)
passwd_hash = db.Column(db.String(128))
confirmed = db.Column(db.Boolean, default=False)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
posts = db.relationship('Post',backref='author',lazy='dynamic')
comments = db.relationship('Comment', backref = 'author', lazy='dynamic')
name = db.Column(db.String(64))
location = db.Column(db.String(64))
about_me = db.Column(db.Text())
member_since = db.Column(db.DateTime(), default = datetime.utcnow)
last_seen = db.Column(db.DateTime(), default = datetime.utcnow)
avatar_hash = db.Column(db.String(32))
followed = db.relationship('Follow', foreign_keys=[Follow.follower_id],
backref=db.backref('follower', lazy='joined'),
lazy = 'dynamic',
cascade='all, delete-orphan')
followers = db.relationship('Follow', foreign_keys=[Follow.followed_id],
backref=db.backref('followed', lazy='joined'),
lazy = 'dynamic',
cascade='all, delete-orphan')
def __init__(self, **kwargs):
super(User,self).__init__(**kwargs)
if self.role is None:
if self.email == current_app.config['FLASKY_ADMIN']:
self.role = Role.query.filter_by(permissions=0xff).first()
if self.role is None:
if self.confirmed == True:
self.role = Role.query.filter_by(name='User').first()
else:
self.role = Role.query.filter_by(default = True).first()
if self.email is not None and self.avatar_hash is None:
self.avatar_hash = hashlib.md5(self.email.encode('utf-8')).hexdigest()
# self.follow(self)
self.followed.append(Follow(followed=self))
def can(self, permissions):
return self.role is not None and\
( self.role.permissions & permissions ) == permissions
def is_administrator(self):
return self.can(Permission.ADMINISTER)
def ping(self):
self.last_seen = datetime.utcnow()
db.session.add(self)
db.session.commit()
def to_json(self):
json_user = {
            'url': url_for('api.get_user', id = self.id, _external = True),
            'username' : self.username,
            'member_since': self.member_since,
'last_seen': self.last_seen,
'posts': url_for('api.get_user_posts', id = self.id, _external = True),
'followed_posts': url_for('api.get_user_followed_posts', id = self.id, _external = True),
'post_count': self.posts.count()
}
return json_user
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@property
def followed_posts(self):
return Post.query.join(Follow, Follow.followed_id == Post.author_id)\
.filter(Follow.follower_id == self.id)
@password.setter
def password(self, password):
self.passwd_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.passwd_hash, password)
def generate_confirmation_token(self, expiration=3600):
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm':self.id})
def confirm(self,token):
        s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
self.role = Role.query.filter_by(name='User').first()
db.session.add(self)
db.session.commit()
return True
def generate_change_email_token(self,new_email, expiration = 3600):
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm':self.id, 'old_email':self.email, 'new_email':new_email})
def confirm_change_email(self, token):
        s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('confirm') != self.id or data.get('old_email') != self.email:
return False
self.email = data.get('new_email')
self.avatar_hash = hashlib.md5(self.email.encode('utf-8')).hexdigest()
db.session.add(self)
db.session.commit()
return True
def generate_reset_token(self, expiration = 3600):
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm':self.id, 'old_password':self.passwd_hash})
def confirm_reset_password(self,token):
        s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('confirm') != self.id or data.get('old_password') != self.passwd_hash:
return False
return True
def gravatar(self, size=100, default='identicon', rating='g'):
if request.is_secure:
url="https://secure.gravatar.com/avatar"
else:
url="http://www.gravatar.com/avatar"
hash = self.avatar_hash or hashlib.md5(self.email.encode('utf-8')).hexdigest()
return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(url=url, hash=hash, size=size,default=default,rating=rating)
def generate_auth_token(self, expiration = 3600):
s = Serializer(current_app.config['SECRET_KEY'],
expires_in = expiration)
return s.dumps({'id':self.id})
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return None
return User.query.get(data.get('id'))
@staticmethod
def generate_fake(count=100):
from sqlalchemy.exc import IntegrityError
from random import seed, randint
import forgery_py
seed()
user_count = User.query.count()
for i in range(count):
u = User(email=forgery_py.internet.email_address(),
username=forgery_py.internet.user_name(True),
password=forgery_py.lorem_ipsum.word(),
confirmed=True,
name=forgery_py.name.full_name(),
location=forgery_py.address.city(),
about_me=forgery_py.lorem_ipsum.sentence(),
member_since=forgery_py.date.date(True))
for j in range(i):
ut=User.query.offset(randint(0,user_count-1)).first()
if ut is not None:
u.follow(ut)
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
db.session.commit()
@staticmethod
def add_self_follows():
for user in User.query.all():
if not user.is_following(user):
user.follow(user)
db.session.add(user)
db.session.commit()
def follow(self,user):
if not self.is_following(user):
f = Follow(follower=self, followed=user)
db.session.add(f)
db.session.commit()
def unfollow(self, user):
f = self.followed.filter_by(followed_id=user.id).first()
if f:
db.session.delete(f)
db.session.commit()
def is_following(self, user):
return self.followed.filter_by(followed_id=user.id).first() is not None
def is_followed_by(self, user):
return self.followers.filter_by(follower_id = user.id).first() is not None
def __repr__(self):
return '<User: %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_administrator(self):
return False
def is_anonymous(self):
return True
login_manager.anonymous_user = AnonymousUser
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(64))
default = db.Column(db.Boolean, default = False, index = True)
permissions = db.Column(db.Integer)
users = db.relationship('User',backref='role',lazy = 'dynamic')
@staticmethod
def insert_roles():
roles = {
'Uncheck_user':(0x00, True),
'User': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES, False),
'Moderator': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES |
Permission.MODERATE_COMMENTS, False),
'Administrator': (0xff, False)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role: %r>' % self.name
class Permission:
FOLLOW = 0x01 # follow other users
COMMENT = 0x02 # comment on other users' articles
WRITE_ARTICLES = 0x04 # write articles
MODERATE_COMMENTS = 0x08 # moderate users' comments
ADMINISTER = 0x80 # administer
    # anonymous 0x00 0b00000000 read only
# user 0x07 0b00000111 write articles, comment, follow ( default user )
# helper admin 0x0f 0b00001111 add moderation on normal user
# administer 0xff 0b11111111 all permissions
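    # A small worked example (illustrative only) of how these bit flags
    # combine; the default 'User' mask assigned in Role.insert_roles() is
    # exactly FOLLOW | COMMENT | WRITE_ARTICLES:
    #
    #   >>> Permission.FOLLOW | Permission.COMMENT | Permission.WRITE_ARTICLES
    #   7    # == 0x07, the default user mask
    #   >>> (0x07 & Permission.MODERATE_COMMENTS) == Permission.MODERATE_COMMENTS
    #   False    # a default user may not moderate comments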
class Question(db.Model):
__tablename__ = 'questions'
ques = db.Column(db.String(1024))
id = db.Column(db.String(64))
quesid = db.Column(db.Integer, primary_key = True)
def __repr__(self):
return 'Ques from %r is : %r'%(self.id,self.ques)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
|
|
import numpy as np
from scipy.signal import ricker, cwt
from cued_datalogger.api.pyqt_extensions import MatplotlibCanvas
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QSlider, QLabel, QSpinBox, QHBoxLayout, QGridLayout, QComboBox
from PyQt5.QtCore import Qt
import sys
#--------------------------------
from scipy.signal import gausspulse
#import matplotlib.pyplot as plt
t = np.linspace(-1, 1, 200, endpoint=False)
sig = np.cos(2 * np.pi * 7 * t) + gausspulse(t - 0.4, fc=2)
widths = np.arange(1, 31)
"""
cwt_result = cwt(sig, ricker, widths)
T, W = np.meshgrid(t, widths)
plt.figure()
plt.contour(T, W, cwt_result)
plt.show()
#----------------------------------
"""
class CWTPlotWidget(MatplotlibCanvas):
"""A MatplotlibCanvas widget displaying the CWT plot"""
def __init__(self, sig, t, widths, wavelet=ricker, plot_type="Colourmap",
num_contours=5, contour_spacing_dB=5):
self.sig = sig
self.t = t
self.widths = widths
self.wavelet = wavelet
self.plot_type = plot_type
self.num_contours = num_contours
self.contour_spacing_dB = contour_spacing_dB
MatplotlibCanvas.__init__(self, "Continuous Wavelet Transform")
self.calculate_cwt()
self.draw_plot()
def draw_plot(self):
"""Redraw the CWT on the canvas"""
self.axes.clear()
if self.plot_type == "Contour":
self.update_contour_sequence()
self.axes.contour(self.T, self.W, self.cwt_result, self.contour_sequence)
self.axes.set_xlabel('Time (s)')
self.axes.set_ylabel('Wavelet width (?)')
self.axes.set_xlim(self.t.min(), self.t.max())
self.axes.set_ylim(self.widths.min(), self.widths.max())
elif self.plot_type == "Surface":
pass
elif self.plot_type == "Colourmap":
self.update_contour_sequence()
self.axes.pcolormesh(self.T, self.W, self.cwt_result, vmin=self.contour_sequence[0])
self.axes.set_xlabel('Time (s)')
self.axes.set_ylabel('Wavelet width (?)')
self.axes.set_xlim(self.t.min(), self.t.max())
self.axes.set_ylim(self.widths.min(), self.widths.max())
else:
pass
self.draw()
def update_attributes(self, value):
"""A slot for updating the attributes when input widgets are adjusted"""
# What sent the signal?
sender_name = self.sender().objectName()
if sender_name == "plot_type_combobox":
self.plot_type = value
elif sender_name == "contour_spacing_spinbox" or sender_name == "contour_spacing_slider":
self.contour_spacing_dB = value
elif sender_name == "num_contours_spinbox" or sender_name == "num_contours_slider":
self.num_contours = value
else:
print("Sender {} not implemented.".format(sender_name))
# Discard any other signal
pass
# Update the plot
self.calculate_cwt()
self.draw_plot()
def calculate_cwt(self):
"""Recalculate the CWT"""
self.cwt_result = cwt(self.sig, self.wavelet, self.widths)
self.T, self.W = np.meshgrid(self.t, self.widths)
def update_contour_sequence(self):
"""Update the array which says where to plot contours, how many etc"""
# Create a vector with the right spacing from min to max value
self.contour_sequence = np.arange(self.cwt_result.min(), self.cwt_result.max(),
self.contour_spacing_dB)
# Take the appropriate number of contours
self.contour_sequence = self.contour_sequence[-self.num_contours:]
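    # Worked example of the contour selection above (illustrative numbers
    # only): if cwt_result spans 0..62 with contour_spacing_dB == 5 and
    # num_contours == 5, np.arange(0, 62, 5) gives [0, 5, ..., 60] and keeping
    # the last five values gives [40, 45, 50, 55, 60], so contours sit near the
    # maxima and draw_plot() clips the colourmap at contour_sequence[0] == 40.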
class CWTWidget(QWidget):
def __init__(self, sig, t, widths=np.arange(1, 31), parent=None):
self.sig = sig
self.t = t
self.widths = widths
super().__init__()
self.init_ui()
def init_ui(self):
# Add a cwt plot
self.cwt_plot = CWTPlotWidget(self.sig, self.t, self.widths)
#------------Plot type controls------------
self.plot_type_label = QLabel(self)
self.plot_type_label.setText("Plot type")
# Create combobox
self.plot_type_combobox = QComboBox(self)
self.plot_type_combobox.addItems(["Colourmap", "Contour", "Surface"])
self.plot_type_combobox.setObjectName("plot_type_combobox")
# Update on change
self.plot_type_combobox.activated[str].connect(self.cwt_plot.update_attributes)
#------------Contour spacing controls------------
self.contour_spacing_label = QLabel(self)
self.contour_spacing_label.setText("Contour spacing")
# Create spinbox
self.contour_spacing_spinbox = QSpinBox(self)
self.contour_spacing_spinbox.setObjectName("contour_spacing_spinbox")
self.contour_spacing_spinbox.setRange(1, 12)
# Create slider
self.contour_spacing_slider = QSlider(Qt.Vertical, self)
self.contour_spacing_slider.setObjectName("contour_spacing_slider")
self.contour_spacing_slider.setRange(1, 12)
# Connect spinbox and slider together
self.contour_spacing_spinbox.valueChanged.connect(self.contour_spacing_slider.setValue)
self.contour_spacing_slider.valueChanged.connect(self.contour_spacing_spinbox.setValue)
# Set values
self.contour_spacing_spinbox.setValue(self.cwt_plot.contour_spacing_dB)
self.contour_spacing_slider.setValue(self.cwt_plot.contour_spacing_dB)
# Update screen on change
self.contour_spacing_slider.valueChanged.connect(self.cwt_plot.update_attributes)
self.contour_spacing_spinbox.valueChanged.connect(self.cwt_plot.update_attributes)
#------------Num contours controls------------
self.num_contours_label = QLabel(self)
self.num_contours_label.setText("Num contours")
# Create spinbox
self.num_contours_spinbox = QSpinBox(self)
self.num_contours_spinbox.setObjectName("num_contours_spinbox")
self.num_contours_spinbox.setRange(1, 12)
# Create slider
self.num_contours_slider = QSlider(Qt.Vertical, self)
self.num_contours_slider.setObjectName("num_contours_slider")
self.num_contours_slider.setRange(1, 12)
# Connect spinbox and slider together
self.num_contours_spinbox.valueChanged.connect(self.num_contours_slider.setValue)
self.num_contours_slider.valueChanged.connect(self.num_contours_spinbox.setValue)
# Set values
self.num_contours_spinbox.setValue(self.cwt_plot.num_contours)
self.num_contours_slider.setValue(self.cwt_plot.num_contours)
# Update screen on change
self.num_contours_slider.valueChanged.connect(self.cwt_plot.update_attributes)
self.num_contours_spinbox.valueChanged.connect(self.cwt_plot.update_attributes)
#------------Layout------------
# CWT controls:
self.cwt_controls_label = QLabel(self)
self.cwt_controls_label.setText("<b>CWT controls</b>")
cwt_controls = QGridLayout()
cwt_controls.addWidget(self.cwt_controls_label, 0, 0)
# Plot controls:
self.plot_controls_label = QLabel(self)
self.plot_controls_label.setText("<b>Plot controls</b>")
plot_controls = QGridLayout()
plot_controls.addWidget(self.plot_controls_label, 0, 0)
plot_controls.addWidget(self.contour_spacing_label, 1, 0)
plot_controls.addWidget(self.contour_spacing_spinbox, 2, 0)
plot_controls.addWidget(self.contour_spacing_slider, 3, 0)
plot_controls.addWidget(self.num_contours_label, 1, 1)
plot_controls.addWidget(self.num_contours_spinbox, 2, 1)
plot_controls.addWidget(self.num_contours_slider, 3, 1)
plot_controls.addWidget(self.plot_type_label, 4, 0)
plot_controls.addWidget(self.plot_type_combobox, 4, 1)
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(self.cwt_plot)
hbox.addLayout(plot_controls)
vbox.addLayout(hbox)
vbox.addLayout(cwt_controls)
self.setLayout(vbox)
self.setWindowTitle('CWT')
self.show()
if __name__ == '__main__':
app = 0
app = QApplication(sys.argv)
cwt_w = CWTWidget(sig, t)
sys.exit(app.exec_())
|
|
import logging
import numpy as np
from collections import OrderedDict
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano.tensor.nnet.conv import conv2d, ConvOp
from theano.sandbox.cuda.blas import GpuCorrMM
from theano.sandbox.cuda.basic_ops import gpu_contiguous
from blocks.bricks.cost import SquaredError
from blocks.bricks.cost import CategoricalCrossEntropy, MisclassificationRate
from blocks.graph import add_annotation, Annotation
from blocks.roles import add_role, PARAMETER, WEIGHT, BIAS
from utils import shared_param, AttributeDict
from nn import maxpool_2d, global_meanpool_2d, BNPARAM
logger = logging.getLogger('main.model')
floatX = theano.config.floatX
class LadderAE():
def __init__(self, p):
self.p = p
self.init_weights_transpose = False
self.default_lr = p.lr
self.shareds = OrderedDict()
self.rstream = RandomStreams(seed=p.seed)
self.rng = np.random.RandomState(seed=p.seed)
n_layers = len(p.encoder_layers)
assert n_layers > 1, "Need to define encoder layers"
assert n_layers == len(p.denoising_cost_x), (
"Number of denoising costs does not match with %d layers: %s" %
(n_layers, str(p.denoising_cost_x)))
def one_to_all(x):
""" (5.,) -> 5 -> (5., 5., 5.)
('relu',) -> 'relu' -> ('relu', 'relu', 'relu')
"""
if type(x) is tuple and len(x) == 1:
x = x[0]
if type(x) is float:
x = (np.float32(x),) * n_layers
if type(x) is str:
x = (x,) * n_layers
return x
p.decoder_spec = one_to_all(p.decoder_spec)
p.f_local_noise_std = one_to_all(p.f_local_noise_std)
acts = one_to_all(p.get('act', 'relu'))
assert n_layers == len(p.decoder_spec), "f and g need to match"
assert (n_layers == len(acts)), (
"Not enough activations given. Requires %d. Got: %s" %
(n_layers, str(acts)))
acts = acts[:-1] + ('softmax',)
def parse_layer(spec):
""" 'fc:5' -> ('fc', 5)
'5' -> ('fc', 5)
5 -> ('fc', 5)
'convv:3:2:2' -> ('convv', [3,2,2])
"""
if type(spec) is not str:
return "fc", spec
spec = spec.split(':')
l_type = spec.pop(0) if len(spec) >= 2 else "fc"
spec = map(int, spec)
spec = spec[0] if len(spec) == 1 else spec
return l_type, spec
enc = map(parse_layer, p.encoder_layers)
self.layers = list(enumerate(zip(enc, p.decoder_spec, acts)))
def weight(self, init, name, cast_float32=True, for_conv=False):
weight = self.shared(init, name, cast_float32, role=WEIGHT)
if for_conv:
return weight.dimshuffle('x', 0, 'x', 'x')
return weight
def bias(self, init, name, cast_float32=True, for_conv=False):
b = self.shared(init, name, cast_float32, role=BIAS)
if for_conv:
return b.dimshuffle('x', 0, 'x', 'x')
return b
def shared(self, init, name, cast_float32=True, role=PARAMETER, **kwargs):
p = self.shareds.get(name)
if p is None:
p = shared_param(init, name, cast_float32, role, **kwargs)
self.shareds[name] = p
return p
def counter(self):
name = 'counter'
p = self.shareds.get(name)
update = []
if p is None:
p_max_val = np.float32(10)
p = self.shared(np.float32(1), name, role=BNPARAM)
p_max = self.shared(p_max_val, name + '_max', role=BNPARAM)
update = [(p, T.clip(p + np.float32(1), np.float32(0), p_max)),
(p_max, p_max)]
return (p, update)
def noise_like(self, x):
noise = self.rstream.normal(size=x.shape, avg=0.0, std=1.0)
return T.cast(noise, dtype=floatX)
def rand_init(self, in_dim, out_dim):
""" Random initialization for fully connected layers """
W = self.rng.randn(in_dim, out_dim) / np.sqrt(in_dim)
return W
def rand_init_conv(self, dim):
""" Random initialization for convolution filters """
fan_in = np.prod(dtype=floatX, a=dim[1:])
bound = np.sqrt(3. / max(1.0, (fan_in)))
W = np.asarray(
self.rng.uniform(low=-bound, high=bound, size=dim), dtype=floatX)
return W
def new_activation_dict(self):
return AttributeDict({'z': {}, 'h': {}, 's': {}, 'm': {}})
def annotate_update(self, update, tag_to):
a = Annotation()
for (var, up) in update:
a.updates[var] = up
add_annotation(tag_to, a)
def apply(self, input_labeled, target_labeled, input_unlabeled):
self.layer_counter = 0
input_dim = self.p.encoder_layers[0]
# Store the dimension tuples in the same order as layers.
layers = self.layers
self.layer_dims = {0: input_dim}
self.lr = self.shared(self.default_lr, 'learning_rate', role=None)
self.costs = costs = AttributeDict()
self.costs.denois = AttributeDict()
self.act = AttributeDict()
self.error = AttributeDict()
top = len(layers) - 1
N = input_labeled.shape[0]
self.join = lambda l, u: T.concatenate([l, u], axis=0)
self.labeled = lambda x: x[:N] if x is not None else x
self.unlabeled = lambda x: x[N:] if x is not None else x
self.split_lu = lambda x: (self.labeled(x), self.unlabeled(x))
input_concat = self.join(input_labeled, input_unlabeled)
def encoder(input_, path_name, input_noise_std=0, noise_std=[]):
h = input_
logger.info(' 0: noise %g' % input_noise_std)
if input_noise_std > 0.:
h = h + self.noise_like(h) * input_noise_std
d = AttributeDict()
d.unlabeled = self.new_activation_dict()
d.labeled = self.new_activation_dict()
d.labeled.z[0] = self.labeled(h)
d.unlabeled.z[0] = self.unlabeled(h)
prev_dim = input_dim
for i, (spec, _, act_f) in layers[1:]:
d.labeled.h[i - 1], d.unlabeled.h[i - 1] = self.split_lu(h)
noise = noise_std[i] if i < len(noise_std) else 0.
curr_dim, z, m, s, h = self.f(h, prev_dim, spec, i, act_f,
path_name=path_name,
noise_std=noise)
assert self.layer_dims.get(i) in (None, curr_dim)
self.layer_dims[i] = curr_dim
d.labeled.z[i], d.unlabeled.z[i] = self.split_lu(z)
d.unlabeled.s[i] = s
d.unlabeled.m[i] = m
prev_dim = curr_dim
d.labeled.h[i], d.unlabeled.h[i] = self.split_lu(h)
return d
# Clean, supervised
logger.info('Encoder: clean, labeled')
clean = self.act.clean = encoder(input_concat, 'clean')
# Corrupted, supervised
logger.info('Encoder: corr, labeled')
corr = self.act.corr = encoder(input_concat, 'corr',
input_noise_std=self.p.super_noise_std,
noise_std=self.p.f_local_noise_std)
est = self.act.est = self.new_activation_dict()
# Decoder path in opposite order
logger.info('Decoder: z_corr -> z_est')
for i, ((_, spec), l_type, act_f) in layers[::-1]:
z_corr = corr.unlabeled.z[i]
z_clean = clean.unlabeled.z[i]
z_clean_s = clean.unlabeled.s.get(i)
z_clean_m = clean.unlabeled.m.get(i)
fspec = layers[i+1][1][0] if len(layers) > i+1 else (None, None)
if i == top:
ver = corr.unlabeled.h[i]
ver_dim = self.layer_dims[i]
top_g = True
else:
ver = est.z.get(i + 1)
ver_dim = self.layer_dims.get(i + 1)
top_g = False
z_est = self.g(z_lat=z_corr,
z_ver=ver,
in_dims=ver_dim,
out_dims=self.layer_dims[i],
l_type=l_type,
num=i,
fspec=fspec,
top_g=top_g)
if z_est is not None:
# Denoising cost
if z_clean_s and self.p.zestbn == 'bugfix':
z_est_norm = (z_est - z_clean_m) / T.sqrt(z_clean_s + np.float32(1e-10))
elif z_clean_s is None or self.p.zestbn == 'no':
z_est_norm = z_est
else:
assert False, 'Not supported path'
se = SquaredError('denois' + str(i))
costs.denois[i] = se.apply(z_est_norm.flatten(2),
z_clean.flatten(2)) \
/ np.prod(self.layer_dims[i], dtype=floatX)
costs.denois[i].name = 'denois' + str(i)
denois_print = 'denois %.2f' % self.p.denoising_cost_x[i]
else:
denois_print = ''
# Store references for later use
est.h[i] = self.apply_act(z_est, act_f)
est.z[i] = z_est
est.s[i] = None
est.m[i] = None
logger.info(' g%d: %10s, %s, dim %s -> %s' % (
i, l_type,
denois_print,
self.layer_dims.get(i+1),
self.layer_dims.get(i)
))
# Costs
y = target_labeled.flatten()
costs.class_clean = CategoricalCrossEntropy().apply(y, clean.labeled.h[top])
costs.class_clean.name = 'cost_class_clean'
costs.class_corr = CategoricalCrossEntropy().apply(y, corr.labeled.h[top])
costs.class_corr.name = 'cost_class_corr'
# This will be used for training
costs.total = costs.class_corr * 1.0
for i in range(top + 1):
if costs.denois.get(i) and self.p.denoising_cost_x[i] > 0:
costs.total += costs.denois[i] * self.p.denoising_cost_x[i]
costs.total.name = 'cost_total'
# Classification error
mr = MisclassificationRate()
self.error.clean = mr.apply(y, clean.labeled.h[top]) * np.float32(100.)
self.error.clean.name = 'error_rate_clean'
def apply_act(self, input, act_name):
if input is None:
return input
act = {
'relu': lambda x: T.maximum(0, x),
'leakyrelu': lambda x: T.switch(x > 0., x, 0.1 * x),
'linear': lambda x: x,
'softplus': lambda x: T.log(1. + T.exp(x)),
'sigmoid': lambda x: T.nnet.sigmoid(x),
'softmax': lambda x: T.nnet.softmax(x),
}.get(act_name)
assert act, 'unknown act %s' % act_name
if act_name == 'softmax':
input = input.flatten(2)
return act(input)
def annotate_bn(self, var, id, var_type, mb_size, size, norm_ax):
var_shape = np.array((1,) + size)
out_dim = np.prod(var_shape) / np.prod(var_shape[list(norm_ax)])
# Flatten the var - shared variable updating is not trivial otherwise,
# as theano seems to believe a row vector is a matrix and will complain
# about the updates
orig_shape = var.shape
var = var.flatten()
# Here we add the name and role, the variables will later be identified
# by these values
var.name = id + '_%s_clean' % var_type
add_role(var, BNPARAM)
shared_var = self.shared(np.zeros(out_dim),
name='shared_%s' % var.name, role=None)
# Update running average estimates. When the counter is reset to 1, it
# will clear its memory
cntr, c_up = self.counter()
one = np.float32(1)
run_avg = lambda new, old: one / cntr * new + (one - one / cntr) * old
if var_type == 'mean':
new_value = run_avg(var, shared_var)
elif var_type == 'var':
mb_size = T.cast(mb_size, 'float32')
new_value = run_avg(mb_size / (mb_size - one) * var, shared_var)
else:
            raise NotImplementedError('Unknown batch norm var %s' % var_type)
# Add the counter update to the annotated update if it is the first
# instance of a counter
self.annotate_update([(shared_var, new_value)] + c_up, var)
return var.reshape(orig_shape)
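    # Note on the running average in annotate_bn (illustrative numbers):
    # run_avg reproduces the exact cumulative mean of the batch statistics
    # until the counter saturates at p_max == 10 (e.g. cntr == 3 gives
    # 1/3 * new + 2/3 * old); after saturation it behaves like an exponential
    # moving average with smoothing factor 1/10.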
def f(self, h, in_dim, spec, num, act_f, path_name, noise_std=0):
assert path_name in ['clean', 'corr']
# Generates identifiers used for referencing shared variables.
# E.g. clean and corrupted encoders will end up using the same
# variable name and hence sharing parameters
gen_id = lambda s: '_'.join(['f', str(num), s])
layer_type, _ = spec
# Pooling
if layer_type in ['maxpool', 'globalmeanpool']:
z, output_size = self.f_pool(h, spec, in_dim)
norm_ax = (0, -2, -1)
# after pooling, no activation func for now unless its softmax
act_f = "linear" if act_f != "softmax" else act_f
# Convolution
elif layer_type in ['convv', 'convf']:
z, output_size = self.f_conv(h, spec, in_dim, gen_id('W'))
norm_ax = (0, -2, -1)
# Fully connected
elif layer_type == "fc":
h = h.flatten(2) if h.ndim > 2 else h
_, dim = spec
W = self.weight(self.rand_init(np.prod(in_dim), dim), gen_id('W'))
z, output_size = T.dot(h, W), (dim,)
norm_ax = (0,)
else:
raise ValueError("Unknown layer spec: %s" % layer_type)
m = s = None
is_normalizing = True
if is_normalizing:
keep_dims = True
z_l = self.labeled(z)
z_u = self.unlabeled(z)
m = z_u.mean(norm_ax, keepdims=keep_dims)
s = z_u.var(norm_ax, keepdims=keep_dims)
m_l = z_l.mean(norm_ax, keepdims=keep_dims)
s_l = z_l.var(norm_ax, keepdims=keep_dims)
if path_name == 'clean':
# Batch normalization estimates the mean and variance of
# validation and test sets based on the training set
# statistics. The following annotates the computation of
# running average to the graph.
m_l = self.annotate_bn(m_l, gen_id('bn'), 'mean', z_l.shape[0],
output_size, norm_ax)
s_l = self.annotate_bn(s_l, gen_id('bn'), 'var', z_l.shape[0],
output_size, norm_ax)
z = self.join(
(z_l - m_l) / T.sqrt(s_l + np.float32(1e-10)),
(z_u - m) / T.sqrt(s + np.float32(1e-10)))
if noise_std > 0:
z += self.noise_like(z) * noise_std
# z for lateral connection
z_lat = z
b_init, c_init = 0.0, 1.0
b_c_size = output_size[0]
# Add bias
if act_f != 'linear':
z += self.bias(b_init * np.ones(b_c_size), gen_id('b'),
for_conv=len(output_size) > 1)
if is_normalizing:
# Add free parameter (gamma in original Batch Normalization paper)
            # if needed by the activation. For instance ReLU doesn't need one
# and we only add it to softmax if hyperparameter top_c is set.
if (act_f not in ['relu', 'leakyrelu', 'linear', 'softmax'] or
(act_f == 'softmax' and self.p.top_c is True)):
c = self.weight(c_init * np.ones(b_c_size), gen_id('c'),
for_conv=len(output_size) > 1)
z *= c
h = self.apply_act(z, act_f)
logger.info(' f%d: %s, %s,%s noise %.2f, params %s, dim %s -> %s' % (
num, layer_type, act_f, ' BN,' if is_normalizing else '',
noise_std, spec[1], in_dim, output_size))
return output_size, z_lat, m, s, h
def f_pool(self, x, spec, in_dim):
layer_type, dims = spec
num_filters = in_dim[0]
if "globalmeanpool" == layer_type:
y, output_size = global_meanpool_2d(x, num_filters)
# scale the variance to match normal conv layers with xavier init
y = y * np.float32(in_dim[-1]) * np.float32(np.sqrt(3))
else:
assert dims[0] != 1 or dims[1] != 1
y, output_size = maxpool_2d(x, in_dim,
poolsize=(dims[1], dims[1]),
poolstride=(dims[0], dims[0]))
return y, output_size
def f_conv(self, x, spec, in_dim, weight_name):
layer_type, dims = spec
num_filters = dims[0]
filter_size = (dims[1], dims[1])
stride = (dims[2], dims[2])
bm = 'full' if 'convf' in layer_type else 'valid'
num_channels = in_dim[0]
W = self.weight(self.rand_init_conv(
(num_filters, num_channels) + filter_size), weight_name)
if stride != (1, 1):
f = GpuCorrMM(subsample=stride, border_mode=bm, pad=(0, 0))
y = f(gpu_contiguous(x), gpu_contiguous(W))
else:
assert self.p.batch_size == self.p.valid_batch_size
y = conv2d(x, W, image_shape=(2*self.p.batch_size, ) + in_dim,
filter_shape=((num_filters, num_channels) +
filter_size), border_mode=bm)
output_size = ((num_filters,) +
ConvOp.getOutputShape(in_dim[1:], filter_size,
stride, bm))
return y, output_size
def g(self, z_lat, z_ver, in_dims, out_dims, l_type, num, fspec, top_g):
f_layer_type, dims = fspec
is_conv = f_layer_type is not None and ('conv' in f_layer_type or
'pool' in f_layer_type)
gen_id = lambda s: '_'.join(['g', str(num), s])
in_dim = np.prod(dtype=floatX, a=in_dims)
out_dim = np.prod(dtype=floatX, a=out_dims)
num_filters = out_dims[0] if is_conv else out_dim
if l_type[-1] in ['0']:
g_type, u_type = l_type[:-1], l_type[-1]
else:
g_type, u_type = l_type, None
# Mapping from layer above: u
if u_type in ['0'] or z_ver is None:
if z_ver is None and u_type not in ['0']:
logger.warn('Decoder %d:%s without vertical input' %
(num, g_type))
u = None
else:
if top_g:
u = z_ver
elif is_conv:
u = self.g_deconv(z_ver, in_dims, out_dims, gen_id('W'), fspec)
else:
W = self.weight(self.rand_init(in_dim, out_dim), gen_id('W'))
u = T.dot(z_ver, W)
# Batch-normalize u
if u is not None:
norm_ax = (0,) if u.ndim <= 2 else (0, -2, -1)
keep_dims = True
u -= u.mean(norm_ax, keepdims=keep_dims)
u /= T.sqrt(u.var(norm_ax, keepdims=keep_dims) +
np.float32(1e-10))
# Define the g function
if not is_conv:
z_lat = z_lat.flatten(2)
bi = lambda inits, name: self.bias(inits * np.ones(num_filters),
gen_id(name), for_conv=is_conv)
wi = lambda inits, name: self.weight(inits * np.ones(num_filters),
gen_id(name), for_conv=is_conv)
if g_type == '':
z_est = None
elif g_type == 'i':
z_est = z_lat
elif g_type in ['sig']:
sigval = bi(0., 'c1') + wi(1., 'c2') * z_lat
if u is not None:
sigval += wi(0., 'c3') * u + wi(0., 'c4') * z_lat * u
sigval = T.nnet.sigmoid(sigval)
z_est = bi(0., 'a1') + wi(1., 'a2') * z_lat + wi(1., 'b1') * sigval
if u is not None:
z_est += wi(0., 'a3') * u + wi(0., 'a4') * z_lat * u
elif g_type in ['lin']:
a1 = wi(1.0, 'a1')
b = bi(0.0, 'b')
z_est = a1 * z_lat + b
elif g_type in ['relu']:
assert u is not None
b = bi(0., 'b')
x = u + b
z_est = self.apply_act(x, 'relu')
elif g_type in ['sigmoid']:
assert u is not None
b = bi(0., 'b')
c = wi(1., 'c')
z_est = self.apply_act((u + b) * c, 'sigmoid')
elif g_type in ['comparison_g2']:
# sig without the uz cross term
sigval = bi(0., 'c1') + wi(1., 'c2') * z_lat
if u is not None:
sigval += wi(0., 'c3') * u
sigval = T.nnet.sigmoid(sigval)
z_est = bi(0., 'a1') + wi(1., 'a2') * z_lat + wi(1., 'b1') * sigval
if u is not None:
z_est += wi(0., 'a3') * u
elif g_type in ['comparison_g3']:
# sig without the sigmoid nonlinearity
z_est = bi(0., 'a1') + wi(1., 'a2') * z_lat
if u is not None:
z_est += wi(0., 'a3') * u + wi(0., 'a4') * z_lat * u
elif g_type in ['comparison_g4']:
# No mixing between z_lat and u before final sum, otherwise similar
# to sig
def nonlin(inp, in_name='input', add_bias=True):
w1 = wi(1., 'w1_%s' % in_name)
b1 = bi(0., 'b1')
w2 = wi(1., 'w2_%s' % in_name)
b2 = bi(0., 'b2') if add_bias else 0
w3 = wi(0., 'w3_%s' % in_name)
return w2 * T.nnet.sigmoid(b1 + w1 * inp) + w3 * inp + b2
z_est = nonlin(z_lat, 'lat') if u is None else \
nonlin(z_lat, 'lat') + nonlin(u, 'ver', False)
elif g_type in ['comparison_g5', 'gauss']:
# Gaussian assumption on z: (z - mu) * v + mu
if u is None:
b1 = bi(0., 'b1')
w1 = wi(1., 'w1')
z_est = w1 * z_lat + b1
else:
a1 = bi(0., 'a1')
a2 = wi(1., 'a2')
a3 = bi(0., 'a3')
a4 = bi(0., 'a4')
a5 = bi(0., 'a5')
a6 = bi(0., 'a6')
a7 = wi(1., 'a7')
a8 = bi(0., 'a8')
a9 = bi(0., 'a9')
a10 = bi(0., 'a10')
mu = a1 * T.nnet.sigmoid(a2 * u + a3) + a4 * u + a5
v = a6 * T.nnet.sigmoid(a7 * u + a8) + a9 * u + a10
z_est = (z_lat - mu) * v + mu
else:
raise NotImplementedError("unknown g type: %s" % str(g_type))
# Reshape the output if z is for conv but u from fc layer
if (z_est is not None and type(out_dims) == tuple and
len(out_dims) > 1.0 and z_est.ndim < 4):
z_est = z_est.reshape((z_est.shape[0],) + out_dims)
return z_est
def g_deconv(self, z_ver, in_dims, out_dims, weight_name, fspec):
""" Inverse operation for each type of f used in convnets """
f_type, f_dims = fspec
assert z_ver is not None
num_channels = in_dims[0] if in_dims is not None else None
num_filters, width, height = out_dims[:3]
if f_type in ['globalmeanpool']:
u = T.addbroadcast(z_ver, 2, 3)
assert in_dims[1] == 1 and in_dims[2] == 1, \
"global pooling needs in_dims (1,1): %s" % str(in_dims)
elif f_type in ['maxpool']:
            sh, stride, size = z_ver.shape, f_dims[0], f_dims[1]
            assert stride == size, "depooling requires stride == size"
            u = T.zeros((sh[0], sh[1], sh[2] * stride, sh[3] * stride),
                        dtype=z_ver.dtype)
            for x in xrange(stride):
                for y in xrange(stride):
                    u = T.set_subtensor(u[:, :, x::stride, y::stride], z_ver)
u = u[:, :, :width, :height]
elif f_type in ['convv', 'convf']:
            filter_size, stride = (f_dims[1], f_dims[1]), f_dims[2]
W_shape = (num_filters, num_channels) + filter_size
W = self.weight(self.rand_init_conv(W_shape), weight_name)
            if stride > 1:
# upsample if strided version
sh = z_ver.shape
                u = T.zeros((sh[0], sh[1], sh[2] * stride, sh[3] * stride),
                            dtype=z_ver.dtype)
                u = T.set_subtensor(u[:, :, ::stride, ::stride], z_ver)
else:
u = z_ver # no strides, only deconv
u = conv2d(u, W, filter_shape=W_shape,
border_mode='valid' if 'convf' in f_type else 'full')
u = u[:, :, :width, :height]
else:
raise NotImplementedError('Layer %s has no convolutional decoder'
% f_type)
return u
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from heatclient import exc as heat_exc
from oslo_config import cfg
from oslo_log import log as logging
from sahara import conductor as c
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LW
from sahara.service import engine as e
from sahara.service.heat import templates as ht
from sahara.service import volumes
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils import general as g
from sahara.utils.openstack import base as b
from sahara.utils.openstack import heat
conductor = c.API
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
CREATE_STAGES = ["Spawning", "Waiting", "Preparing"]
SCALE_STAGES = ["Scaling: Spawning", "Scaling: Waiting", "Scaling: Preparing"]
ROLLBACK_STAGES = ["Rollback: Spawning", "Rollback: Waiting",
"Rollback: Preparing"]
class HeatEngine(e.Engine):
def get_type_and_version(self):
return "heat.3.0"
def create_cluster(self, cluster):
self._update_rollback_strategy(cluster, shutdown=True)
target_count = self._get_ng_counts(cluster)
self._nullify_ng_counts(cluster)
self._launch_instances(cluster, target_count, CREATE_STAGES)
self._update_rollback_strategy(cluster)
def _get_ng_counts(self, cluster):
count = {}
for node_group in cluster.node_groups:
count[node_group.id] = node_group.count
return count
def _nullify_ng_counts(self, cluster):
ctx = context.ctx()
for node_group in cluster.node_groups:
conductor.node_group_update(ctx, node_group, {"count": 0})
def scale_cluster(self, cluster, target_count):
ctx = context.ctx()
rollback_count = self._get_ng_counts(cluster)
self._update_rollback_strategy(cluster, rollback_count=rollback_count,
target_count=target_count)
inst_ids = self._launch_instances(
cluster, target_count, SCALE_STAGES,
update_stack=True, disable_rollback=False)
cluster = conductor.cluster_get(ctx, cluster)
g.clean_cluster_from_empty_ng(cluster)
self._update_rollback_strategy(cluster)
return inst_ids
def rollback_cluster(self, cluster, reason):
rollback_info = cluster.rollback_info or {}
self._update_rollback_strategy(cluster)
if rollback_info.get('shutdown', False):
self._rollback_cluster_creation(cluster, reason)
LOG.warning(_LW("Cluster creation rollback "
"(reason: {reason})").format(reason=reason))
return False
rollback_count = rollback_info.get('rollback_count', {}).copy()
target_count = rollback_info.get('target_count', {}).copy()
if rollback_count or target_count:
self._rollback_cluster_scaling(
cluster, rollback_count, target_count, reason)
LOG.warning(_LW("Cluster scaling rollback "
"(reason: {reason})").format(reason=reason))
return True
return False
def _update_rollback_strategy(self, cluster, shutdown=False,
rollback_count=None, target_count=None):
rollback_info = {}
if shutdown:
rollback_info['shutdown'] = shutdown
if rollback_count:
rollback_info['rollback_count'] = rollback_count
if target_count:
rollback_info['target_count'] = target_count
cluster = conductor.cluster_update(
context.ctx(), cluster, {'rollback_info': rollback_info})
return cluster
def _populate_cluster(self, cluster, stack):
ctx = context.ctx()
old_ids = [i.instance_id for i in g.get_instances(cluster)]
new_ids = []
for node_group in cluster.node_groups:
instances = stack.get_node_group_instances(node_group)
for instance in instances:
nova_id = instance['physical_id']
name = instance['name']
if nova_id not in old_ids:
instance_id = conductor.instance_add(
ctx, node_group, {"instance_id": nova_id,
"instance_name": name})
new_ids.append(instance_id)
return new_ids
def _rollback_cluster_creation(self, cluster, ex):
"""Shutdown all instances and update cluster status."""
self.shutdown_cluster(cluster)
def _rollback_cluster_scaling(self, cluster, rollback_count,
target_count, ex):
"""Attempt to rollback cluster scaling.
Our rollback policy for scaling is as follows:
        We shut down nodes created during scaling, but we don't try
        to get back decommissioned nodes. I.e. during the rollback
we only shut down nodes and not launch them. That approach should
maximize the chance of rollback success.
"""
for ng in rollback_count:
if rollback_count[ng] > target_count[ng]:
rollback_count[ng] = target_count[ng]
self._launch_instances(cluster, rollback_count, ROLLBACK_STAGES,
update_stack=True)
def shutdown_cluster(self, cluster):
"""Shutdown specified cluster and all related resources."""
try:
b.execute_with_retries(heat.client().stacks.delete, cluster.name)
stack = heat.get_stack(cluster.name)
heat.wait_stack_completion(stack)
except heat_exc.HTTPNotFound:
LOG.warning(_LW('Did not find stack for cluster. Trying to delete '
'cluster manually.'))
# Stack not found. Trying to delete cluster like direct engine
# do it
self._shutdown_instances(cluster)
self._delete_aa_server_group(cluster)
self._clean_job_executions(cluster)
self._remove_db_objects(cluster)
@cpo.event_wrapper(
True, step=_('Create Heat stack'), param=('cluster', 1))
def _create_instances(self, cluster, target_count, update_stack=False,
disable_rollback=True):
stack = ht.ClusterStack(cluster)
self._update_instance_count(stack, cluster, target_count)
stack.instantiate(update_existing=update_stack,
disable_rollback=disable_rollback)
heat.wait_stack_completion(stack.heat_stack)
return self._populate_cluster(cluster, stack)
def _launch_instances(self, cluster, target_count, stages,
update_stack=False, disable_rollback=True):
# create all instances
cluster = g.change_cluster_status(cluster, stages[0])
inst_ids = self._create_instances(
cluster, target_count, update_stack, disable_rollback)
        # wait until all instances are up and networks are ready
cluster = g.change_cluster_status(cluster, stages[1])
instances = g.get_instances(cluster, inst_ids)
self._await_networks(cluster, instances)
# prepare all instances
cluster = g.change_cluster_status(cluster, stages[2])
instances = g.get_instances(cluster, inst_ids)
volumes.mount_to_instances(instances)
self._configure_instances(cluster)
return inst_ids
def _update_instance_count(self, stack, cluster, target_count):
ctx = context.ctx()
for node_group in cluster.node_groups:
count = target_count[node_group.id]
stack.add_node_group_extra(node_group.id, count,
self._generate_user_data_script)
# if number of instances decreases, we need to drop
# the excessive ones
for i in range(count, node_group.count):
conductor.instance_remove(ctx, node_group.instances[i])
|
|
#For use with python 3.x.
# Program only checks xml files in the directory that contains this python file.
# Delete all csv files before running script. Script appends to csv files of
# the same name if they already exist.
# libraries
from bs4 import BeautifulSoup # select XML tags and parse text
from os import walk, getcwd, listdir # used to grab all files in directory of script (c.f. line 29)
from lxml import etree # using xpath to compare <quote> and <epigraph> tags in order to identify files that may have epigraphs but have not been properly tagged to indicate this
import os # used to split off filename root from filename extension (c.f. line 31)
import csv # interact with csv files
import re # use regular expressions (for parsing author birth/death dates from author names)
#import sys # take input from command line (in future versions?)
## GLOBAL VARIABLES & FUNCTIONS
totalEpigraphCount = 0 #number of epigraphs in xml files in corpus
epigraphlessFileCount = 0 #number of xml files in corpus that do not have epigraphs
checkFile_count = 0 #running total of files flagged for visual inspection (epigraph/quote tag counts look inconsistent)
def count_tags(path, tag):
with open(path) as xml:
xml_parsed = etree.parse(xml)
epigraph_location = xml_parsed.xpath("//tei:" + tag, namespaces = {"tei" : "http://www.tei-c.org/ns/1.0"})
return len(epigraph_location)
def count_nested_tags(path, child_tag, ancestor_tag):
with open(path) as xml:
xml_parsed = etree.parse(xml)
child_in_ancestor = xml_parsed.xpath("//tei:" + child_tag + "/ancestor::tei:" + ancestor_tag, namespaces = {"tei" : "http://www.tei-c.org/ns/1.0"})
return len(child_in_ancestor)
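# Usage sketch for the two helpers above (the filename is a hypothetical
# example): count_tags('eaf001.xml', 'epigraph') returns the number of TEI
# <epigraph> elements in a file, while
# count_nested_tags('eaf001.xml', 'quote', 'epigraph') evaluates the XPath
# //tei:quote/ancestor::tei:epigraph, i.e. the number of distinct <epigraph>
# elements containing at least one <quote>; the consistency check near the end
# of this script compares these counts to flag files worth inspecting by eye.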
def remove_characters(listofstrings, characters_to_be_removed):
for string in range(0,len(listofstrings)):
cleaned_text = ""
for character in listofstrings[string]:
if character not in characters_to_be_removed:
cleaned_text += character
listofstrings[string] = cleaned_text
return listofstrings
## COLLECTING INFORMATION FROM CORPUS
allFilesInDirectory = [ filename for filename in listdir(getcwd()) if filename.endswith('.xml')] #get filenames in current directory ending in ".xml"
for document in range(0, len(allFilesInDirectory)): # Loop through all files in directory
root, ext = os.path.splitext(allFilesInDirectory[document]) # Select file extension for particular file "x" in the list "allFilesInDirectory"
if (ext == '.xml'): # If file ends in ".xml", read file. Skip file otherwise.
# open file to be read
readfile = open(str(allFilesInDirectory[document])) # Specify file to be read & open file
soup = BeautifulSoup(readfile, "lxml") # Make "soup" object of file to search
# collect novel author, title of novel, pub date, epigraph, epigraph attrib, pub location, publisher, & encoding company from individual file
author_list = [author.text for author in soup('author')] # (1) collect text "author" entries (&, if present, birth/death year)
# identify author birth & death years by scraping from author name, clean up author name entry
birthDeathYears = [] # years extracted from author_list will be placed here
authorBirthYear = 'No Birth Year' # Birth year OR Year if only a single year is provided
authorDeathYear = 'No Death Year' # Death year
birthDeathYears = re.findall('\d{4}', author_list[0]) # scrape years from author name entry if present
birthDeathYears = [int(string) for string in birthDeathYears] # convert years from strings to integers
if len(birthDeathYears) >= 3: # does file have three or more years in author line?
            print('WARNING: ' + str(len(birthDeathYears)) + ' years in author line in ' + root)
authorBirthYear = 'Too many years' # too many years, not sure which is birth or death
authorDeathYear = 'Too many years'
elif len(birthDeathYears) == 2: # or two years?
authorBirthYear = str(min(birthDeathYears[0],birthDeathYears[1])) # birth year
authorDeathYear = str(max(birthDeathYears[0],birthDeathYears[1])) # death year
elif len(birthDeathYears) == 1:
authorBirthYear = str(birthDeathYears[0]) # dump single year into birth year
authorDeathYear = '????' # remind ourselves that we don't know if this year is birth or death with '????'
# find, extract, and remove birth/death year in parentheses from author name
inParensToRemove = re.findall(r'\((.+)\)', author_list[0])
if inParensToRemove:
toRemove = ' ('+ inParensToRemove[0] + ')'
author_list[0] = author_list[0].replace(toRemove,"")
# find, extract, and remove birth/death year from author name if no parentheses exist
noParensToRemove = re.findall('\d{4}-\d{4}', author_list[0])
if noParensToRemove:
toRemove = noParensToRemove[0]
author_list[0] = author_list[0].replace(toRemove,"")
#remove trailing commas or white space in author name if these symbols are present, starting from *last* symbol in author name
errorcounter = 0
while author_list[0][-1:] == ' ' or author_list[0][-1:] == ',' or author_list[0][-1:] == '\n':
errorcounter = errorcounter + 1
if errorcounter > 8:
                print('ERROR: ' + root + ' author name cleaning stalled. Check file.')
break
author_list[0] = author_list[0][:-1]
title_list = [title.text for title in soup('title')] # (2) collect text "title" entries
publication_date = [date.text for date in soup('date')] # (3) collect text pub year entries
if "eaf" in root: ### select correct year depending on EAF or Wright corpus. Throw warning if not one of these two corpora.
pub_year = str(publication_date[1]) ### pick 2nd date tag for EAF corpus
else:
if "VAC" in root:
pub_year = str(publication_date[0]) ### pick 1st date tag for Wright corpus
else:
pub_year = 'Unknown Corpus, see terminal warning' ### WARNING: user must check pub year entry
print('WARNING: Check publication year for file ' + root +'.'+ ext + '\n')
print('List of publication dates in file: \n')
print(publication_date)
publication_place = [pubplace.text for pubplace in soup('pubplace')] #(4) collect text pub location
if len(soup('epigraph')) > 0: #(5) collect entries tagged "epigraph"
epigraph_list = [epigraph.text for epigraph in soup('epigraph')]
epigraph_attribution = ["No Attribution" if soup('epigraph')[epigraphs].bibl == None \
else soup('epigraph')[epigraphs].bibl.text \
for epigraphs in range(0,len(soup('epigraph')))] #(6) collect epigraph attributions
##see how many quote tags are nested in epigraph tags (for error checking; c.f. line 111)
            if soup('epigraph') and soup('quote'): # don't check if there are zero "epigraph" and/or "quote" tags
quote_tags_in_epigraph = [0 if soup('epigraph')[epigraphs].quote == None \
else 1 for epigraphs in range(0,len(soup('epigraph')))] # how often is quote tag appearing in epigraph tag? (used to help hunt for untagged epigraphs in corpus)
else:
quote_tags_in_epigraph = [0] # either no "quote" or "epigraph" tags or neither, so no quote-in-epigraph tags
else:
epigraph_list = ['No Epigraphs']
epigraph_attribution = ['No Epigraphs']
if len(soup('publisher')) > 0:
publishers = [publisher.text for publisher in soup('publisher')]
else:
publishers = ['Unknown Publisher', 'Unknown Publisher','Unknown Publisher']
# (7) identify company/individuals that produced each xml file (for exploring provenance of corpus)
encoders = []
encoding_counter = 0
## for Early American Fiction corpus ...
if root[:3] == 'eaf': #Not ideal way to handle this, but encoder always 1st 'name' tag in EAF files
encoders.append(soup('name')[0].text)
encoding_counter = 1
## for Wright American Fiction corpus ...
if root[:3] == 'VAC': # Wright American Fiction corpus files begin with "VAC"
encoders = [soup('change')[encoder].get('who') for encoder in range(0,len(soup('change')))] #get encoders from 'who' attrs in 'change' tags
encoding_counter = 1
### remove duplicate encoders entries, if present
duplicate_list = []
for x in range(len(encoders)): #find duplicates and place in 'duplicate_list'
if x != 0:
if (encoders[0] == encoders[x]):
duplicate_list.append(x)
for deletions in range(len(duplicate_list)): # mark duplicates by replacing element with 'To Erase'
encoders[duplicate_list[deletions]] = "To Erase"
for deletions in range(len(encoders)-1,0,-1): # delete duplicate entries
if encoders[deletions] == "To Erase":
del encoders[deletions]
if encoding_counter == 0:
print('WARNING: No case selected for encoder attribution for ' + root + '. Check file.')
if len(encoders) == 0:
print('WARNING: no encoder info found for ' + root + '. Check file.')
# (8) identify epigraphs with 'quote' tag & tracking of who did encoding (see also lines 47-50)
total_epigraph_tags = len(soup('epigraph')) # number of tagged "epigraph"s in file
total_quote_tags = len(soup('quote')) # number of tagged "quote"s in file
        if soup('epigraph'):
#print(quote_tags_in_epigraph)
quotes_in_epigraphs = sum(quote_tags_in_epigraph) # number of "quote"s in "epigraph"s
else:
quotes_in_epigraphs = 0
## CLEANING INFORMATION COLLECTED FROM CORPUS
        # remove "\n" characters (and stray "-" from attributions)
epigraph_attribution = remove_characters(epigraph_attribution, '-\n')
author_list = remove_characters(author_list, '\n')
title_list = remove_characters(title_list, '\n')
publication_place = remove_characters(publication_place, '\n')
publishers = remove_characters(publishers, '\n')
pub_year = remove_characters([pub_year], '\n')[0]
encoders = str(remove_characters(encoders, '\n'))
readfile.close() #close file "x"
        # Error checking: tally epigraph counts and flag files without epigraphs or author tags
if (len(soup('epigraph')) == 0): #check if file has epigraphs
if (len(soup('author')) > 0):
epigraphlessFileCount += 1
else:
if (len(soup('author')) == 0):
author_list = ['NO AUTHOR TAG IN FILE. CHECK XML FILE!']
epigraphlessFileCount += 1
        else:
            # every epigraph in the file counts toward the corpus total
            for i in range(0, len(soup.findAll('epigraph'))):
                totalEpigraphCount += 1
#output to a CSV file -- NOTE: need to wrap strings in a list for csvwriter to output properly
with open('epigraph_metadata.csv', 'a') as csvfile: #output metadata
epi_meta = csv.writer(csvfile, dialect='excel')
for i in range(0,len(soup('epigraph'))):
if (len(soup('author')) ==0):
epi_meta.writerow(['junkrow | ' + str(i) + ' | ' + allFilesInDirectory[document] + ' | '+ str(document) + ' | ' + 'Unknown Author' + ' | ' + authorBirthYear + ' | ' + authorDeathYear + ' | ' + str(title_list[0])+ ' | ' + str(epigraph_attribution[i])+ ' | ' + str(publishers[1]) + ' | ' + str(publication_place[1])+ ' | ' + pub_year + ' | junkrow'])
else:
epi_meta.writerow(['junkrow | ' + str(i) + ' | ' + allFilesInDirectory[document] + ' | '+ str(document) + ' | ' + author_list[0] + ' | ' + authorBirthYear + ' | ' + authorDeathYear + ' | ' + str(title_list[0])+ ' | ' + str(epigraph_attribution[i])+ ' | ' + str(publishers[1]) + ' | ' + str(publication_place[1])+ ' | ' + pub_year + ' | junkrow'])
with open('epigraph_list.csv', 'a') as csvfile: #output metadata
epi_list = csv.writer(csvfile, dialect='excel')
for i in range(0,len(soup('epigraph'))):
epi_list.writerow([allFilesInDirectory[document] + " | " + str(document+1)+ ', epigraph ' + str(i+1)])
epi_list.writerow([epigraph_list[i]])
#output ratio of epigraphs-to-quotes for each file, warnings, & error checks
with open('epigraph_to_quotes.csv', 'a') as csvfile:
epi_to_quote = csv.writer(csvfile, dialect='excel')
if (document == 0):
epi_to_quote.writerow(['junkrow | file number | check? | file name | encoding credit | total epigraph tags | total quote tags | quote pairs in epigraphs | junkrow'])
author_error_check = 'Field Empty -- ERROR'
if len(soup('author')) == 0:
author_error_check = 'No Author Tags!'
else:
author_error_check = str(len(soup('author')))
            checkFile = 'yes' #indicator to inspect novel page image
if count_tags(allFilesInDirectory[document], "epigraph") >= count_nested_tags(allFilesInDirectory[document], "quote", "epigraph") \
and count_tags(allFilesInDirectory[document], "epigraph") >= count_tags(allFilesInDirectory[document], "quote") \
or count_tags(allFilesInDirectory[document], "epigraph") >= 0 and count_tags(allFilesInDirectory[document], "quote") == 0:
checkFile = 'no'
else:
checkFile_count += 1
epi_to_quote.writerow(['junkrow | ' + str(document) + ' | ' + checkFile + ' | '+ str(allFilesInDirectory[document]) + ' | ' + encoders + ' | ' + str(total_epigraph_tags) + ' | ' + str(total_quote_tags) + ' | ' + str(quotes_in_epigraphs) + ' | junkrow'])
#Error Checking Print-To-Terminal: Print total number of epigraphs collected
print("TOTAL NUMBER OF EPIGRAPHS: " + str(totalEpigraphCount))
print("TOTAL NUMBER OF FILES: " + str(len(allFilesInDirectory)))
print("FILES WITHOUT EPIGRAPHS: " + str(epigraphlessFileCount))
print("TOTAL NUMBER OF FILES TO INSPECT: " + str(checkFile_count))
#NOTE FOR BS4: soup('epigraph') == soup.find_all('epigraph') == soup.findAll('epigraph')
|
|
#! /usr/bin/python
"""
Copyright 2013, Big Switch Networks, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
circuitpusher utilizes floodlight rest APIs to create a bidirectional circuit,
i.e., permanent flow entry, on all switches in route between two devices based
on IP addresses with specified priority.
Notes:
1. The circuit pusher currently only creates a circuit between two end points
2. Prior to sending restAPI requests to the circuit pusher, the specified end
   points must already be known to the controller (i.e., they must already have
   sent packets on the network; an easy way to assure this is to do a ping, to
   any target, from the two hosts).
3. The currently supported command syntax format is:
   a) circuitpusher.py --controller={IP}:{rest port} --type mac --src {MAC} --dst {MAC} --add --name {circuit-name}
      adds a new circuit between the src and dst devices. Currently MAC-based circuits are supported; ARP is handled automatically.
      A simple circuit record store is kept in a text file, circuits.json, in the working directory.
      The file is not protected and does not clean itself between controller restarts. The file is needed for correct operation,
      and the user should make sure to delete the file when the floodlight controller is restarted.
b) circuitpusher.py --controller={IP}:{rest port} --delete --name {circuit-name}
deletes a created circuit (as recorded in circuits.json) using the previously given name
@author kcwang
"""
import os
import sys
import subprocess
import json
import argparse
import io
import time
# parse circuit options. Currently supports add and delete actions.
# Syntax:
# circuitpusher --controller {IP:REST_PORT} --add --name {CIRCUIT_NAME} --type ip --src {MAC} --dst {MAC}
# circuitpusher --controller {IP:REST_PORT} --delete --name {CIRCUIT_NAME}
parser = argparse.ArgumentParser(description='Circuit Pusher')
parser.add_argument('--controller', dest='controllerRestIp', action='store', default='localhost:8080', help='controller IP:RESTport, e.g., localhost:8080 or A.B.C.D:8080')
parser.add_argument('--add', dest='action', action='store_const', const='add', default='add', help='action: add, delete')
parser.add_argument('--delete', dest='action', action='store_const', const='delete', default='add', help='action: add, delete')
parser.add_argument('--type', dest='type', action='store', default='mac', help='valid types: mac')
parser.add_argument('--src', dest='srcAddress', action='store', default='0.0.0.0', help='source address: if type=ip, A.B.C.D')
parser.add_argument('--dst', dest='dstAddress', action='store', default='0.0.0.0', help='destination address: if type=ip, A.B.C.D')
parser.add_argument('--name', dest='circuitName', action='store', default='circuit-1', help='name for circuit, e.g., circuit-1')
args = parser.parse_args()
print args
controllerRestIp = args.controllerRestIp
# first check if a local file exists, which needs to be updated after add/delete
if os.path.exists('./circuits.json'):
circuitDb = open('./circuits.json','r')
lines = circuitDb.readlines()
circuitDb.close()
else:
    lines = []
if args.action=='add':
circuitDb = open('./circuits.json','a')
for line in lines:
data = json.loads(line)
if data['name']==(args.circuitName):
print "Circuit %s exists already. Use new name to create." % args.circuitName
sys.exit()
else:
circuitExists = False
# retrieve source and destination device attachment points
# using DeviceManager rest API
command = "curl -s http://%s/wm/device/?mac=%s" % (args.controllerRestIp, args.srcAddress)
result = os.popen(command).read()
parsedResult = json.loads(result)
print command+"\n"
try:
sourceSwitch = parsedResult[0]['attachmentPoint'][0]['switchDPID']
except IndexError:
        print "ERROR : the specified source end point (%s) must already be known to the controller (i.e., it must already have sent packets on the network; an easy way to assure this is to do a ping, to any target, from the two hosts)." % (args.srcAddress)
sys.exit()
sourcePort = parsedResult[0]['attachmentPoint'][0]['port']
command = "curl -s http://%s/wm/device/?mac=%s" % (args.controllerRestIp, args.dstAddress)
result = os.popen(command).read()
parsedResult = json.loads(result)
print command+"\n"
try:
destSwitch = parsedResult[0]['attachmentPoint'][0]['switchDPID']
except IndexError:
        print "ERROR : the specified destination end point (%s) must already be known to the controller (i.e., it must already have sent packets on the network; an easy way to assure this is to do a ping, to any target, from the two hosts)." % (args.dstAddress)
sys.exit()
destPort = parsedResult[0]['attachmentPoint'][0]['port']
print "Creating circuit:"
print "from source device at switch %s port %s" % (sourceSwitch,sourcePort)
print "to destination device at switch %s port %s"% (destSwitch,destPort)
# retrieving route from source to destination
# using Routing rest API
command = "curl -s http://%s/wm/topology/route/%s/%s/%s/%s/json" % (controllerRestIp, sourceSwitch, sourcePort, destSwitch, destPort)
result = os.popen(command).read()
parsedResult = json.loads(result)
print command+"\n"
print result+"\n"
for i in range(len(parsedResult)):
if i % 2 == 0:
ap1Dpid = parsedResult[i]['switch']
ap1Port = parsedResult[i]['port']
print ap1Dpid, ap1Port
else:
ap2Dpid = parsedResult[i]['switch']
ap2Port = parsedResult[i]['port']
print ap2Dpid, ap2Port
# send one flow mod per pair of APs in route
# using StaticFlowPusher rest API
# IMPORTANT NOTE: current Floodlight StaticflowEntryPusher
# assumes all flow entries to have unique name across all switches
# this will most possibly be relaxed later, but for now we
# encode each flow entry's name with both switch dpid, user
# specified name, and flow type (f: forward, r: reverse, farp/rarp: arp)
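            # For example (hypothetical dpid): a circuit named "circuit-1" that
            # traverses switch 00:00:00:00:00:00:00:01 produces entries named
            # "00:00:00:00:00:00:00:01.circuit-1.f", ".r", ".farp" and ".rarp".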
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"src-mac\":\"%s\", \"dst-mac\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"output=%s\"}' http://%s/wm/staticflowentrypusher/json" % (ap1Dpid, ap1Dpid+"."+args.circuitName+".f", args.srcAddress, args.dstAddress, ap1Port, ap2Port, controllerRestIp)
result = os.popen(command).read()
print command
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"ether-type\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"output=%s\"}' http://%s/wm/staticflowentrypusher/json" % (ap1Dpid, ap1Dpid+"."+args.circuitName+".farp", "0x806", ap1Port, ap2Port, controllerRestIp)
result = os.popen(command).read()
print command
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"src-mac\":\"%s\", \"dst-mac\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"output=%s\"}' http://%s/wm/staticflowentrypusher/json" % (ap1Dpid, ap1Dpid+"."+args.circuitName+".r", args.dstAddress, args.srcAddress, ap2Port, ap1Port, controllerRestIp)
result = os.popen(command).read()
print command
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"ether-type\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"output=%s\"}' http://%s/wm/staticflowentrypusher/json" % (ap1Dpid, ap1Dpid+"."+args.circuitName+".rarp", "0x806", ap2Port, ap1Port, controllerRestIp)
result = os.popen(command).read()
print command
# store created circuit attributes in local ./circuits.json
datetime = time.asctime()
circuitParams = {'name':args.circuitName, 'Dpid':ap1Dpid, 'inPort':ap1Port, 'outPort':ap2Port, 'datetime':datetime}
    circuitJson = json.dumps(circuitParams)
    circuitDb.write(circuitJson+"\n")
# confirm successful circuit creation
# using controller rest API
command="curl -s http://%s/wm/core/switch/all/flow/json| python -mjson.tool" % (controllerRestIp)
result = os.popen(command).read()
print command + "\n" + result
elif args.action=='delete':
circuitDb = open('./circuits.json','w')
# removing previously created flow from switches
# using StaticFlowPusher rest API
    # currently, circuitpusher records created circuits in local file ./circuits.json
# with circuit name and list of switches
circuitExists = False
for line in lines:
data = json.loads(line)
if data['name']==(args.circuitName):
circuitExists = True
sw = data['Dpid']
print data, sw
command = "curl -X DELETE -d '{\"name\":\"%s\", \"switch\":\"%s\"}' http://%s/wm/staticflowentrypusher/json" % (sw+"."+args.circuitName+".f", sw, controllerRestIp)
result = os.popen(command).read()
print command, result
command = "curl -X DELETE -d '{\"name\":\"%s\", \"switch\":\"%s\"}' http://%s/wm/staticflowentrypusher/json" % (sw+"."+args.circuitName+".farp", sw, controllerRestIp)
result = os.popen(command).read()
print command, result
command = "curl -X DELETE -d '{\"name\":\"%s\", \"switch\":\"%s\"}' http://%s/wm/staticflowentrypusher/json" % (sw+"."+args.circuitName+".r", sw, controllerRestIp)
result = os.popen(command).read()
print command, result
command = "curl -X DELETE -d '{\"name\":\"%s\", \"switch\":\"%s\"}' http://%s/wm/staticflowentrypusher/json" % (sw+"."+args.circuitName+".rarp", sw, controllerRestIp)
result = os.popen(command).read()
print command, result
else:
circuitDb.write(line)
circuitDb.close()
if not circuitExists:
print "specified circuit does not exist"
sys.exit()
|
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-13 01:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='trips',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('miscidx', models.IntegerField(blank=True, default=0, null=True)),
('index', models.IntegerField(blank=True, default=0, null=True)),
('tripid', models.IntegerField(blank=True, default=0, null=True)),
('tripdate', models.DateField(blank=True, default=None, null=True)),
('picktime', models.TimeField(blank=True, default=None, null=True)),
('droptime', models.TimeField(blank=True, default=None, null=True)),
('provider', models.CharField(blank=True, max_length=255, null=True)),
('status', models.CharField(blank=True, max_length=255, null=True)),
('routeid', models.IntegerField(blank=True, default=0, null=True)),
('pickhousenumber', models.IntegerField(blank=True, default=0, null=True)),
('pickaddress1', models.CharField(blank=True, max_length=255, null=True)),
('pickcity', models.CharField(blank=True, max_length=255, null=True)),
('pickcounty', models.CharField(blank=True, max_length=255, null=True)),
('pickzip', models.IntegerField(blank=True, default=0, null=True)),
('drophousenumber', models.IntegerField(blank=True, default=0, null=True)),
('dropaddress1', models.CharField(blank=True, max_length=255, null=True)),
('dropcity', models.CharField(blank=True, max_length=255, null=True)),
('dropcounty', models.CharField(blank=True, max_length=255, null=True)),
('dropzip', models.IntegerField(blank=True, default=0, null=True)),
('shared', models.BooleanField(default=False)),
('puzip', models.IntegerField(blank=True, default=0, null=True)),
('dozip', models.IntegerField(blank=True, default=0, null=True)),
('uid', models.IntegerField(blank=True, default=0, null=True)),
('paid', models.CharField(blank=True, max_length=255, null=True)),
('duid', models.CharField(blank=True, max_length=255, null=True)),
('count', models.IntegerField(blank=True, default=0, null=True)),
('pickboro', models.IntegerField(blank=True, default=0, null=True)),
('dropboro', models.IntegerField(blank=True, default=0, null=True)),
('upast', models.CharField(blank=True, max_length=255, null=True)),
('udast', models.CharField(blank=True, max_length=255, null=True)),
('pickdate', models.DateField(blank=True, default=None, null=True)),
('dropdate', models.DateField(blank=True, default=None, null=True)),
('pickhour', models.IntegerField(blank=True, default=0, null=True)),
('pickmin', models.IntegerField(blank=True, default=0, null=True)),
('drophour', models.IntegerField(blank=True, default=0, null=True)),
('dropmin', models.IntegerField(blank=True, default=0, null=True)),
('pickdaymins', models.IntegerField(blank=True, default=0, null=True)),
('dropdaymins', models.IntegerField(blank=True, default=0, null=True)),
('tripminsdelta', models.IntegerField(blank=True, default=0, null=True)),
('p_bctcb2010', models.IntegerField(blank=True, default=0, null=True)),
('p_lat', models.DecimalField(decimal_places=10, default=0, max_digits=12)),
('p_lon', models.DecimalField(decimal_places=10, default=0, max_digits=13)),
('p_count', models.IntegerField(blank=True, default=0, null=True)),
('p_val', models.BooleanField(default=False)),
('d_bctcb2010', models.IntegerField(blank=True, default=0, null=True)),
('d_lat', models.DecimalField(decimal_places=10, default=0, max_digits=12)),
('d_lon', models.DecimalField(decimal_places=10, default=0, max_digits=13)),
('d_count', models.IntegerField(blank=True, default=0, null=True)),
('d_val', models.BooleanField(default=False)),
('p_geoid', models.CharField(blank=True, max_length=255, null=True)),
('p_xcoord', models.DecimalField(decimal_places=10, default=0, max_digits=12)),
('p_ycoord', models.DecimalField(decimal_places=10, default=0, max_digits=13)),
('d_geoid', models.CharField(blank=True, max_length=255, null=True)),
('d_xcoord', models.DecimalField(decimal_places=10, default=0, max_digits=12)),
('d_ycoord', models.DecimalField(decimal_places=10, default=0, max_digits=13)),
('p_geoid_bg', models.CharField(blank=True, max_length=255, null=True)),
('d_geoid_bg', models.CharField(blank=True, max_length=255, null=True)),
('osrmminsdelta', models.DecimalField(decimal_places=2, default=0, max_digits=6)),
('osrm_dist', models.DecimalField(decimal_places=1, default=0, max_digits=10)),
('osrm_rval', models.BooleanField(default=False)),
('p_nr_bus', models.DecimalField(decimal_places=10, default=0, max_digits=20)),
('d_nr_bus', models.DecimalField(decimal_places=10, default=0, max_digits=20)),
('p_nr_sub', models.DecimalField(decimal_places=10, default=0, max_digits=20)),
('d_nr_sub', models.DecimalField(decimal_places=10, default=0, max_digits=20)),
('p_nr_hea', models.DecimalField(decimal_places=10, default=0, max_digits=20)),
('d_nr_hea', models.DecimalField(decimal_places=10, default=0, max_digits=20)),
('p_p_count', models.IntegerField(blank=True, default=0, null=True)),
('p_d_count', models.IntegerField(blank=True, default=0, null=True)),
('p_a_count', models.IntegerField(blank=True, default=0, null=True)),
('p_p0010001', models.IntegerField(blank=True, default=0, null=True)),
('p_p0030001', models.IntegerField(blank=True, default=0, null=True)),
('p_p0030002', models.IntegerField(blank=True, default=0, null=True)),
('p_p0030003', models.IntegerField(blank=True, default=0, null=True)),
('p_p0030004', models.IntegerField(blank=True, default=0, null=True)),
('p_p0030005', models.IntegerField(blank=True, default=0, null=True)),
('p_p0030006', models.IntegerField(blank=True, default=0, null=True)),
('p_p0030007', models.IntegerField(blank=True, default=0, null=True)),
('p_p0030008', models.IntegerField(blank=True, default=0, null=True)),
('p_p0040001', models.IntegerField(blank=True, default=0, null=True)),
('p_p0040002', models.IntegerField(blank=True, default=0, null=True)),
('p_p0040003', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120001', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120002', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120003', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120004', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120005', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120006', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120007', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120008', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120009', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120010', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120011', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120012', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120013', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120014', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120015', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120016', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120017', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120018', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120019', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120020', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120021', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120022', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120023', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120024', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120025', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120026', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120027', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120028', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120029', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120030', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120031', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120032', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120033', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120034', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120035', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120036', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120037', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120038', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120039', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120040', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120041', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120042', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120043', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120044', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120045', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120046', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120047', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120048', models.IntegerField(blank=True, default=0, null=True)),
('p_p0120049', models.IntegerField(blank=True, default=0, null=True)),
('p_h00010001', models.IntegerField(blank=True, default=0, null=True)),
('p_h0030001', models.IntegerField(blank=True, default=0, null=True)),
('p_h0030002', models.IntegerField(blank=True, default=0, null=True)),
('p_h0030003', models.IntegerField(blank=True, default=0, null=True)),
('p_h0040001', models.IntegerField(blank=True, default=0, null=True)),
('p_h0040002', models.IntegerField(blank=True, default=0, null=True)),
('p_h0040003', models.IntegerField(blank=True, default=0, null=True)),
('p_h0040004', models.IntegerField(blank=True, default=0, null=True)),
('p_h0050001', models.IntegerField(blank=True, default=0, null=True)),
('p_h0050002', models.IntegerField(blank=True, default=0, null=True)),
('p_h0050003', models.IntegerField(blank=True, default=0, null=True)),
('p_h0050004', models.IntegerField(blank=True, default=0, null=True)),
('p_h0050005', models.IntegerField(blank=True, default=0, null=True)),
('p_h0050006', models.IntegerField(blank=True, default=0, null=True)),
('p_h0050007', models.IntegerField(blank=True, default=0, null=True)),
('p_h0050008', models.IntegerField(blank=True, default=0, null=True)),
('p_p_pop', models.DecimalField(decimal_places=10, default=0, max_digits=20)),
('p_d_pop', models.DecimalField(decimal_places=10, default=0, max_digits=20)),
('p_a_pop', models.DecimalField(decimal_places=10, default=0, max_digits=20)),
('d_p_count', models.IntegerField(blank=True, default=0, null=True)),
('d_d_count', models.IntegerField(blank=True, default=0, null=True)),
('d_a_count', models.IntegerField(blank=True, default=0, null=True)),
('d_p0010001', models.IntegerField(blank=True, default=0, null=True)),
('d_p0030001', models.IntegerField(blank=True, default=0, null=True)),
('d_p0030002', models.IntegerField(blank=True, default=0, null=True)),
('d_p0030003', models.IntegerField(blank=True, default=0, null=True)),
('d_p0030004', models.IntegerField(blank=True, default=0, null=True)),
('d_p0030005', models.IntegerField(blank=True, default=0, null=True)),
('d_p0030006', models.IntegerField(blank=True, default=0, null=True)),
('d_p0030007', models.IntegerField(blank=True, default=0, null=True)),
('d_p0030008', models.IntegerField(blank=True, default=0, null=True)),
('d_p0040001', models.IntegerField(blank=True, default=0, null=True)),
('d_p0040002', models.IntegerField(blank=True, default=0, null=True)),
('d_p0040003', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120001', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120002', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120003', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120004', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120005', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120006', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120007', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120008', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120009', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120010', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120011', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120012', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120013', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120014', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120015', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120016', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120017', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120018', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120019', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120020', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120021', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120022', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120023', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120024', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120025', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120026', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120027', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120028', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120029', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120030', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120031', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120032', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120033', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120034', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120035', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120036', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120037', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120038', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120039', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120040', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120041', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120042', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120043', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120044', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120045', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120046', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120047', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120048', models.IntegerField(blank=True, default=0, null=True)),
('d_p0120049', models.IntegerField(blank=True, default=0, null=True)),
('d_h00010001', models.IntegerField(blank=True, default=0, null=True)),
('d_h0030001', models.IntegerField(blank=True, default=0, null=True)),
('d_h0030002', models.IntegerField(blank=True, default=0, null=True)),
('d_h0030003', models.IntegerField(blank=True, default=0, null=True)),
('d_h0040001', models.IntegerField(blank=True, default=0, null=True)),
('d_h0040002', models.IntegerField(blank=True, default=0, null=True)),
('d_h0040003', models.IntegerField(blank=True, default=0, null=True)),
('d_h0040004', models.IntegerField(blank=True, default=0, null=True)),
('d_h0050001', models.IntegerField(blank=True, default=0, null=True)),
('d_h0050002', models.IntegerField(blank=True, default=0, null=True)),
('d_h0050003', models.IntegerField(blank=True, default=0, null=True)),
('d_h0050004', models.IntegerField(blank=True, default=0, null=True)),
('d_h0050005', models.IntegerField(blank=True, default=0, null=True)),
('d_h0050006', models.IntegerField(blank=True, default=0, null=True)),
('d_h0050007', models.IntegerField(blank=True, default=0, null=True)),
('d_h0050008', models.IntegerField(blank=True, default=0, null=True)),
('d_p_pop', models.DecimalField(decimal_places=10, default=0, max_digits=20)),
('d_d_pop', models.DecimalField(decimal_places=10, default=0, max_digits=20)),
('d_a_pop', models.DecimalField(decimal_places=10, default=0, max_digits=20)),
('p_b01001_001e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_002e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_003e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_004e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_005e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_006e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_007e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_008e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_009e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_010e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_011e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_012e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_013e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_014e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_015e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_016e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_017e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_018e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_019e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_020e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_021e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_022e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_023e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_024e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_025e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_026e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_027e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_028e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_029e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_030e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_031e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_032e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_033e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_034e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_035e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_036e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_037e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_038e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_039e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_040e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_041e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_042e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_043e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_044e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_045e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_046e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_047e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_048e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_049e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01003_001e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b19013_001e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b18101_001e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_001e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_002e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_003e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_004e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_005e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_006e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_007e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_008e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_009e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_010e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_011e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_012e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_013e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_014e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_015e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_016e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_017e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_018e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_019e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_020e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_021e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_022e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_023e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_024e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_025e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_026e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_027e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_028e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_029e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_030e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_031e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_032e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_033e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_034e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_035e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_036e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_037e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_038e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_039e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_040e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_041e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_042e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_043e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_044e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_045e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_046e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_047e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_048e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_049e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b01003_001e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b19013_001e_x', models.IntegerField(blank=True, default=0, null=True)),
('d_b18101_001e_x', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_001e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_002e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_003e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_004e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_005e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_006e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_007e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_008e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_009e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_010e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_011e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_012e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_013e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_014e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_015e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_016e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_017e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_018e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_019e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_020e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_021e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_022e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_023e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_024e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_025e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_026e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_027e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_028e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_029e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_030e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_031e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_032e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_033e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_034e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_035e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_036e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_037e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_038e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_039e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_040e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_041e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_042e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_043e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_044e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_045e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_046e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_047e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_048e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01001_049e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b01003_001e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b19013_001e_y', models.IntegerField(blank=True, default=0, null=True)),
('p_b18101_001e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_001e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_002e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_003e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_004e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_005e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_006e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_007e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_008e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_009e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_010e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_011e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_012e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_013e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_014e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_015e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_016e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_017e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_018e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_019e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_020e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_021e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_022e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_023e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_024e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_025e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_026e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_027e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_028e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_029e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_030e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_031e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_032e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_033e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_034e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_035e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_036e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_037e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_038e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_039e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_040e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_041e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_042e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_043e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_044e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_045e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_046e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_047e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_048e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01001_049e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b01003_001e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b19013_001e_y', models.IntegerField(blank=True, default=0, null=True)),
('d_b18101_001e_y', models.IntegerField(blank=True, default=0, null=True)),
],
),
]
|
|
import math
import numpy as np
import subprocess
import numbers
import importlib
import sys
import re
from itertools import chain, combinations
import numba
from numba.core import config, cpu
from numba import prange, njit
from numba.core.compiler import compile_isolated, Flags
from numba.tests.support import TestCase, tag, override_env_config
import unittest
needs_svml = unittest.skipUnless(config.USING_SVML,
"SVML tests need SVML to be present")
# a map of float64 vector lengths to the corresponding CPU architecture
vlen2cpu = {2: 'nehalem', 4: 'haswell', 8: 'skylake-avx512'}
# force LLVM to use AVX512 registers for vectorization
# https://reviews.llvm.org/D67259
vlen2cpu_features = {2: '', 4: '', 8: '-prefer-256-bit'}
# K: SVML functions, V: python functions which are expected to be SIMD-vectorized
# using SVML, explicit references to Python functions here are mostly for sake of
# instant import checks.
# TODO: [] and comments below mean an unused/untested SVML function; each is to
#       be either enabled or replaced with an explanation of why the function
#       cannot be used in Numba
# TODO: this test does not support functions with more than 1 argument yet
# The test logic should be modified if an SVML function is used under a
# different name or module from Python
svml_funcs = {
"sin": [np.sin, math.sin],
"cos": [np.cos, math.cos],
"pow": [], # pow, math.pow],
"exp": [np.exp, math.exp],
"log": [np.log, math.log],
"acos": [math.acos],
"acosh": [math.acosh],
"asin": [math.asin],
"asinh": [math.asinh],
"atan2": [], # math.atan2],
"atan": [math.atan],
"atanh": [math.atanh],
"cbrt": [], # np.cbrt],
"cdfnorm": [],
"cdfnorminv": [],
"ceil": [], # np.ceil, math.ceil],
"cosd": [],
"cosh": [np.cosh, math.cosh],
"erf": [math.erf], # np.erf is available in Intel Distribution
"erfc": [math.erfc],
"erfcinv": [],
"erfinv": [],
"exp10": [],
"exp2": [], # np.exp2],
"expm1": [np.expm1, math.expm1],
"floor": [], # np.floor, math.floor],
"fmod": [], # np.fmod, math.fmod],
"hypot": [], # np.hypot, math.hypot],
"invsqrt": [], # available in Intel Distribution
"log10": [np.log10, math.log10],
"log1p": [np.log1p, math.log1p],
"log2": [], # np.log2],
"logb": [],
"nearbyint": [],
"rint": [], # np.rint],
"round": [], # round],
"sind": [],
"sinh": [np.sinh, math.sinh],
"sqrt": [np.sqrt, math.sqrt],
"tan": [np.tan, math.tan],
"tanh": [np.tanh, math.tanh],
"trunc": [], # np.trunc, math.trunc],
}
# TODO: these functions are not vectorizable with complex types
complex_funcs_exclude = ["sqrt", "tan", "log10", "expm1", "log1p", "tanh", "log"]
# remove untested entries
svml_funcs = {k: v for k, v in svml_funcs.items() if len(v) > 0}
# lists of functions which belong to the numpy and math modules, respectively
numpy_funcs = [f for f, v in svml_funcs.items() if "<ufunc" in \
[str(p).split(' ')[0] for p in v]]
other_funcs = [f for f, v in svml_funcs.items() if "<built-in" in \
[str(p).split(' ')[0] for p in v]]
def func_patterns(func, args, res, dtype, mode, vlen, flags, pad=' '*8):
"""
For a given function and its usage modes,
returns python code and assembly patterns it should and should not generate
"""
# generate a function call according to the usecase
if mode == "scalar":
arg_list = ','.join([a+'[0]' for a in args])
body = '%s%s[0] += math.%s(%s)\n' % (pad, res, func, arg_list)
elif mode == "numpy":
body = '%s%s += np.%s(%s)' % (pad, res, func, ','.join(args))
body += '.astype(np.%s)\n' % dtype if dtype.startswith('int') else '\n'
else:
assert mode == "range" or mode == "prange"
arg_list = ','.join([a+'[i]' for a in args])
body = '{pad}for i in {mode}({res}.size):\n' \
'{pad}{pad}{res}[i] += math.{func}({arg_list})\n'. \
format(**locals())
# TODO: refactor so this for-loop goes into umbrella function,
# 'mode' can be 'numpy', '0', 'i' instead
# TODO: it will enable mixed usecases like prange + numpy
# type specialization
is_f32 = dtype == 'float32' or dtype == 'complex64'
f = func+'f' if is_f32 else func
v = vlen*2 if is_f32 else vlen
# general expectations
prec_suff = '' if getattr(flags, 'fastmath', False) else '_ha'
scalar_func = '$_'+f if config.IS_OSX else '$'+f
svml_func = '__svml_%s%d%s,' % (f, v, prec_suff)
if mode == "scalar":
contains = [scalar_func]
avoids = ['__svml_', svml_func]
else: # will vectorize
contains = [svml_func]
avoids = [] # [scalar_func] - TODO: if possible, force LLVM to prevent
# generating the failsafe scalar paths
if vlen != 8 and (is_f32 or dtype == 'int32'): # Issue #3016
avoids += ['%zmm', '__svml_%s%d%s,' % (f, v*2, prec_suff)]
# special handling
if func == 'sqrt':
if mode == "scalar":
contains = ['sqrts']
avoids = [scalar_func, svml_func] # LLVM uses CPU instruction
elif vlen == 8:
contains = ['vsqrtp']
avoids = [scalar_func, svml_func] # LLVM uses CPU instruction
# else expect use of SVML for older architectures
return body, contains, avoids
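# Illustrative note (worked example, not part of the test logic): calling
# func_patterns('sin', ['x'], 'ret', 'float64', 'numpy', 8, flags) with a
# non-fastmath Flags instance is expected to return a body of
# "        ret += np.sin(x)\n", contains == ['__svml_sin8_ha,'] and avoids == [].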
def usecase_name(dtype, mode, vlen, flags):
""" Returns pretty name for given set of modes """
return "{dtype}_{mode}{vlen}_{flags.__name__}".format(**locals())
def combo_svml_usecase(dtype, mode, vlen, flags):
""" Combine multiple function calls under single umbrella usecase """
name = usecase_name(dtype, mode, vlen, flags)
body = """def {name}(n):
x = np.empty(n*8, dtype=np.{dtype})
ret = np.empty_like(x)\n""".format(**locals())
funcs = set(numpy_funcs if mode == "numpy" else other_funcs)
if dtype.startswith('complex'):
funcs = funcs.difference(complex_funcs_exclude)
contains = set()
avoids = set()
# fill body and expectation patterns
for f in funcs:
b, c, a = func_patterns(f, ['x'], 'ret', dtype, mode, vlen, flags)
avoids.update(a)
body += b
contains.update(c)
body += " "*8 + "return ret"
# now compile and return it along with its body in __doc__ and patterns
ldict = {}
exec(body, globals(), ldict)
ldict[name].__doc__ = body
return ldict[name], contains, avoids
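# Illustrative note (assumed example, not executed here): for
# combo_svml_usecase('float64', 'numpy', 4, flags) with flags.__name__ set to
# 'fastmath_usecase', usecase_name() yields "float64_numpy4_fastmath_usecase"
# and the exec'd body defines a function of that name which applies every
# tested numpy SVML candidate (sin, cos, exp, ...) to x and returns ret.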
@needs_svml
class TestSVMLGeneration(TestCase):
""" Tests all SVML-generating functions produce desired calls """
# env mutating, must not run in parallel
_numba_parallel_test_ = False
# RE for a generic symbol reference and for each particular SVML function
    asm_filter = re.compile('|'.join([r'\$[a-z_]\w+,'] + list(svml_funcs)))
@classmethod
def _inject_test(cls, dtype, mode, vlen, flags):
# unsupported combinations
if dtype.startswith('complex') and mode != 'numpy':
return
# TODO: address skipped tests below
skipped = dtype.startswith('int') and vlen == 2
args = (dtype, mode, vlen, flags)
# unit test body template
@unittest.skipUnless(not skipped, "Not implemented")
def test_template(self):
fn, contains, avoids = combo_svml_usecase(*args)
            # look for specific patterns in the asm for a given target
with override_env_config('NUMBA_CPU_NAME', vlen2cpu[vlen]), \
override_env_config('NUMBA_CPU_FEATURES', vlen2cpu_features[vlen]):
# recompile for overridden CPU
try:
jit = compile_isolated(fn, (numba.int64, ), flags=flags)
                except Exception as e:
                    raise Exception("raised while compiling " + fn.__doc__) from e
asm = jit.library.get_asm_str()
                missed = [pattern for pattern in contains if pattern not in asm]
found = [pattern for pattern in avoids if pattern in asm]
self.assertTrue(not missed and not found,
"While expecting %s and no %s,\n"
"it contains:\n%s\n"
"when compiling %s" % (str(missed), str(found), '\n'.join(
[line for line in asm.split('\n')
                     if cls.asm_filter.search(line) and '"' not in line]),
fn.__doc__))
# inject it into the class
setattr(cls, "test_"+usecase_name(*args), test_template)
@classmethod
def autogenerate(cls):
test_flags = ['fastmath', ] # TODO: add 'auto_parallel' ?
# generate all the combinations of the flags
test_flags = sum([list(combinations(test_flags, x)) for x in range( \
len(test_flags)+1)], [])
flag_list = [] # create Flag class instances
for ft in test_flags:
flags = Flags()
flags.nrt = True
flags.error_model = 'numpy'
flags.__name__ = '_'.join(ft+('usecase',))
for f in ft:
setattr(flags, f, {
'fastmath': cpu.FastMathOptions(True)
}.get(f, True))
flag_list.append(flags)
# main loop covering all the modes and use-cases
for dtype in ('complex64', 'float64', 'float32', 'int32', ):
for vlen in vlen2cpu:
for flags in flag_list:
for mode in "scalar", "range", "prange", "numpy":
cls._inject_test(dtype, mode, vlen, flags)
# mark important
for n in ( "test_int32_range4_usecase", # issue #3016
):
setattr(cls, n, tag("important")(getattr(cls, n)))
TestSVMLGeneration.autogenerate()
def math_sin_scalar(x):
return math.sin(x)
def math_sin_loop(n):
ret = np.empty(n, dtype=np.float64)
for x in range(n):
ret[x] = math.sin(np.float64(x))
return ret
@needs_svml
class TestSVML(TestCase):
""" Tests SVML behaves as expected """
# env mutating, must not run in parallel
_numba_parallel_test_ = False
def __init__(self, *args):
self.flags = Flags()
self.flags.nrt = True
# flags for njit(fastmath=True)
self.fastflags = Flags()
self.fastflags.nrt = True
self.fastflags.fastmath = cpu.FastMathOptions(True)
super(TestSVML, self).__init__(*args)
def compile(self, func, *args, **kwargs):
assert not kwargs
sig = tuple([numba.typeof(x) for x in args])
std = compile_isolated(func, sig, flags=self.flags)
fast = compile_isolated(func, sig, flags=self.fastflags)
return std, fast
def copy_args(self, *args):
if not args:
return tuple()
new_args = []
for x in args:
if isinstance(x, np.ndarray):
new_args.append(x.copy('k'))
elif isinstance(x, np.number):
new_args.append(x.copy())
elif isinstance(x, numbers.Number):
new_args.append(x)
else:
raise ValueError('Unsupported argument type encountered')
return tuple(new_args)
def check(self, pyfunc, *args, **kwargs):
jitstd, jitfast = self.compile(pyfunc, *args)
std_pattern = kwargs.pop('std_pattern', None)
fast_pattern = kwargs.pop('fast_pattern', None)
cpu_name = kwargs.pop('cpu_name', 'skylake-avx512')
# force LLVM to use AVX512 registers for vectorization
# https://reviews.llvm.org/D67259
cpu_features = kwargs.pop('cpu_features', '-prefer-256-bit')
# python result
py_expected = pyfunc(*self.copy_args(*args))
# jit result
jitstd_result = jitstd.entry_point(*self.copy_args(*args))
# fastmath result
jitfast_result = jitfast.entry_point(*self.copy_args(*args))
# assert numerical equality
np.testing.assert_almost_equal(jitstd_result, py_expected, **kwargs)
np.testing.assert_almost_equal(jitfast_result, py_expected, **kwargs)
        # look for specific patterns in the asm for a given target
with override_env_config('NUMBA_CPU_NAME', cpu_name), \
override_env_config('NUMBA_CPU_FEATURES', cpu_features):
# recompile for overridden CPU
jitstd, jitfast = self.compile(pyfunc, *args)
if std_pattern:
self.check_svml_presence(jitstd, std_pattern)
if fast_pattern:
self.check_svml_presence(jitfast, fast_pattern)
def check_svml_presence(self, func, pattern):
asm = func.library.get_asm_str()
self.assertIn(pattern, asm)
def test_scalar_context(self):
# SVML will not be used.
pat = '$_sin' if config.IS_OSX else '$sin'
self.check(math_sin_scalar, 7., std_pattern=pat)
self.check(math_sin_scalar, 7., fast_pattern=pat)
def test_svml(self):
# loops both with and without fastmath should use SVML.
# The high accuracy routines are dropped if `fastmath` is set
std = "__svml_sin8_ha,"
fast = "__svml_sin8," # No `_ha`!
self.check(math_sin_loop, 10, std_pattern=std, fast_pattern=fast)
def test_svml_disabled(self):
code = """if 1:
import os
import numpy as np
import math
def math_sin_loop(n):
ret = np.empty(n, dtype=np.float64)
for x in range(n):
ret[x] = math.sin(np.float64(x))
return ret
def check_no_svml():
try:
# ban the use of SVML
os.environ['NUMBA_DISABLE_INTEL_SVML'] = '1'
# delay numba imports to account for env change as
# numba.__init__ picks up SVML and it is too late by
# then to override using `numba.config`
import numba
from numba import config
from numba.core import cpu
from numba.tests.support import override_env_config
from numba.core.compiler import compile_isolated, Flags
# compile for overridden CPU, with and without fastmath
with override_env_config('NUMBA_CPU_NAME', 'skylake-avx512'), \
override_env_config('NUMBA_CPU_FEATURES', ''):
sig = (numba.int32,)
f = Flags()
f.nrt = True
std = compile_isolated(math_sin_loop, sig, flags=f)
f.fastmath = cpu.FastMathOptions(True)
fast = compile_isolated(math_sin_loop, sig, flags=f)
fns = std, fast
# assert no SVML call is present in the asm
for fn in fns:
asm = fn.library.get_asm_str()
assert '__svml_sin' not in asm
finally:
# not really needed as process is separate
os.environ['NUMBA_DISABLE_INTEL_SVML'] = '0'
config.reload_config()
check_no_svml()
"""
popen = subprocess.Popen(
[sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError(
"process failed with code %s: stderr follows\n%s\n" %
(popen.returncode, err.decode()))
def test_svml_working_in_non_isolated_context(self):
@njit(fastmath={'fast'}, error_model="numpy")
def impl(n):
x = np.empty(n * 8, dtype=np.float64)
ret = np.empty_like(x)
for i in range(ret.size):
ret[i] += math.cosh(x[i])
return ret
impl(1)
self.assertTrue('intel_svmlcc' in impl.inspect_llvm(impl.signatures[0]))
if __name__ == '__main__':
unittest.main()
|
|
"""
Returns nicely-presented versions of the ranges between two dates
e.g.
27-29 May 2009
27 May-1 June 2009
8 pm-12 midnight
8 am-12 noon
8.30-9 am
"""
from datetime import date, time, datetime
DAYS_IN_MONTHS = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def days_in_month(date): #January = 1
    # full Gregorian leap-year rule: every 4th year, except century years
    # that are not divisible by 400
    if date.month == 2 and date.year % 4 == 0 and (date.year % 100 != 0 or date.year % 400 == 0):
        return 29
    else:
        return DAYS_IN_MONTHS[date.month-1]
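# For example, with the full Gregorian rule above:
#   days_in_month(date(2000, 2, 1)) -> 29  (2000 is a leap year)
#   days_in_month(date(1900, 2, 1)) -> 28  (1900 is not, despite being divisible by 4)
#   days_in_month(date(2001, 7, 1)) -> 31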
def _clean_dates(date1, date2):
if not date1:
raise TypeError("You must provide a start date")
if not date2:
date2 = date1
#Case: d2 is less than d1. Swap them
if date2 < date1:
dt = date2
date2 = date1
date1 = dt
return date1, date2
def pprint_date_span(date1, date2, space=" ", range_str="-"):
date1, date2 = _clean_dates(date1, date2)
d1d = date1.day #decimal (remove leading 0s)
d1m = date1.strftime("%B") #Month name
d1y = date1.strftime("%Y")
#Case: the two are equal; no range
if date1 == date2:
return "%d%s%s%s%s" % (d1d, space, d1m, space, d1y)
d2d = date2.day #decimal (remove leading 0s)
d2m = date2.strftime("%B") #Month name
d2y = date2.strftime("%Y")
#get rid of redundancies
if d1y == d2y:
d1y = ""
if d1m == d2m: d1m = ""
#by now we know that d1d and d2d are different (or in different months)
#Add spacing where necessary
if d1m!="":
if d1y!="":
d1m = "%s%s%s" % (space, d1m, space)
else:
d1m = "%s%s" % (space, d1m)
d2m = "%s%s%s" % (space, d2m, space)
return "%s%s%s%s%s%s%s" % (d1d, d1m, d1y, range_str, d2d, d2m, d2y)
def humanized_date_range(date1, date2, imply_year=True, space=" ", range_str="-"):
"""
    Like pprint_date_span, except that if date1 and date2 exactly define a month,
    just name the month rather than giving the date range. Ditto for an
    exactly-defined year -- just return the year.
    Also, if imply_year=True and both dates fall within the current year, the
    year is omitted (unless the dates exactly describe the year).
"""
date1, date2 = _clean_dates(date1, date2)
if date1.day == 1 and date2.day == days_in_month(date2): #we've got an entire month range.
if date1.year == date2.year:
if date1.month == date2.month:
ds = "%s%s%s" % (date2.strftime("%B"), space, date2.year)
else:
if date1.month ==1 and date2.month == 12:
ds = "%s" % date1.year
else:
ds = "%s%s%s%s%s" % (date1.strftime("%B"), range_str, date2.strftime("%B"), space, date2.year)
else:
if date1.month ==1 and date2.month == 12:
ds = "%s%s%s" % (date1.year, range_str, date2.year)
else:
ds = "%s%s%s%s%s%s%s" % (date1.strftime("%B"), space, date1.year, range_str, date2.strftime("%B"), space, date2.year)
else:
ds = pprint_date_span(date1, date2, space, range_str)
if imply_year:
today = date.today()
if date1.year == date2.year and date1.year == today.year and ds != "%s" % date1.year:
#strip off the year
return ds[:-5]
return ds
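# Examples (taken from the tests below):
#   humanized_date_range(date(2001, 7, 1), date(2001, 7, 31))  -> "July 2001"
#   humanized_date_range(date(2002, 7, 1), date(2002, 8, 31))  -> "July-August 2002"
#   humanized_date_range(date(2001, 1, 1), date(2001, 12, 31)) -> "2001"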
def pprint_time_span(time1, time2, separator=":", am="am", pm="pm", midnight="midnight", noon="noon", range_str="-"):
    if time1 is None and time2 is None:
raise Exception("need to provide at least one time")
apdict = {
'am': am,
'pm': pm,
'midnight': midnight,
'noon': noon,
'': '',
}
##THIS IS NOT LOCALE-TOLERANT. Assumption of am/pm.
if time1 is not None:
t1h = str(int(time1.strftime("%I"))) #decimal (remove leading 0s)
t1m = time1.strftime("%M") #leading 0s
t1ap = time1.strftime("%p").lower()
if t1h == "12" and t1m == "00":
if t1ap == "am":
t1ap = "midnight"
else:
t1ap = "noon"
t1h = ""
        if t1ap in ("midnight", "noon"):
            t1 = ""
        elif t1m == "00":
            # drop ":00" for on-the-hour times, e.g. "10am" rather than "10:00am"
            t1 = t1h
        else:
            t1 = t1h+separator+t1m
#Case: the two are equal; no range
if (time1 == time2):
return "%s%s" % (t1, apdict[t1ap])
if time2 is None:
return "from %s%s" % (t1, apdict[t1ap])
t2h = str(int(time2.strftime("%I"))) #decimal (remove leading 0s)
t2m = time2.strftime("%M") #leading 0s
t2ap = time2.strftime("%p").lower()
if t2h == "12" and t2m == "00":
if t2ap == "am":
t2ap = "midnight"
else:
t2ap = "noon"
t2h = ""
    if t2ap in ("midnight", "noon"):
        t2 = ""
    elif t2m == "00":
        # drop ":00" for on-the-hour times
        t2 = t2h
    else:
        t2 = t2h+separator+t2m
if time1 is not None:
#get rid of redundancies
# and am/pm
if t1ap == t2ap and time1 < time2:
t1ap = ""
return "%s%s%s%s%s" % (t1, apdict[t1ap], range_str, t2, apdict[t2ap])
else:
return "until %s%s" % (t2, apdict[t2ap])
def pprint_datetime_span(d1, t1, d2=None, t2=None,
infer_all_day=True,
space=" ",
date_range_str="-",
time_range_str="-",
separator=":",
grand_range_str=" - ",
am="am",
pm="pm",
noon="noon",
midnight="midnight",
):
datekwargs = {
'range_str': date_range_str,
'space': space,
}
timekwargs = {
'range_str': time_range_str,
'separator': separator,
'am': am,
'pm': pm,
'noon': noon,
'midnight': midnight,
}
if isinstance(d1, datetime):
dt1 = d1
dt2 = t1
#user has passed in datetimes instead
d1 = dt1.date()
t1 = dt1.time()
if isinstance(dt2, datetime):
d2 = dt2.date()
t2 = dt2.time()
else:
d2 = None
t2 = None
if t1 == time.min and t2 == time.max:
if infer_all_day:
return "all day on %s" % pprint_date_span(d1, d2, **datekwargs)
return pprint_date_span(d1, d2, **datekwargs)
if d1 == d2 and t1 is not None and t2 is not None:
return "%(d)s, %(t)s" % {
'd': pprint_date_span(d1, d1, **datekwargs),
't': pprint_time_span(t1, t2, **timekwargs),
}
d1r = pprint_date_span(d1, d1, **datekwargs)
if d2 is not None:
d2r = pprint_date_span(d2, d2, **datekwargs)
else:
d2r = None
if t1 is not None:
t1r = pprint_time_span(t1, t1, **timekwargs)
else:
t1r = None
if t2 is not None:
t2r = pprint_time_span(t2, t2, **timekwargs)
else:
t2r = None
datadict = {
'd1': d1r,
't1': t1r,
'd2': d2r,
't2': t2r
}
if d2 is not None and d2 != d1:
if t1 is not None:
if t2 is not None and t2 != t1:
formatstring = "%(d1)s, %(t1)s until %(t2)s on %(d2)s"
else:
formatstring = "%(d1)s, %(t1)s until %(d2)s"
else:
if t2 is not None:
formatstring = "%(d1)s until %(t2)s on %(d2)s"
else:
return pprint_date_span(d1, d2, **datekwargs) #*****
else:
if t1 is not None:
if t2 is not None and t2 != t1:
return "%(d)s, %(t)s" % {
'd': pprint_date_span(d1, d1, **datekwargs),
't': pprint_time_span(t1, t2, **timekwargs), # *******
}
else:
formatstring = "%(d1)s, %(t1)s"
else:
if t2 is not None:
formatstring = "%(d1)s until %(t2)s"
else:
formatstring = "%(d1)s"
return formatstring % datadict
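# Examples (taken from the tests below):
#   pprint_datetime_span(date(2010, 9, 23), time(12, 42))
#       -> "23 September 2010, 12:42pm"
#   pprint_datetime_span(date(2010, 9, 23), time(12, 42), date(2010, 9, 24), time(14, 42))
#       -> "23 September 2010, 12:42pm until 2:42pm on 24 September 2010"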
if __name__ == "__main__":
import unittest
class TestDateTimeRange(unittest.TestCase):
def setUp(self):
self.ae = self.assertEqual
def test_normality(self):
d1 = date(2010, 9, 23)
d2 = date(2010, 9, 24)
t1 = time(12,42)
t2 = time(14,42)
dt1 = datetime.combine(d1, t1)
dt2 = datetime.combine(d2, t2)
self.ae(pprint_datetime_span(d1, None), "23 September 2010")
self.ae(pprint_datetime_span(d1, t1), "23 September 2010, 12:42pm")
self.ae(pprint_datetime_span(d1, t1, d1), "23 September 2010, 12:42pm")
self.ae(pprint_datetime_span(d1, t1, d1, t1), "23 September 2010, 12:42pm")
self.ae(pprint_datetime_span(d1, None, d2, None), "23-24 September 2010")
self.ae(pprint_datetime_span(d1, t1, None, t2), "23 September 2010, 12:42-2:42pm")
self.ae(pprint_datetime_span(d1, t1, d1, t2), "23 September 2010, 12:42-2:42pm")
self.ae(pprint_datetime_span(d1, t1, d2, t2), "23 September 2010, 12:42pm until 2:42pm on 24 September 2010")
self.ae(pprint_datetime_span(d1, None, d2, t2), "23 September 2010 until 2:42pm on 24 September 2010")
self.ae(pprint_datetime_span(d1, None, None, t2), "23 September 2010 until 2:42pm")
self.ae(pprint_datetime_span(d1, t1, d2), "23 September 2010, 12:42pm until 24 September 2010")
#datetimes
self.ae(pprint_datetime_span(dt1, None), pprint_datetime_span(d1, t1))
self.ae(pprint_datetime_span(dt1, dt2), pprint_datetime_span(d1, t1, d2, t2))
def test_identicality(self):
d1 = date(2010, 9, 23)
d2 = date(2010, 9, 23)
t1 = time(0,0)
t2 = time.max
self.ae(pprint_datetime_span(d1, t1, d2, t1), "23 September 2010, midnight")
self.ae(pprint_datetime_span(d1, None, d2, None), "23 September 2010")
self.ae(pprint_datetime_span(d1, t1, d2, t2), "all day on 23 September 2010")
def test_minmax(self):
dt1 = datetime.combine(date(2010, 9, 23), time.min)
dt2 = datetime.combine(date(2010, 9, 24), time.max)
self.ae(pprint_datetime_span(dt1, dt2, infer_all_day=False), "23-24 September 2010")
self.ae(pprint_datetime_span(dt1, dt2, infer_all_day=True), "all day on 23-24 September 2010")
def test_special(self):
d1 = date(2010, 9, 23)
d2 = date(2010, 9, 24)
t1 = time(00,00)
t2 = time(12,00)
self.ae(pprint_datetime_span(d1, None, d2, t2), "23 September 2010 until noon on 24 September 2010")
self.ae(pprint_datetime_span(d1, t1), "23 September 2010, midnight")
self.ae(pprint_datetime_span(d1, t1, None, t2), "23 September 2010, midnight-noon")
self.ae(pprint_datetime_span(d1, t1, d2), "23 September 2010, midnight until 24 September 2010")
self.ae(pprint_datetime_span(d1, t1, d2, t2), "23 September 2010, midnight until noon on 24 September 2010")
def test_formatting(self):
d1 = date(2010, 9, 23)
d2 = date(2010, 9, 24)
t1 = time(10,42)
t2 = time(14,42)
t3 = time(00,00)
t4 = time(12,00)
kwargs = {
'space': '.',
'date_range_str': " to ",
'time_range_str': "~",
'separator': "/",
'grand_range_str': " continuing to ",
'am': 'a.m.',
'pm': 'p.m.',
'noon': 'nooooon',
'midnight': 'the witching hour',
}
self.ae(pprint_datetime_span(d1, None, **kwargs), "23.September.2010")
self.ae(pprint_datetime_span(d1, None, d2, None, **kwargs), "23 to 24.September.2010")
self.ae(pprint_datetime_span(d1, None, d2, t2, **kwargs), "23.September.2010 until 2/42p.m. on 24.September.2010")
self.ae(pprint_datetime_span(d1, t1, **kwargs), "23.September.2010, 10/42a.m.")
self.ae(pprint_datetime_span(d1, t1, None, t2, **kwargs), "23.September.2010, 10/42a.m.~2/42p.m.")
self.ae(pprint_datetime_span(d1, t1, d2, **kwargs), "23.September.2010, 10/42a.m. until 24.September.2010")
self.ae(pprint_datetime_span(d1, t1, d2, t2, **kwargs), "23.September.2010, 10/42a.m. until 2/42p.m. on 24.September.2010")
self.ae(pprint_datetime_span(d1, t3, None, t4, **kwargs), "23.September.2010, the witching hour~nooooon")
class TestTimeRange(unittest.TestCase):
def setUp(self):
self.ae = self.assertEqual
def test_normality(self):
time1 = time(10,20)
time2 = time(10,40)
self.ae(pprint_time_span(time1, time2), "10:20-10:40am")
self.ae(pprint_time_span(time1, time1), "10:20am")
self.ae(pprint_time_span(time1, None), "from 10:20am")
self.ae(pprint_time_span(None, time2), "until 10:40am")
# might just want to display this.
self.ae(pprint_time_span(time2, time1), "10:40am-10:20am")
def test_formatting(self):
time1 = time(10,20)
time2 = time(14,40)
self.ae(pprint_time_span(time1, time2, separator=".", range_str=" to ", am=" a.m.", pm=" p.m."), "10.20 a.m. to 2.40 p.m.")
time1 = time(00,00)
time2 = time(12,00)
self.ae(pprint_time_span(time1, time2, range_str=" a ", midnight="midnuit", noon="midi"), "midnuit a midi")
def test_overflow(self):
time1 = time(10,50)
time2 = time(14,40)
self.ae(pprint_time_span(time1, time2), "10:50am-2:40pm")
def test_simplify(self):
time1 = time(10,00)
time2 = time(11,00)
time3 = time(11,30)
midnight = time(00,00)
noon = time(12,00)
self.ae(pprint_time_span(time1, time2), "10-11am")
self.ae(pprint_time_span(time1, noon), "10am-noon")
self.ae(pprint_time_span(midnight, time2), "midnight-11am")
self.ae(pprint_time_span(time1, time3), "10-11:30am")
self.ae(pprint_time_span(midnight, None), "from midnight")
self.ae(pprint_time_span(None, midnight), "until midnight")
self.ae(pprint_time_span(midnight, midnight), "midnight")
self.ae(pprint_time_span(noon, None), "from noon")
class TestDateRange(unittest.TestCase):
def setUp(self):
self.ae = self.assertEqual
def test_normality(self):
date1 = date(2001, 10, 10)
date2 = date(2001, 10, 12)
self.ae(pprint_date_span(date1, date2), "10-12 October 2001")
date1 = date(2001, 10, 10)
date2 = date(2001, 10, 10)
self.ae(pprint_date_span(date1, date2), "10 October 2001")
def test_formatting(self):
date1 = date(2001, 10, 10)
date2 = date(2002, 10, 12)
self.ae(pprint_date_span(date1, date2, range_str=" to ", space="."), "10.October.2001 to 12.October.2002")
def test_invalid(self):
date1 = date(2001, 10, 10)
date2 = date(2001, 11, 12)
self.ae(pprint_date_span(date1, None), "10 October 2001")
self.assertRaises(TypeError, pprint_date_span, None, date2)
#what if the dates are not in order
self.ae(pprint_date_span(date2, date1), "10 October-12 November 2001")
# same with humanize
self.ae(humanized_date_range(date1, None), "10 October 2001")
self.assertRaises(TypeError, humanized_date_range, None, date2)
#what if the dates are not in order
self.ae(humanized_date_range(date2, date1), "10 October-12 November 2001")
def test_overflow(self):
date1 = date(2001, 10, 10)
date2 = date(2001, 11, 12)
self.ae(pprint_date_span(date1, date2), "10 October-12 November 2001")
date1 = date(2001, 10, 10)
date2 = date(2002, 10, 12)
self.ae(pprint_date_span(date1, date2), "10 October 2001-12 October 2002")
def test_humanize_date(self):
# Check days are omitted if the range exactly covers the month.
date1 = date(2001, 7, 1)
date2 = date(2001, 7, 31)
self.ae(humanized_date_range(date1, date2), "July 2001")
#Or if it exactly covers 2 or more months
date1 = date(2002, 7, 1)
date2 = date(2002, 8, 31)
self.ae(humanized_date_range(date1, date2), "July-August 2002")
date1 = date(2001, 7, 1)
date2 = date(2002, 8, 31)
self.ae(humanized_date_range(date1, date2), "July 2001-August 2002")
date1 = date(2001, 7, 1)
date2 = date(2002, 7, 31)
self.ae(humanized_date_range(date1, date2), "July 2001-July 2002")
#check that months and days are omitted if the range exactly covers the year
date1 = date(2001, 1, 1)
date2 = date(2001, 12, 31)
self.ae(humanized_date_range(date1, date2), "2001")
date1 = date(2001, 1, 1)
date2 = date(2003, 12, 31)
self.ae(humanized_date_range(date1, date2), "2001-2003")
#Check that the year is omitted for ranges entirely in this year, unless imply_year = False
today = date.today()
date1 = date(today.year, 1, 12)
date2 = date(today.year, 1, 14)
self.ae(humanized_date_range(date1, date2), "12-14 January")
self.ae(humanized_date_range(date1, date2, imply_year=False), "12-14 January %s" % today.year)
date1 = date(today.year, 1, 12)
date2 = date(today.year, 2, 14)
self.ae(humanized_date_range(date1, date2), "12 January-14 February")
self.ae(humanized_date_range(date1, date2, imply_year=False), "12 January-14 February %s" % today.year)
#(but not for ranges spanning years)
date1 = date(today.year, 12, 1)
date2 = date(today.year+1, 3, 31)
self.ae(humanized_date_range(date1, date2), "December %s-March %s" % (date1.year, date2.year))
self.ae(humanized_date_range(date1, date2, imply_year=False), "December %s-March %s" % (date1.year, date2.year))
#And if it's the whole month range in this year, all you need is the month name.
date1 = date(today.year, today.month, 1)
date2 = date(today.year, today.month, days_in_month(today))
self.ae(humanized_date_range(date1, date2), today.strftime("%B"))
self.ae(humanized_date_range(date1, date2, imply_year=False), "%s %s" % (today.strftime("%B"), today.year))
date1 = date(today.year, today.month-1, 1)
date2 = date(today.year, today.month, days_in_month(today))
self.ae(humanized_date_range(date1, date2), "%s-%s" % (date1.strftime("%B"), date2.strftime("%B")))
self.ae(humanized_date_range(date1, date2, imply_year=False), "%s-%s %s" % (date1.strftime("%B"), date2.strftime("%B"), today.year))
#(don't omit this year if the range is the whole year)
date1 = date(today.year, 1, 1)
date2 = date(today.year, 12, 31)
self.ae(humanized_date_range(date1, date2), "%d" % today.year)
self.ae(humanized_date_range(date1, date2, imply_year=False), "%d" % today.year)
unittest.main()
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yapf.pytree_unwrapper."""
import textwrap
import unittest
from yapf.yapflib import pytree_utils
from yapftests import yapf_test_helper
class PytreeUnwrapperTest(yapf_test_helper.YAPFTest):
def _CheckUnwrappedLines(self, uwlines, list_of_expected):
"""Check that the given UnwrappedLines match expectations.
Args:
uwlines: list of UnwrappedLine
list_of_expected: list of (depth, values) pairs. Non-semantic tokens are
filtered out from the expected values.
"""
actual = []
for uwl in uwlines:
filtered_values = [
ft.value
for ft in uwl.tokens
if ft.name not in pytree_utils.NONSEMANTIC_TOKENS
]
actual.append((uwl.depth, filtered_values))
self.assertEqual(list_of_expected, actual)
def testSimpleFileScope(self):
code = textwrap.dedent(r"""
x = 1
# a comment
y = 2
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['x', '=', '1']),
(0, ['# a comment']),
(0, ['y', '=', '2']),
])
def testSimpleMultilineStatement(self):
code = textwrap.dedent(r"""
y = (1 +
x)
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['y', '=', '(', '1', '+', 'x', ')']),
])
def testFileScopeWithInlineComment(self):
code = textwrap.dedent(r"""
x = 1 # a comment
y = 2
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['x', '=', '1', '# a comment']),
(0, ['y', '=', '2']),
])
def testSimpleIf(self):
code = textwrap.dedent(r"""
if foo:
x = 1
y = 2
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['if', 'foo', ':']),
(1, ['x', '=', '1']),
(1, ['y', '=', '2']),
])
def testSimpleIfWithComments(self):
code = textwrap.dedent(r"""
# c1
if foo: # c2
x = 1
y = 2
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['# c1']),
(0, ['if', 'foo', ':', '# c2']),
(1, ['x', '=', '1']),
(1, ['y', '=', '2']),
])
def testIfWithCommentsInside(self):
code = textwrap.dedent(r"""
if foo:
# c1
x = 1 # c2
# c3
y = 2
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['if', 'foo', ':']),
(1, ['# c1']),
(1, ['x', '=', '1', '# c2']),
(1, ['# c3']),
(1, ['y', '=', '2']),
])
def testIfElifElse(self):
code = textwrap.dedent(r"""
if x:
x = 1 # c1
elif y: # c2
y = 1
else:
# c3
z = 1
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['if', 'x', ':']),
(1, ['x', '=', '1', '# c1']),
(0, ['elif', 'y', ':', '# c2']),
(1, ['y', '=', '1']),
(0, ['else', ':']),
(1, ['# c3']),
(1, ['z', '=', '1']),
])
def testNestedCompoundTwoLevel(self):
code = textwrap.dedent(r"""
if x:
x = 1 # c1
while t:
# c2
j = 1
k = 1
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['if', 'x', ':']),
(1, ['x', '=', '1', '# c1']),
(1, ['while', 't', ':']),
(2, ['# c2']),
(2, ['j', '=', '1']),
(1, ['k', '=', '1']),
])
def testSimpleWhile(self):
code = textwrap.dedent(r"""
while x > 1: # c1
# c2
x = 1
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['while', 'x', '>', '1', ':', '# c1']),
(1, ['# c2']),
(1, ['x', '=', '1']),
])
def testSimpleTry(self):
code = textwrap.dedent(r"""
try:
pass
except:
pass
except:
pass
else:
pass
finally:
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['try', ':']),
(1, ['pass']),
(0, ['except', ':']),
(1, ['pass']),
(0, ['except', ':']),
(1, ['pass']),
(0, ['else', ':']),
(1, ['pass']),
(0, ['finally', ':']),
(1, ['pass']),
])
def testSimpleFuncdef(self):
code = textwrap.dedent(r"""
def foo(x): # c1
# c2
return x
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['def', 'foo', '(', 'x', ')', ':', '# c1']),
(1, ['# c2']),
(1, ['return', 'x']),
])
def testTwoFuncDefs(self):
code = textwrap.dedent(r"""
def foo(x): # c1
# c2
return x
def bar(): # c3
# c4
return x
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['def', 'foo', '(', 'x', ')', ':', '# c1']),
(1, ['# c2']),
(1, ['return', 'x']),
(0, ['def', 'bar', '(', ')', ':', '# c3']),
(1, ['# c4']),
(1, ['return', 'x']),
])
def testSimpleClassDef(self):
code = textwrap.dedent(r"""
class Klass: # c1
# c2
p = 1
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['class', 'Klass', ':', '# c1']),
(1, ['# c2']),
(1, ['p', '=', '1']),
])
def testSingleLineStmtInFunc(self):
code = textwrap.dedent(r"""
def f(): return 37
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['def', 'f', '(', ')', ':']),
(1, ['return', '37']),
])
def testMultipleComments(self):
code = textwrap.dedent(r"""
# Comment #1
# Comment #2
def f():
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['# Comment #1']),
(0, ['# Comment #2']),
(0, ['def', 'f', '(', ')', ':']),
(1, ['pass']),
])
def testSplitListWithComment(self):
code = textwrap.dedent(r"""
a = [
'a',
'b',
'c', # hello world
]
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [(0, [
'a', '=', '[', "'a'", ',', "'b'", ',', "'c'", ',', '# hello world', ']'
])])
class MatchBracketsTest(yapf_test_helper.YAPFTest):
def _CheckMatchingBrackets(self, uwlines, list_of_expected):
"""Check that the tokens have the expected matching bracket.
Arguments:
uwlines: list of UnwrappedLine.
list_of_expected: list of (index, index) pairs. The matching brackets at
the indexes need to match. Non-semantic tokens are filtered out from the
expected values.
"""
actual = []
for uwl in uwlines:
filtered_values = [(ft, ft.matching_bracket)
for ft in uwl.tokens
if ft.name not in pytree_utils.NONSEMANTIC_TOKENS]
if filtered_values:
actual.append(filtered_values)
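    # Each expected (open, close) pair is checked in both directions below: the
    # open bracket's matching_bracket must be the close bracket, and the close
    # bracket's matching_bracket must be the open bracket.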
for index, bracket_list in enumerate(list_of_expected):
uwline = actual[index]
if not bracket_list:
for value in uwline:
self.assertIsNone(value[1])
else:
for open_bracket, close_bracket in bracket_list:
self.assertEqual(uwline[open_bracket][0], uwline[close_bracket][1])
self.assertEqual(uwline[close_bracket][0], uwline[open_bracket][1])
def testFunctionDef(self):
code = textwrap.dedent("""\
def foo(a, b=['w','d'], c=[42, 37]):
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckMatchingBrackets(uwlines, [
[(2, 20), (7, 11), (15, 19)],
[],
])
def testDecorator(self):
code = textwrap.dedent("""\
@bar()
def foo(a, b, c):
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckMatchingBrackets(uwlines, [
[(2, 3)],
[(2, 8)],
[],
])
def testClassDef(self):
code = textwrap.dedent("""\
class A(B, C, D):
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
self._CheckMatchingBrackets(uwlines, [
[(2, 8)],
[],
])
if __name__ == '__main__':
unittest.main()
|
|
from sqlalchemy import and_
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import INT
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import MetaData
from sqlalchemy import Sequence
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import VARCHAR
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class InsertExecTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
@testing.requires.multivalues_inserts
def test_multivalues_insert(self, connection):
users = self.tables.users
connection.execute(
users.insert().values(
[
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
]
)
)
rows = connection.execute(
users.select().order_by(users.c.user_id)
).all()
eq_(rows[0], (7, "jack"))
eq_(rows[1], (8, "ed"))
connection.execute(users.insert().values([(9, "jack"), (10, "ed")]))
rows = connection.execute(
users.select().order_by(users.c.user_id)
).all()
eq_(rows[2], (9, "jack"))
eq_(rows[3], (10, "ed"))
def test_insert_heterogeneous_params(self, connection):
"""test that executemany parameters are asserted to match the
parameter set of the first."""
users = self.tables.users
assert_raises_message(
exc.StatementError,
r"\(sqlalchemy.exc.InvalidRequestError\) A value is required for "
"bind parameter 'user_name', in "
"parameter group 2\n"
r"\[SQL: u?INSERT INTO users",
connection.execute,
users.insert(),
[
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9},
],
)
# this succeeds however. We aren't yet doing
# a length check on all subsequent parameters.
connection.execute(
users.insert(),
[
{"user_id": 7},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9},
],
)
def _test_lastrow_accessor(self, table_, values, assertvalues):
"""Tests the inserted_primary_key and lastrow_has_id() functions."""
def insert_values(engine, table_, values):
"""
Inserts a row into a table, returns the full list of values
INSERTed including defaults that fired off on the DB side and
detects rows that had defaults and post-fetches.
"""
# verify implicit_returning is working
if engine.dialect.implicit_returning:
ins = table_.insert()
comp = ins.compile(engine, column_keys=list(values))
if not set(values).issuperset(
c.key for c in table_.primary_key
):
is_(bool(comp.returning), True)
with engine.begin() as connection:
result = connection.execute(table_.insert(), values)
ret = values.copy()
ipk = result.inserted_primary_key
for col, id_ in zip(table_.primary_key, ipk):
ret[col.key] = id_
if result.lastrow_has_defaults():
criterion = and_(
*[
col == id_
for col, id_ in zip(
table_.primary_key, result.inserted_primary_key
)
]
)
row = connection.execute(
table_.select().where(criterion)
).first()
for c in table_.c:
ret[c.key] = row._mapping[c]
return ret, ipk
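        # Exercise the accessor with implicit RETURNING both disabled and
        # enabled when the dialect supports it; otherwise just use the
        # default testing engine.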
if testing.against("postgresql", "oracle", "mssql"):
assert testing.db.dialect.implicit_returning
if testing.db.dialect.implicit_returning:
test_engines = [
engines.testing_engine(options={"implicit_returning": False}),
engines.testing_engine(options={"implicit_returning": True}),
]
else:
test_engines = [testing.db]
for engine in test_engines:
try:
table_.create(bind=engine, checkfirst=True)
i, ipk = insert_values(engine, table_, values)
eq_(i, assertvalues)
# named tuple tests
for col in table_.primary_key:
eq_(getattr(ipk, col.key), assertvalues[col.key])
eq_(ipk._mapping[col.key], assertvalues[col.key])
eq_(
ipk._fields, tuple([col.key for col in table_.primary_key])
)
finally:
table_.drop(bind=engine)
@testing.skip_if("sqlite")
def test_lastrow_accessor_one(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t1",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("foo", String(30), primary_key=True),
),
{"foo": "hi"},
{"id": 1, "foo": "hi"},
)
@testing.skip_if("sqlite")
def test_lastrow_accessor_two(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t2",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("foo", String(30), primary_key=True),
Column("bar", String(30), server_default="hi"),
),
{"foo": "hi"},
{"id": 1, "foo": "hi", "bar": "hi"},
)
def test_lastrow_accessor_three(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t3",
metadata,
Column("id", String(40), primary_key=True),
Column("foo", String(30), primary_key=True),
Column("bar", String(30)),
),
{"id": "hi", "foo": "thisisfoo", "bar": "thisisbar"},
{"id": "hi", "foo": "thisisfoo", "bar": "thisisbar"},
)
@testing.requires.sequences
def test_lastrow_accessor_four(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t4",
metadata,
Column(
"id",
Integer,
Sequence("t4_id_seq", optional=True),
primary_key=True,
),
Column("foo", String(30), primary_key=True),
Column("bar", String(30), server_default="hi"),
),
{"foo": "hi", "id": 1},
{"id": 1, "foo": "hi", "bar": "hi"},
)
@testing.requires.sequences
def test_lastrow_accessor_four_a(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t4",
metadata,
Column(
"id",
Integer,
Sequence("t4_id_seq"),
primary_key=True,
),
Column("foo", String(30)),
),
{"foo": "hi"},
{"id": 1, "foo": "hi"},
)
def test_lastrow_accessor_five(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t5",
metadata,
Column("id", String(10), primary_key=True),
Column("bar", String(30), server_default="hi"),
),
{"id": "id1"},
{"id": "id1", "bar": "hi"},
)
@testing.skip_if("sqlite")
def test_lastrow_accessor_six(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t6",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("bar", Integer, primary_key=True),
),
{"bar": 0},
{"id": 1, "bar": 0},
)
# TODO: why not in the sqlite suite?
@testing.only_on("sqlite+pysqlite")
@testing.provide_metadata
def test_lastrowid_zero(self):
from sqlalchemy.dialects import sqlite
eng = engines.testing_engine()
class ExcCtx(sqlite.base.SQLiteExecutionContext):
def get_lastrowid(self):
return 0
eng.dialect.execution_ctx_cls = ExcCtx
t = Table(
"t",
self.metadata,
Column("x", Integer, primary_key=True),
Column("y", Integer),
)
with eng.begin() as conn:
t.create(conn)
r = conn.execute(t.insert().values(y=5))
eq_(r.inserted_primary_key, (0,))
@testing.fails_on(
"sqlite", "sqlite autoincrement doesn't work with composite pks"
)
@testing.provide_metadata
def test_misordered_lastrow(self, connection):
metadata = self.metadata
related = Table(
"related",
metadata,
Column("id", Integer, primary_key=True),
mysql_engine="MyISAM",
mariadb_engine="MyISAM",
)
t6 = Table(
"t6",
metadata,
Column(
"manual_id",
Integer,
ForeignKey("related.id"),
primary_key=True,
),
Column(
"auto_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
mysql_engine="MyISAM",
mariadb_engine="MyISAM",
)
metadata.create_all(connection)
r = connection.execute(related.insert().values(id=12))
id_ = r.inserted_primary_key[0]
eq_(id_, 12)
r = connection.execute(t6.insert().values(manual_id=id_))
eq_(r.inserted_primary_key, (12, 1))
def test_implicit_id_insert_select_columns(self, connection):
users = self.tables.users
stmt = users.insert().from_select(
(users.c.user_id, users.c.user_name),
users.select().where(users.c.user_id == 20),
)
r = connection.execute(stmt)
eq_(r.inserted_primary_key, (None,))
def test_implicit_id_insert_select_keys(self, connection):
users = self.tables.users
stmt = users.insert().from_select(
["user_id", "user_name"],
users.select().where(users.c.user_id == 20),
)
r = connection.execute(stmt)
eq_(r.inserted_primary_key, (None,))
@testing.requires.empty_inserts
@testing.requires.returning
def test_no_inserted_pk_on_returning(self, connection):
users = self.tables.users
result = connection.execute(
users.insert().returning(users.c.user_id, users.c.user_name)
)
assert_raises_message(
exc.InvalidRequestError,
r"Can't call inserted_primary_key when returning\(\) is used.",
getattr,
result,
"inserted_primary_key",
)
class TableInsertTest(fixtures.TablesTest):
"""test for consistent insert behavior across dialects
regarding the inline() method, lower-case 't' tables.
"""
run_create_tables = "each"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"foo",
metadata,
Column(
"id",
Integer,
Sequence("t_id_seq"),
primary_key=True,
),
Column("data", String(50)),
Column("x", Integer),
)
Table(
"foo_no_seq",
metadata,
# note this will have full AUTO INCREMENT on MariaDB
# whereas "foo" will not due to sequence support
Column(
"id",
Integer,
primary_key=True,
),
Column("data", String(50)),
Column("x", Integer),
)
def _fixture(self, types=True):
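        # Build a lightweight sql.table() construct (no Sequence/autoincrement
        # metadata), as opposed to the full Table objects in self.tables.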
if types:
t = sql.table(
"foo",
sql.column("id", Integer),
sql.column("data", String),
sql.column("x", Integer),
)
else:
t = sql.table(
"foo", sql.column("id"), sql.column("data"), sql.column("x")
)
return t
def _test(
self,
connection,
stmt,
row,
returning=None,
inserted_primary_key=False,
table=None,
):
r = connection.execute(stmt)
if returning:
returned = r.first()
eq_(returned, returning)
elif inserted_primary_key is not False:
eq_(r.inserted_primary_key, inserted_primary_key)
if table is None:
table = self.tables.foo
eq_(connection.execute(table.select()).first(), row)
def _test_multi(self, connection, stmt, rows, data):
connection.execute(stmt, rows)
eq_(
connection.execute(
self.tables.foo.select().order_by(self.tables.foo.c.id)
).all(),
data,
)
@testing.requires.sequences
def test_explicit_sequence(self, connection):
t = self._fixture()
self._test(
connection,
t.insert().values(
id=func.next_value(Sequence("t_id_seq")), data="data", x=5
),
(testing.db.dialect.default_sequence_base, "data", 5),
)
def test_uppercase(self, connection):
t = self.tables.foo
self._test(
connection,
t.insert().values(id=1, data="data", x=5),
(1, "data", 5),
inserted_primary_key=(1,),
)
def test_uppercase_inline(self, connection):
t = self.tables.foo
self._test(
connection,
t.insert().inline().values(id=1, data="data", x=5),
(1, "data", 5),
inserted_primary_key=(1,),
)
@testing.crashes(
"mssql+pyodbc",
"Pyodbc + SQL Server + Py3K, some decimal handling issue",
)
def test_uppercase_inline_implicit(self, connection):
t = self.tables.foo
self._test(
connection,
t.insert().inline().values(data="data", x=5),
(1, "data", 5),
inserted_primary_key=(None,),
)
def test_uppercase_implicit(self, connection):
t = self.tables.foo
self._test(
connection,
t.insert().values(data="data", x=5),
(testing.db.dialect.default_sequence_base, "data", 5),
inserted_primary_key=(testing.db.dialect.default_sequence_base,),
)
def test_uppercase_direct_params(self, connection):
t = self.tables.foo
self._test(
connection,
t.insert().values(id=1, data="data", x=5),
(1, "data", 5),
inserted_primary_key=(1,),
)
@testing.requires.returning
def test_uppercase_direct_params_returning(self, connection):
t = self.tables.foo
self._test(
connection,
t.insert().values(id=1, data="data", x=5).returning(t.c.id, t.c.x),
(1, "data", 5),
returning=(1, 5),
)
@testing.requires.sql_expressions_inserted_as_primary_key
def test_sql_expr_lastrowid(self, connection):
# see also test.orm.test_unitofwork.py
# ClauseAttributesTest.test_insert_pk_expression
t = self.tables.foo_no_seq
self._test(
connection,
t.insert().values(id=literal(5) + 10, data="data", x=5),
(15, "data", 5),
inserted_primary_key=(15,),
table=self.tables.foo_no_seq,
)
def test_direct_params(self, connection):
t = self._fixture()
self._test(
connection,
t.insert().values(id=1, data="data", x=5),
(1, "data", 5),
inserted_primary_key=(),
)
@testing.requires.returning
def test_direct_params_returning(self, connection):
t = self._fixture()
self._test(
connection,
t.insert().values(id=1, data="data", x=5).returning(t.c.id, t.c.x),
(testing.db.dialect.default_sequence_base, "data", 5),
returning=(testing.db.dialect.default_sequence_base, 5),
)
    # there's a non-optional Sequence in the metadata, which, if the dialect
    # supports sequences, means the CREATE TABLE should *not* have
    # autoincrement; the INSERT below would then fail because the "t" fixture
    # does not include the Sequence
@testing.fails_if(testing.requires.sequences)
@testing.requires.emulated_lastrowid
def test_implicit_pk(self, connection):
t = self._fixture()
self._test(
connection,
t.insert().values(data="data", x=5),
(testing.db.dialect.default_sequence_base, "data", 5),
inserted_primary_key=(),
)
@testing.fails_if(testing.requires.sequences)
@testing.requires.emulated_lastrowid
def test_implicit_pk_multi_rows(self, connection):
t = self._fixture()
self._test_multi(
connection,
t.insert(),
[
{"data": "d1", "x": 5},
{"data": "d2", "x": 6},
{"data": "d3", "x": 7},
],
[(1, "d1", 5), (2, "d2", 6), (3, "d3", 7)],
)
@testing.fails_if(testing.requires.sequences)
@testing.requires.emulated_lastrowid
def test_implicit_pk_inline(self, connection):
t = self._fixture()
self._test(
connection,
t.insert().inline().values(data="data", x=5),
(testing.db.dialect.default_sequence_base, "data", 5),
inserted_primary_key=(),
)
|
|
# -*- coding: utf-8 -*-
"""
Missing Person Registry
@author: nursix
"""
module = request.controller
prefix = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % prefix)
MISSING = str(T("Missing"))
FOUND = str(T("Found"))
DETAILS = str(T("Details"))
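# Helper building an action-button specification for list views, e.g.
# action(MISSING, report_url) -> {"label": "Missing", "url": report_url,
#                                 "_class": "action-btn"}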
action = lambda l, u: dict(label=str(l), url=str(u), _class="action-btn")
# -----------------------------------------------------------------------------
def index():
""" Home Page """
try:
module_name = settings.modules[prefix].name_nice
except:
module_name = T("Missing Persons Registry")
prefix = "pr"
resourcename = "person"
tablename = "%s_%s" % (prefix, resourcename)
table = s3db[tablename]
report_url = URL(c="mpr", f=resourcename,
args=["[id]", "note"],
vars=dict(status="missing"))
s3db.configure(tablename,
create_next=report_url,
list_fields=["id",
"first_name",
"middle_name",
"last_name",
"picture",
"gender",
"age_group",
"missing"])
def prep(r):
if r.representation == "html":
if not r.id and not r.method:
r.method = "search"
else:
redirect(URL(resourcename, args=request.args))
return True
s3.prep = prep
def postp(r, output):
s3.actions = []
if not r.component:
open_button_label = DETAILS
if auth.s3_logged_in():
mreport = URL(resourcename,
args=["[id]", "note", "create"],
vars=dict(status="missing"))
freport = URL(resourcename,
args=["[id]", "note", "create"],
vars=dict(status="found"))
s3.actions = [action(MISSING, mreport),
action(FOUND, freport)]
# Is the current user reported missing?
if isinstance(output, dict):
person = s3_logged_in_person()
if person and db.pr_person[person].missing:
myself = URL(resourcename,
args=[person, "note", "create"],
vars=dict(status="found"))
output.update(myself=myself)
else:
open_button_label = UPDATE
#linkto = r.resource.crud._linkto(r, update=True)("[id]")
linkto = URL(resourcename,
args=["[id]", "note"])
s3.actions.append(action(open_button_label, linkto))
return output
s3.postp = postp
output = s3_rest_controller(prefix, resourcename,
module_name=module_name)
response.view = "mpr/index.html"
response.title = module_name
return output
# -----------------------------------------------------------------------------
def person():
""" Missing Persons List """
prefix = "pr"
tablename = "%s_%s" % (prefix, resourcename)
table = s3db[tablename]
s3.crud_strings[tablename].update(
title_display = T("Missing Person Details"),
title_list = T("Missing Persons"),
label_list_button = T("List Missing Persons"),
msg_list_empty = T("No Persons currently reported missing"))
s3db.configure("pr_group_membership",
list_fields=["id",
"group_id",
"group_head",
"description"
])
s3db.configure(tablename,
create_next = URL(c="mpr", f="person",
args=["[id]", "note", "create"],
vars=dict(status="missing")),
list_fields=["id",
"first_name",
"middle_name",
"last_name",
"picture",
"gender",
"age_group",
"missing"
])
def prep(r):
if r.interactive and not r.id:
r.resource.add_filter(db.pr_person.missing == True)
if r.component_name == "config":
_config = s3db.gis_config
defaults = db(_config.id == 1).select(limitby=(0, 1)).first()
for key in defaults.keys():
if key not in ["id",
"uuid",
"mci",
"update_record",
"delete_record"]:
_config[key].default = defaults[key]
elif r.component_name == "note":
ntable = db.pr_note
status = r.vars.get("status", None)
if status:
if status == "missing":
ntable.status.default = 1
ntable.status.writable = False
ntable.timestmp.label = T("Date/Time when last seen")
ntable.note_text.label = T("Circumstances of disappearance")
s3.crud_strings[str(ntable)].update(
title_create = "Add Missing Report",
subtitle_create = "Add Missing Report")
elif status == "found":
ntable.status.default = 2
ntable.status.writable = False
ntable.timestmp.label = T("Date/Time when found")
ntable.note_text.label = T("Comments")
s3.crud_strings[str(ntable)].update(
title_create = "Add Find Report",
subtitle_create = "Add Find Report")
else:
ntable.status.default = 99
ntable.status.writable = True
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
if not r.component:
label = READ
linkto = URL(f="person",
args=("[id]", "note"))
else:
label = UPDATE
linkto = r.resource.crud._linkto(r)("[id]")
s3.actions = [action(label, linkto)]
if not r.component:
label = FOUND
linkto = URL(f="person",
args=("[id]", "note", "create"),
vars=dict(status="found"))
s3.actions.append(action(label, linkto))
return output
s3.postp = postp
ptable = db.pr_person
field = ptable.missing
field.default = True
field.readable = False
field.writable = False
ptable.pe_label.readable = False
ptable.pe_label.writable = False
ptable.occupation.readable = False
ptable.occupation.writable = False
ptable.age_group.readable = True
ptable.age_group.writable = True
mpr_tabs = [(T("Person Details"), None),
(T("Physical Description"), "physical_description"),
(T("Images"), "image"),
(T("Identity"), "identity"),
(T("Address"), "address"),
(T("Contact Data"), "contact"),
(T("Journal"), "note")]
rheader = lambda r: s3db.pr_rheader(r, tabs=mpr_tabs)
output = s3_rest_controller("pr", resourcename, rheader=rheader)
return output
# END =========================================================================
|
|
from collections import OrderedDict, Iterable
from itertools import takewhile, count
try:
from itertools import izip, ifilter
except ImportError: #Python3
izip = zip
ifilter = filter
from datetime import datetime, timedelta
import numpy as np
from scipy.optimize import leastsq, fsolve
from astro import astro
import constituent
d2r, r2d = np.pi/180.0, 180.0/np.pi
class Tide(object):
dtype = np.dtype([
('constituent', object),
('amplitude', float),
('phase', float)])
def __init__(
self,
constituents = None,
amplitudes = None,
phases = None,
model = None,
radians = False
):
"""
Initialise a tidal model. Provide constituents, amplitudes and phases OR a model.
Arguments:
constituents -- list of constituents used in the model.
amplitudes -- list of amplitudes corresponding to constituents
phases -- list of phases corresponding to constituents
model -- an ndarray of type Tide.dtype representing the constituents, amplitudes and phases.
radians -- boolean representing whether phases are in radians (default False)
"""
if None not in [constituents, amplitudes, phases]:
if len(constituents) == len(amplitudes) == len(phases):
model = np.zeros(len(phases), dtype=Tide.dtype)
model['constituent'] = np.array(constituents)
model['amplitude'] = np.array(amplitudes)
model['phase'] = np.array(phases)
else:
raise ValueError("Constituents, amplitudes and phases should all be arrays of equal length.")
elif model is not None:
if not model.dtype == Tide.dtype:
raise ValueError("Model must be a numpy array with dtype == Tide.dtype")
else:
raise ValueError("Must be initialised with constituents, amplitudes and phases; or a model.")
if radians:
model['phase'] = r2d*model['phase']
self.model = model[:]
self.normalize()
def prepare(self, *args, **kwargs):
return Tide._prepare(self.model['constituent'], *args, **kwargs)
@staticmethod
def _prepare(constituents, t0, t = None, radians = True):
"""
Return constituent speed and equilibrium argument at a given time, and constituent node factors at given times.
Arguments:
constituents -- list of constituents to prepare
t0 -- time at which to evaluate speed and equilibrium argument for each constituent
t -- list of times at which to evaluate node factors for each constituent (default: t0)
radians -- whether to return the angular arguments in radians or degrees (default: True)
"""
#The equilibrium argument is constant and taken at the beginning of the
#time series (t0). The speed of the equilibrium argument changes very
#slowly, so again we take it to be constant over any length of data. The
#node factors change more rapidly.
if isinstance(t0, Iterable):
t0 = t0[0]
if t is None:
t = [t0]
if not isinstance(t, Iterable):
t = [t]
a0 = astro(t0)
a = [astro(t_i) for t_i in t]
#For convenience give u, V0 (but not speed!) in [0, 360)
V0 = np.array([c.V(a0) for c in constituents])[:, np.newaxis]
speed = np.array([c.speed(a0) for c in constituents])[:, np.newaxis]
u = [np.mod(np.array([c.u(a_i) for c in constituents])[:, np.newaxis], 360.0)
for a_i in a]
f = [np.mod(np.array([c.f(a_i) for c in constituents])[:, np.newaxis], 360.0)
for a_i in a]
if radians:
speed = d2r*speed
V0 = d2r*V0
u = [d2r*each for each in u]
return speed, u, f, V0
def at(self, t):
"""
Return the modelled tidal height at given times.
Arguments:
t -- array of times at which to evaluate the tidal height
"""
t0 = t[0]
hours = self._hours(t0, t)
partition = 240.0
t = self._partition(hours, partition)
times = self._times(t0, [(i + 0.5)*partition for i in range(len(t))])
speed, u, f, V0 = self.prepare(t0, times, radians = True)
H = self.model['amplitude'][:, np.newaxis]
p = d2r*self.model['phase'][:, np.newaxis]
return np.concatenate([
Tide._tidal_series(t_i, H, p, speed, u_i, f_i, V0)
for t_i, u_i, f_i in izip(t, u, f)
])
def highs(self, *args):
"""
Generator yielding only the high tides.
Arguments:
see Tide.extrema()
"""
for t in ifilter(lambda e: e[2] == 'H', self.extrema(*args)):
yield t
def lows(self, *args):
"""
Generator yielding only the low tides.
Arguments:
see Tide.extrema()
"""
for t in ifilter(lambda e: e[2] == 'L', self.extrema(*args)):
yield t
def form_number(self):
"""
Returns the model's form number, a helpful heuristic for classifying tides.
"""
k1, o1, m2, s2 = (
np.extract(self.model['constituent'] == c, self.model['amplitude'])
for c in [constituent._K1, constituent._O1, constituent._M2, constituent._S2]
)
return (k1+o1)/(m2+s2)
def classify(self):
"""
Classify the tide according to its form number
"""
form = self.form_number()
if 0 <= form <= 0.25:
return 'semidiurnal'
elif 0.25 < form <= 1.5:
return 'mixed (semidiurnal)'
elif 1.5 < form <= 3.0:
return 'mixed (diurnal)'
else:
return 'diurnal'
def extrema(self, t0, t1 = None, partition = 2400.0):
"""
A generator for high and low tides.
Arguments:
t0 -- time after which extrema are sought
t1 -- optional time before which extrema are sought (if not given, the generator is infinite)
partition -- number of hours for which we consider the node factors to be constant (default: 2400.0)
"""
if t1:
#yield from in python 3.4
for e in takewhile(lambda t: t[0] < t1, self.extrema(t0)):
yield e
else:
#We assume that extrema are separated by at least delta hours
delta = np.amin([
90.0 / c.speed(astro(t0)) for c in self.model['constituent']
if not c.speed(astro(t0)) == 0
])
#We search for stationary points from offset hours before t0 to
#ensure we find any which might occur very soon after t0.
offset = 24.0
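			#partitions is a pair of generators yielding the start and end times
			#of consecutive windows of `partition` hours.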
			partitions = (
				(Tide._times(t0, i*partition) for i in count()),
				(Tide._times(t0, i*partition) for i in count(1)),
			)
#We'll overestimate to be on the safe side;
#values outside (start,end) won't get yielded.
interval_count = int(np.ceil((partition + offset) / delta)) + 1
amplitude = self.model['amplitude'][:, np.newaxis]
phase = d2r*self.model['phase'][:, np.newaxis]
for start, end in izip(*partitions):
speed, [u], [f], V0 = self.prepare(start, Tide._times(start, 0.5*partition))
#These derivatives don't include the time dependence of u or f,
#but these change slowly.
def d(t):
return np.sum(-speed*amplitude*f*np.sin(speed*t + (V0 + u) - phase), axis=0)
def d2(t):
return np.sum(-speed**2.0 * amplitude*f*np.cos(speed*t + (V0 + u) - phase), axis=0)
#We'll overestimate to be on the safe side;
#values outside (start,end) won't get yielded.
				intervals = (
					(delta*i - offset for i in range(interval_count)),
					(delta*(i+1) - offset for i in range(interval_count)),
				)
for a, b in izip(*intervals):
if d(a)*d(b) < 0:
extrema = fsolve(d, (a + b) / 2.0, fprime = d2)[0]
time = Tide._times(start, extrema)
[height] = self.at([time])
hilo = 'H' if d2(extrema) < 0 else 'L'
if start < time < end:
yield (time, height, hilo)
@staticmethod
def _hours(t0, t):
"""
Return the hourly offset(s) of a (list of) time from a given time.
Arguments:
t0 -- time from which offsets are sought
t -- times to find hourly offsets from t0.
"""
if not isinstance(t, Iterable):
return Tide._hours(t0, [t])[0]
elif isinstance(t[0], datetime):
return np.array([(ti-t0).total_seconds() / 3600.0 for ti in t])
else:
return t
@staticmethod
def _partition(hours, partition = 3600.0):
"""
Partition a sorted list of numbers (or in this case hours).
Arguments:
hours -- sorted ndarray of hours.
partition -- maximum partition length (default: 3600.0)
"""
partition = float(partition)
relative = hours - hours[0]
total_partitions = np.ceil(relative[-1] / partition + 10*np.finfo(np.float).eps).astype('int')
return [hours[np.floor(np.divide(relative, partition)) == i] for i in range(total_partitions)]
@staticmethod
def _times(t0, hours):
"""
Return a (list of) datetime(s) given an initial time and an (list of) hourly offset(s).
Arguments:
t0 -- initial time
hours -- hourly offsets from t0
"""
if not isinstance(hours, Iterable):
return Tide._times(t0, [hours])[0]
elif not isinstance(hours[0], datetime):
return np.array([t0 + timedelta(hours=h) for h in hours])
else:
return np.array(hours)
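	#Harmonic synthesis: sum over constituents of amplitude*f*cos(speed*t + (V0 + u) - phase),
	#evaluated at the given hourly offsets t.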
@staticmethod
def _tidal_series(t, amplitude, phase, speed, u, f, V0):
return np.sum(amplitude*f*np.cos(speed*t + (V0 + u) - phase), axis=0)
def normalize(self):
"""
Adapt self.model so that amplitudes are positive and phases are in [0,360) as per convention
"""
for i, (_, amplitude, phase) in enumerate(self.model):
if amplitude < 0:
self.model['amplitude'][i] = -amplitude
self.model['phase'][i] = phase + 180.0
self.model['phase'][i] = np.mod(self.model['phase'][i], 360.0)
@classmethod
def decompose(
cls,
heights,
t = None,
t0 = None,
interval = None,
constituents = constituent.noaa,
initial = None,
n_period = 2,
callback = None,
full_output = False
):
"""
Return an instance of Tide which has been fitted to a series of tidal observations.
Arguments:
It is not necessary to provide t0 or interval if t is provided.
heights -- ndarray of tidal observation heights
t -- ndarray of tidal observation times
t0 -- datetime representing the time at which heights[0] was recorded
interval -- hourly interval between readings
constituents -- list of constituents to use in the fit (default: constituent.noaa)
initial -- optional Tide instance to use as first guess for least squares solver
n_period -- only include constituents which complete at least this many periods (default: 2)
callback -- optional function to be called at each iteration of the solver
full_output -- whether to return the output of scipy's leastsq solver (default: False)
"""
if t is not None:
if isinstance(t[0], datetime):
hours = Tide._hours(t[0], t)
t0 = t[0]
elif t0 is not None:
hours = t
else:
raise ValueError("t can be an array of datetimes, or an array "
"of hours since t0 in which case t0 must be "
"specified.")
elif None not in [t0, interval]:
hours = np.arange(len(heights)) * interval
else:
raise ValueError("Must provide t(datetimes), or t(hours) and "
"t0(datetime), or interval(hours) and t0(datetime) "
"so that each height can be identified with an "
"instant in time.")
#Remove duplicate constituents (those which travel at exactly the same
#speed, irrespective of phase)
constituents = list(OrderedDict.fromkeys(constituents))
#No need for least squares to find the mean water level constituent z0,
#work relative to mean
constituents = [c for c in constituents if not c == constituent._Z0]
z0 = np.mean(heights)
heights = heights - z0
#Only analyse frequencies which complete at least n_period cycles over
#the data period.
constituents = [
c for c in constituents
if 360.0 * n_period < hours[-1] * c.speed(astro(t0))
]
n = len(constituents)
sort = np.argsort(hours)
hours = hours[sort]
heights = heights[sort]
#We partition our time/height data into intervals over which we consider
#the values of u and f to assume a constant value (that is, their true
#value at the midpoint of the interval). Constituent
#speeds change much more slowly than the node factors, so we will
#consider these constant and equal to their speed at t0, regardless of
#the length of the time series.
partition = 240.0
t = Tide._partition(hours, partition)
times = Tide._times(t0, [(i + 0.5)*partition for i in range(len(t))])
speed, u, f, V0 = Tide._prepare(constituents, t0, times, radians = True)
#Residual to be minimised by variation of parameters (amplitudes, phases)
def residual(hp):
H, p = hp[:n, np.newaxis], hp[n:, np.newaxis]
s = np.concatenate([
Tide._tidal_series(t_i, H, p, speed, u_i, f_i, V0)
for t_i, u_i, f_i in izip(t, u, f)
])
res = heights - s
if callback:
callback(res)
return res
#Analytic Jacobian of the residual - this makes solving significantly
#faster than just using gradient approximation, especially with many
#measurements / constituents.
def D_residual(hp):
H, p = hp[:n, np.newaxis], hp[n:, np.newaxis]
ds_dH = np.concatenate([
f_i*np.cos(speed*t_i+u_i+V0-p)
for t_i, u_i, f_i in izip(t, u, f)],
axis = 1)
ds_dp = np.concatenate([
H*f_i*np.sin(speed*t_i+u_i+V0-p)
for t_i, u_i, f_i in izip(t, u, f)],
axis = 1)
return np.append(-ds_dH, -ds_dp, axis=0)
		#Initial guess for the solver; we haven't done any analysis on this since
		#the solver seems to converge well regardless of the initial guess. We do,
		#however, scale the initial amplitude guess with some measure of the
		#variation.
amplitudes = np.ones(n) * (np.sqrt(np.dot(heights, heights)) / len(heights))
phases = np.ones(n)
if initial:
for (c0, amplitude, phase) in initial.model:
for i, c in enumerate(constituents):
if c0 == c:
amplitudes[i] = amplitude
phases[i] = d2r*phase
initial = np.append(amplitudes, phases)
lsq = leastsq(residual, initial, Dfun=D_residual, col_deriv=True, ftol=1e-7)
model = np.zeros(1+n, dtype=cls.dtype)
model[0] = (constituent._Z0, z0, 0)
model[1:]['constituent'] = constituents[:]
model[1:]['amplitude'] = lsq[0][:n]
model[1:]['phase'] = lsq[0][n:]
if full_output:
return cls(model = model, radians = True), lsq
return cls(model = model, radians = True)
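# Minimal usage sketch (illustrative only, not part of the library above):
# build a model from a handful of constituents and predict heights. It assumes
# the bundled `constituent` and `astro` modules are importable; the amplitudes
# and phases below are made-up demonstration values.
if __name__ == "__main__":
    demo = Tide(
        constituents=[constituent._M2, constituent._S2,
                      constituent._K1, constituent._O1],
        amplitudes=[1.2, 0.4, 0.3, 0.2],    # metres (hypothetical)
        phases=[110.0, 150.0, 95.0, 80.0],  # degrees (hypothetical)
    )
    start = datetime(2013, 1, 1)
    prediction_times = [start + timedelta(hours=h) for h in range(25)]
    print(demo.at(prediction_times))  # hourly heights for one day
    print(demo.classify())            # e.g. 'mixed (semidiurnal)'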
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds perf trybots that can run telemetry tests."""
import json
import logging
import os
import re
import subprocess
import sys
import urllib2
from telemetry.core import platform
from telemetry import decorators
from telemetry.internal.browser import possible_browser
from telemetry.internal.platform import trybot_device
CHROMIUM_CONFIG_FILENAME = 'tools/run-perf-test.cfg'
BLINK_CONFIG_FILENAME = 'Tools/run-perf-test.cfg'
SUCCESS, NO_CHANGES, ERROR = range(3)
# Unsupported Perf bisect bots.
EXCLUDED_BOTS = {
'win_xp_perf_bisect',
'linux_perf_tester',
'linux_perf_bisector',
'win_perf_bisect_builder',
'win64_nv_tester',
'winx64_bisect_builder',
'linux_perf_bisect_builder',
'mac_perf_bisect_builder',
'android_perf_bisect_builder',
'android_arm64_perf_bisect_builder'
}
INCLUDE_BOTS = [
'trybot-all',
'trybot-all-win',
'trybot-all-mac',
'trybot-all-linux',
'trybot-all-android'
]
class TrybotError(Exception):
def __str__(self):
return '%s\nError running tryjob.' % self.args[0]
class PossibleTrybotBrowser(possible_browser.PossibleBrowser):
"""A script that sends a job to a trybot."""
def __init__(self, browser_type, _):
target_os = browser_type.split('-')[1]
self._builder_names = _GetBuilderNames(browser_type)
super(PossibleTrybotBrowser, self).__init__(browser_type, target_os, True)
def Create(self, finder_options):
raise NotImplementedError()
def SupportsOptions(self, finder_options):
if ((finder_options.device and finder_options.device != 'trybot') or
finder_options.chrome_root or
finder_options.cros_remote or
finder_options.extensions_to_load or
finder_options.profile_dir):
return False
return True
def IsRemote(self):
return True
def _RunProcess(self, cmd):
logging.debug('Running process: "%s"', ' '.join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
returncode = proc.poll()
return (returncode, out, err)
def _UpdateConfigAndRunTryjob(self, bot_platform, cfg_file_path):
"""Updates perf config file, uploads changes and excutes perf try job.
Args:
bot_platform: Name of the platform to be generated.
cfg_file_path: Perf config file path.
Returns:
(result, msg) where result is one of:
SUCCESS if a tryjob was sent
NO_CHANGES if there was nothing to try,
ERROR if a tryjob was attempted but an error encountered
and msg is an error message if an error was encountered, or rietveld
url if success, otherwise throws TrybotError exception.
"""
config = self._GetPerfConfig(bot_platform)
try:
config_file = open(cfg_file_path, 'w')
except IOError:
msg = 'Cannot find %s. Please run from src dir.' % cfg_file_path
return (ERROR, msg)
config_file.write('config = %s' % json.dumps(
config, sort_keys=True, indent=2, separators=(',', ': ')))
config_file.close()
# Commit the config changes locally.
returncode, out, err = self._RunProcess(
['git', 'commit', '-a', '-m', 'bisect config: %s' % bot_platform])
if returncode:
raise TrybotError('Could not commit bisect config change for %s,'
' error %s' % (bot_platform, err))
# Upload the CL to rietveld and run a try job.
returncode, out, err = self._RunProcess([
'git', 'cl', 'upload', '-f', '--bypass-hooks', '-m',
'CL for perf tryjob on %s' % bot_platform
])
if returncode:
      raise TrybotError('Could not upload to rietveld for %s, error %s' %
(bot_platform, err))
match = re.search(r'https://codereview.chromium.org/[\d]+', out)
if not match:
raise TrybotError('Could not upload CL to rietveld for %s! Output %s' %
(bot_platform, out))
rietveld_url = match.group(0)
# Generate git try command for available bots.
git_try_command = ['git', 'cl', 'try', '-m', 'tryserver.chromium.perf']
for bot in self._builder_names[bot_platform]:
git_try_command.extend(['-b', bot])
returncode, out, err = self._RunProcess(git_try_command)
if returncode:
raise TrybotError('Could not try CL for %s, error %s' %
(bot_platform, err))
return (SUCCESS, rietveld_url)
def _GetPerfConfig(self, bot_platform):
"""Generates the perf config for try job.
Args:
bot_platform: Name of the platform to be generated.
Returns:
A dictionary with perf config parameters.
"""
# Generate the command line for the perf trybots
target_arch = 'ia32'
arguments = sys.argv
if bot_platform in ['win', 'win-x64']:
arguments[0] = 'python tools\\perf\\run_benchmark'
else:
arguments[0] = './tools/perf/run_benchmark'
for index, arg in enumerate(arguments):
if arg.startswith('--browser='):
if bot_platform == 'android':
arguments[index] = '--browser=android-chromium'
elif any('x64' in bot for bot in self._builder_names[bot_platform]):
arguments[index] = '--browser=release_x64'
target_arch = 'x64'
else:
arguments[index] = '--browser=release'
command = ' '.join(arguments)
return {
'command': command,
'repeat_count': '1',
'max_time_minutes': '120',
'truncate_percent': '0',
'target_arch': target_arch,
}
def _AttemptTryjob(self, cfg_file_path):
"""Attempts to run a tryjob from the current directory.
This is run once for chromium, and if it returns NO_CHANGES, once for blink.
Args:
cfg_file_path: Path to the config file for the try job.
Returns:
Returns SUCCESS if a tryjob was sent, NO_CHANGES if there was nothing to
try, ERROR if a tryjob was attempted but an error encountered.
"""
source_repo = 'chromium'
if cfg_file_path == BLINK_CONFIG_FILENAME:
source_repo = 'blink'
    # TODO(prasadv): This method is quite long; we should consider refactoring
    # it by extracting helper methods.
returncode, original_branchname, err = self._RunProcess(
['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
if returncode:
msg = 'Must be in a git repository to send changes to trybots.'
if err:
msg += '\nGit error: %s' % err
logging.error(msg)
return ERROR
original_branchname = original_branchname.strip()
# Check if the tree is dirty: make sure the index is up to date and then
# run diff-index
self._RunProcess(['git', 'update-index', '--refresh', '-q'])
returncode, out, err = self._RunProcess(['git', 'diff-index', 'HEAD'])
if out:
logging.error(
'Cannot send a try job with a dirty tree. Commit locally first.')
return ERROR
# Make sure the tree does have local commits.
returncode, out, err = self._RunProcess(
['git', 'log', 'origin/master..HEAD'])
if not out:
return NO_CHANGES
# Create/check out the telemetry-tryjob branch, and edit the configs
# for the tryjob there.
returncode, out, err = self._RunProcess(
['git', 'checkout', '-b', 'telemetry-tryjob'])
if returncode:
logging.error('Error creating branch telemetry-tryjob. '
'Please delete it if it exists.\n%s', err)
return ERROR
try:
returncode, out, err = self._RunProcess(
['git', 'branch', '--set-upstream-to', 'origin/master'])
if returncode:
logging.error('Error in git branch --set-upstream-to: %s', err)
return ERROR
for bot_platform in self._builder_names:
try:
results, output = self._UpdateConfigAndRunTryjob(
bot_platform, cfg_file_path)
if results == ERROR:
logging.error(output)
return ERROR
print ('Uploaded %s try job to rietveld for %s platform. '
'View progress at %s' % (source_repo, bot_platform, output))
except TrybotError, err:
print err
logging.error(err)
finally:
# Checkout original branch and delete telemetry-tryjob branch.
# TODO(prasadv): This finally block could be extracted out to be a
# separate function called _CleanupBranch.
returncode, out, err = self._RunProcess(
['git', 'checkout', original_branchname])
if returncode:
logging.error('Could not check out %s. Please check it out and '
'manually delete the telemetry-tryjob branch. '
': %s', original_branchname, err)
return ERROR # pylint: disable=lost-exception
logging.info('Checked out original branch: %s', original_branchname)
returncode, out, err = self._RunProcess(
['git', 'branch', '-D', 'telemetry-tryjob'])
if returncode:
logging.error('Could not delete telemetry-tryjob branch. '
'Please delete it manually: %s', err)
return ERROR # pylint: disable=lost-exception
logging.info('Deleted temp branch: telemetry-tryjob')
return SUCCESS
def RunRemote(self):
"""Sends a tryjob to a perf trybot.
This creates a branch, telemetry-tryjob, switches to that branch, edits
the bisect config, commits it, uploads the CL to rietveld, and runs a
tryjob on the given bot.
"""
# First check if there are chromium changes to upload.
status = self._AttemptTryjob(CHROMIUM_CONFIG_FILENAME)
if status not in [SUCCESS, ERROR]:
# If we got here, there are no chromium changes to upload. Try blink.
os.chdir('third_party/WebKit/')
status = self._AttemptTryjob(BLINK_CONFIG_FILENAME)
os.chdir('../..')
if status not in [SUCCESS, ERROR]:
logging.error('No local changes found in chromium or blink trees. '
'browser=%s argument sends local changes to the '
'perf trybot(s): %s.', self.browser_type,
self._builder_names.values())
def _InitPlatformIfNeeded(self):
if self._platform:
return
self._platform = platform.GetHostPlatform()
# pylint: disable=W0212
self._platform_backend = self._platform._platform_backend
def SelectDefaultBrowser(_):
return None
def CanFindAvailableBrowsers():
return True
@decorators.Cache
def _GetTrybotList():
f = urllib2.urlopen(
'http://build.chromium.org/p/tryserver.chromium.perf/json')
builders = json.loads(f.read()).get('builders', {}).keys()
builders = ['trybot-%s' % bot.replace('_perf_bisect', '').replace('_', '-')
for bot in builders if bot not in EXCLUDED_BOTS]
builders.extend(INCLUDE_BOTS)
return sorted(builders)
def _GetBuilderNames(browser_type):
""" Return platform and its available bot name as dictionary."""
if 'all' not in browser_type:
bot = ['%s_perf_bisect' % browser_type.replace(
'trybot-', '').replace('-', '_')]
bot_platform = browser_type.split('-')[1]
if 'x64' in browser_type:
bot_platform += '-x64'
return {bot_platform: bot}
f = urllib2.urlopen(
'http://build.chromium.org/p/tryserver.chromium.perf/json')
builders = json.loads(f.read()).get('builders', {}).keys()
# Exclude unsupported bots like win xp and some dummy bots.
builders = [bot for bot in builders if bot not in EXCLUDED_BOTS]
platform_and_bots = {}
for os_name in ['linux', 'android', 'mac', 'win']:
platform_and_bots[os_name] = [bot for bot in builders if os_name in bot]
  # Special case for Windows x64: treat it as a separate platform whose
  # config should contain target_arch=x64 and --browser=release_x64.
  win_x64_bots = [bot for bot in platform_and_bots['win'] if 'x64' in bot]
  platform_and_bots['win'] = [
      bot for bot in platform_and_bots['win'] if 'x64' not in bot]
platform_and_bots['win-x64'] = win_x64_bots
if 'all-win' in browser_type:
return {'win': platform_and_bots['win'],
'win-x64': platform_and_bots['win-x64']}
if 'all-mac' in browser_type:
return {'mac': platform_and_bots['mac']}
if 'all-android' in browser_type:
return {'android': platform_and_bots['android']}
if 'all-linux' in browser_type:
return {'linux': platform_and_bots['linux']}
return platform_and_bots
def FindAllBrowserTypes(finder_options):
# Listing browsers requires an http request; only do this if the user is
# running with browser=list or a browser=trybot-* argument.
if (finder_options.browser_type and
(finder_options.browser_type == 'list' or
finder_options.browser_type.startswith('trybot'))):
return _GetTrybotList()
return []
def FindAllAvailableBrowsers(finder_options, device):
"""Find all perf trybots on tryserver.chromium.perf."""
if not isinstance(device, trybot_device.TrybotDevice):
return []
return [PossibleTrybotBrowser(b, finder_options) for b in
FindAllBrowserTypes(finder_options)]
|
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Shell for interactive testing.
This file is taken from the Lantz Project.
:copyright: (c) 2014-2020 by PyVISA Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import cmd
from typing import List, Tuple
from . import ResourceManager, VisaIOError, attributes, constants
from .thirdparty import prettytable
# TODO providing a way to list/use constants would be nice
class VisaShell(cmd.Cmd):
"""Shell for interactive testing."""
intro: str = "\nWelcome to the VISA shell. Type help or ? to list commands.\n"
prompt: str = "(visa) "
use_rawinput: bool = True
def __init__(self, library_path: str = ""):
super().__init__()
self.resource_manager = ResourceManager(library_path)
self.default_prompt = self.prompt
#: Resource list (used for autocomplete)
#: Store a tuple with the name and the alias.
self.resources: List[Tuple[str, str]] = []
#: Resource in use
#: pyvisa.resources.Resource
self.current = None
self.py_attr: List[str] = []
self.vi_attr: List[str] = []
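        #: Both attribute-name lists are filled in do_open() from the opened
        #: resource and drive tab-completion in complete_attr().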
def do_list(self, args):
"""List all connected resources."""
try:
resources = self.resource_manager.list_resources_info()
except Exception as e:
print(e)
else:
self.resources = []
for ndx, (resource_name, value) in enumerate(resources.items()):
if not args:
print("({0:2d}) {1}".format(ndx, resource_name))
if value.alias:
print(" alias: {}".format(value.alias))
self.resources.append((resource_name, value.alias or None))
def do_open(self, args):
"""Open resource by number, resource name or alias: open 3"""
if not args:
print("A resource name must be specified.")
return
if self.current:
print(
"You can only open one resource at a time. Please close the current one first."
)
return
if args.isdigit():
try:
args = self.resources[int(args)][0]
except IndexError:
print('Not a valid resource number. Use the command "list".')
return
try:
self.current = self.resource_manager.open_resource(args)
print(
"{} has been opened.\n"
'You can talk to the device using "write", "read" or "query".\n'
"The default end of message is added to each message.".format(args)
)
self.py_attr = []
self.vi_attr = []
for attr in getattr(self.current, "visa_attributes_classes", ()):
if attr.py_name:
self.py_attr.append(attr.py_name)
self.vi_attr.append(attr.visa_name)
self.prompt = "(open) "
except Exception as e:
print(e)
def complete_open(self, text, line, begidx, endidx):
"""Provide completion on open."""
if not self.resources:
self.do_list("do not print")
return [item[0] for item in self.resources if item[0].startswith(text)] + [
item[1] for item in self.resources if item[1] and item[1].startswith(text)
]
def do_close(self, args):
"""Close resource in use."""
if not self.current:
print('There are no resources in use. Use the command "open".')
return
try:
self.current.close()
except Exception as e:
print(e)
else:
print("The resource has been closed.")
self.current = None
self.prompt = self.default_prompt
def do_query(self, args):
"""Query resource in use: query *IDN?"""
if not self.current:
print('There are no resources in use. Use the command "open".')
return
try:
print("Response: {}".format(self.current.query(args)))
except Exception as e:
print(e)
def do_read(self, args):
"""Receive from the resource in use."""
if not self.current:
print('There are no resources in use. Use the command "open".')
return
try:
print(self.current.read())
except Exception as e:
print(e)
def do_write(self, args):
"""Send to the resource in use: send *IDN?"""
if not self.current:
print('There are no resources in use. Use the command "open".')
return
try:
self.current.write(args)
except Exception as e:
print(e)
def do_timeout(self, args):
"""Get or set timeout (in ms) for resource in use.
Get timeout:
timeout
Set timeout:
timeout <mstimeout>
"""
if not self.current:
print('There are no resources in use. Use the command "open".')
return
args = args.strip()
if not args:
try:
print("Timeout: {}ms".format(self.current.timeout))
except Exception as e:
print(e)
else:
args = args.split(" ")
try:
self.current.timeout = float(args[0])
print("Done")
except Exception as e:
print(e)
def print_attribute_list(self):
"""Print the supported attribute list."""
p = prettytable.PrettyTable(("VISA name", "Constant", "Python name", "val"))
for attr in getattr(self.current, "visa_attributes_classes", ()):
try:
val = self.current.get_visa_attribute(attr.attribute_id)
except VisaIOError as e:
val = e.abbreviation
except Exception as e:
val = str(e)
if len(val) > 10:
val = val[:10] + "..."
p.add_row((attr.visa_name, attr.attribute_id, attr.py_name, val))
print(p.get_string(sortby="VISA name"))
def do_attr(self, args): # noqa: C901
"""Get or set the state for a visa attribute.
List all attributes:
attr
Get an attribute state:
attr <name>
Set an attribute state:
attr <name> <state>
"""
if not self.current:
print('There are no resources in use. Use the command "open".')
return
args = args.strip()
if not args:
self.print_attribute_list()
return
args = args.split(" ")
if len(args) > 2:
print(
"Invalid syntax, use `attr <name>` to get; or `attr <name> <value>` to set"
)
return
if len(args) == 1:
# Get a given attribute
attr_name = args[0]
if attr_name.startswith("VI_"):
try:
print(
self.current.get_visa_attribute(getattr(constants, attr_name))
)
except Exception as e:
print(e)
else:
try:
print(getattr(self.current, attr_name))
except Exception as e:
print(e)
return
# Set the specified attribute value
attr_name, attr_state = args[0], args[1]
if attr_name.startswith("VI_"):
try:
attributeId = getattr(constants, attr_name)
attr = attributes.AttributesByID[attributeId]
datatype = attr.visa_type
retcode = None
if datatype == "ViBoolean":
if attr_state == "True":
attr_state = True
elif attr_state == "False":
attr_state = False
else:
retcode = (
constants.StatusCode.error_nonsupported_attribute_state
)
elif datatype in [
"ViUInt8",
"ViUInt16",
"ViUInt32",
"ViInt8",
"ViInt16",
"ViInt32",
]:
try:
attr_state = int(attr_state)
except ValueError:
retcode = (
constants.StatusCode.error_nonsupported_attribute_state
)
if not retcode:
retcode = self.current.set_visa_attribute(attributeId, attr_state)
if retcode:
print("Error {}".format(str(retcode)))
else:
print("Done")
except Exception as e:
print(e)
else:
print("Setting Resource Attributes by python name is not yet supported.")
return
def complete_attr(self, text, line, begidx, endidx):
"""Provide completion for the attr command."""
return [item for item in self.py_attr if item.startswith(text)] + [
item for item in self.vi_attr if item.startswith(text)
]
def do_termchar(self, args):
"""Get or set termination character for resource in use.
<termchar> can be one of: CR, LF, CRLF, NUL or None.
None is used to disable termination character
Get termination character:
termchar
Set termination character read or read+write:
termchar <termchar> [<termchar>]
"""
if not self.current:
print('There are no resources in use. Use the command "open".')
return
args = args.strip()
if not args:
try:
charmap = {u"\r": "CR", u"\n": "LF", u"\r\n": "CRLF", u"\0": "NUL"}
chr = self.current.read_termination
if chr in charmap:
chr = charmap[chr]
chw = self.current.write_termination
if chw in charmap:
chw = charmap[chw]
print("Termchar read: {} write: {}".format(chr, chw))
except Exception as e:
                print(e)
            return
args = args.split(" ")
if len(args) > 2:
print(
"Invalid syntax, use `termchar <termchar>` to set both "
"read_termination and write_termination to the same value, or "
"`termchar <read_termchar> <write_termchar>` to use distinct values."
)
else:
charmap = {
"CR": u"\r",
"LF": u"\n",
"CRLF": u"\r\n",
"NUL": u"\0",
"None": None,
}
chr = args[0]
chw = args[0 if len(args) == 1 else 1]
if chr in charmap and chw in charmap:
try:
self.current.read_termination = charmap[chr]
self.current.write_termination = charmap[chw]
print("Done")
except Exception as e:
print(e)
else:
print("use CR, LF, CRLF, NUL or None to set termchar")
return
def do_exit(self, arg):
"""Exit the shell session."""
if self.current:
self.current.close()
self.resource_manager.close()
del self.resource_manager
return True
def do_EOF(self, arg):
"""Handle an EOF."""
return True
def main(library_path=""):
"""Main entry point to start the shell."""
VisaShell(library_path).cmdloop()
|
|
from core.app import App, AppParameter, AppFile
import unittest
import shutil
import pdb
class TestAppParameter(unittest.TestCase):
"""
test for App Parameters
"""
def test_check_error_parameter_type(self):
param = {'type': 'error type'}
self.assertRaises(TypeError, AppParameter, param)
def test_number(self):
num_param = {
'separator': " ",
'prefix': "-n",
'type': 'number',
'required': True,
'minvalue': 0,
'maxvalue': 100,
'hint': 'number test',
'default': 6
}
param = AppParameter(num_param)
self.assertEqual(param.__str__(), '-n 6')
def test_string(self):
string_param = {
'separator': " ",
'prefix': "-db",
'type': 'string',
'required': True,
            'quotes': False,
            'hint': 'working space',
            'default': 'nt'
}
param = AppParameter(string_param)
self.assertEqual(param.__str__(), '-db nt')
string_param['quotes'] = True
param = AppParameter(string_param)
self.assertEqual(param.__str__(), "-db 'nt'")
def test_array(self):
array_param = {
"separator": '=',
"prefix": '-taxid',
"type": 'array',
"required": True,
"minitems": 1,
"maxitems": 100,
"item":{
"type": 'string',
"item_quotes": True,
"separator": ' ',
"is_split": True,
},
"hint": 'array test',
"default": ['4', '5', '6', '7']
}
param = AppParameter(array_param)
self.assertEqual(param.__str__(), "-taxid='4' -taxid='5' -taxid='6' -taxid='7'")
# print param
(array_param['item']['is_split'], array_param['item']['item_quotes']) = (True, False)
param = AppParameter(array_param)
self.assertEqual(param.__str__(), "-taxid=4 -taxid=5 -taxid=6 -taxid=7")
# print param
(array_param['item']['is_split'], array_param['item']['item_quotes']) = (False, True)
param = AppParameter(array_param)
self.assertEqual(param.__str__(), "-taxid='4' '5' '6' '7'")
# print param
(array_param['item']['is_split'], array_param['item']['item_quotes']) = (False, False)
param = AppParameter(array_param)
self.assertEqual(param.__str__(), "-taxid=4 5 6 7")
# print param
array_param['item']['separator'] = ','
param = AppParameter(array_param)
self.assertEqual(param.__str__(), "-taxid=4,5,6,7")
# print param
array_param['separator'] = ' '
param = AppParameter(array_param)
self.assertEqual(param.__str__(), "-taxid 4,5,6,7")
# print param
def test_flag(self):
flag_param = {
'separator': ' ',
'prefix': '-pe',
'type': 'flag',
'required': True,
'hint': 'flag test',
'default': True
}
param = AppParameter(flag_param)
self.assertEqual(param.__str__(), "-pe")
flag_param['value']=False
param = AppParameter(flag_param)
self.assertEqual(param.__str__(), "")
def test_boolean(self):
boolean_param = {
'separator': ' ',
'prefix': '-pe',
'type': 'boolean',
'required': True,
'hint': 'boolean test',
'default': True
}
param = AppParameter(boolean_param)
self.assertEqual(param.__str__(), "-pe True")
boolean_param['value']=False
param = AppParameter(boolean_param)
self.assertEqual(param.__str__(), "-pe False")
class TestAppFile(unittest.TestCase):
"""docstring for TestAppFile"""
def setUp(self):
self.file = {
'type': 'file',
'required': True,
'minitems': 1,
'maxitems': 1,
'item':{
'separator': " "
},
'formats': ['tgz']
}
def test_path(self):
output = AppFile(self.file)
self.assertEqual(output.path[:10], "/var/data/")
self.assertEqual(output.path[-3:], "tgz")
def test_enid(self):
output = AppFile(self.file)
self.assertEqual(len(output.enid), 32)
def test_format(self):
self.file['formats'] = 'bam'
output = AppFile(self.file)
self.assertEqual(output.path[-3:], "bam")
def test_name(self):
self.file['name'] = '/path/to/data'
output = AppFile(self.file)
self.assertEqual(output.path, "/path/to/data")
class TestApp(unittest.TestCase):
"""docstring for TestApp"""
def setUp(self):
#setUp before each test
self.app = App('test/test_app')
self.app.new()
self.app.load()
def tearDown(self):
# tearDown after each test
shutil.rmtree('test/test_app')
def test_load(self):
self.assertEqual(self.app.config['app']['name'], 'app name')
# print app.config
def test_new(self):
        # done in setUp
pass
def test_setParameters(self):
self.app.parameters['Inputs']['bam']={'data': [{"name":"/path/to/data1"}, {"name":"/path/to/data2"}]}
self.app.setParameters()
self.assertEqual(self.app['inputs']['bam'][0].path, "/path/to/data1")
self.assertEqual(self.app['outputs']['results'][0].path, "/var/data/80bad55acc41d5fde324415808d0a700.tgz")
        # try setParameters after newParameters
self.app.newParameters('test/test_app/test_parameter.yaml')
self.app.parameters['Parameters']['workspace']['value'] = '/path/to/data3'
self.app.setParameters()
self.assertEqual(self.app['parameters']['workspace'].__str__(), '/path/to/data3')
def test_newParameters(self):
self.app.newParameters()
self.app.newParameters('test/test_app/test_parameter.yaml')
self.app.newParameters()
# print self.app.parameters
def test_newParameters_after_setParameters(self):
self.app.setParameters()
self.app.newParameters('test/test_app/test_parameter.yaml')
def test_newParameters_before_setParameters(self):
self.app.newParameters('test/test_app/test_parameter.yaml')
self.app.setParameters()
def test_newParameters_and_setParameters_more(self):
self.app.newParameters('test/test_app/test_parameter.yaml')
self.app.setParameters()
self.app.newParameters('test/test_app/test_parameter.yaml')
self.app.setParameters()
def test_newParameters_and_setParameters_more2(self):
self.app.setParameters()
self.app.newParameters('test/test_app/test_parameter.yaml')
self.app.setParameters()
self.app.newParameters('test/test_app/test_parameter.yaml')
def test_loadParameters(self):
self.app.newParameters('test/test_app/test_parameter.yaml')
self.app.loadParameters('test/test_app/test_parameter.yaml')
def test_loadParameters_None(self):
self.assertRaises(ValueError, self.app.loadParameters)
def test_nodes(self):
load_case = {'bam': {'alias': 'load bam', 'node_id': 'loaddata_bam', 'name': 'loaddata', 'parameters': None, 'inputs': None, 'outputs': {'data': {'enid': 'bam'}}, 'type': 'system', 'app_id': '55128c58f6f4067d63b956b5'}}
self.assertEqual(cmp(self.app.nodes('load'), load_case), 0)
store_case = {'results': {'alias': 'store results', 'node_id': 'storedata_results', 'name': 'storedata', 'parameters': {'description': {'variable': True, 'value': None}, 'name': {'variable': True, 'value': None}}, 'inputs': {'data': {'enid': 'results'}}, 'outputs': None, 'type': 'system', 'app_id': '55128c94f6f4067d63b956b6'}}
self.assertEqual(cmp(self.app.nodes('store'), store_case), 0)
app_case = {'app name': {'inputs': {'bam': [{'enid': 'bam'}]}, 'name': 'app name', 'parameters': {'workspace': {'variable': False, 'value': None}, 'is_genedock': {'variable': False, 'value': None}}, 'outputs': {'results': [{'enid': 'results'}]}, 'app_id': '', 'alias': 'app name', 'node_id': 'app_name', 'type': 'private'}}
self.assertEqual(cmp(self.app.nodes('app'), app_case), 0)
def test_workflow(self):
self.app.buildTestWorkflow()
test_case = {'workflow': {'account': '[email protected]', 'version': 1, 'nodelist': [{'alias': 'load bam', 'node_id': 'loaddata_bam', 'name': 'loaddata', 'parameters': None, 'inputs': None, 'outputs': {'data': {'enid': 'bam'}}, 'type': 'system', 'app_id': '55128c58f6f4067d63b956b5'}, {'inputs': {'bam': [{'enid': 'bam'}]}, 'name': 'app name', 'parameters': {'workspace': {'variable': False, 'value': None}, 'is_genedock': {'variable': False, 'value': None}}, 'outputs': {'results': [{'enid': 'results'}]}, 'app_id': '', 'alias': 'app name', 'node_id': 'app_name', 'type': 'private'}, {'alias': 'store results', 'node_id': 'storedata_results', 'name': 'storedata', 'parameters': {'description': {'variable': True, 'value': None}, 'name': {'variable': True, 'value': None}}, 'inputs': {'data': {'enid': 'results'}}, 'outputs': None, 'type': 'system', 'app_id': '55128c94f6f4067d63b956b6'}], 'name': 'test_app name', 'description': 'test_app name'}}
self.assertEqual(cmp(self.app.workflow, test_case), 0)
def test_renderScript(self):
self.app.newParameters()
self.app.parameters['Parameters']['is_genedock']['value']=True
self.app.config['app']['outputs']=None
self.app.setParameters()
self.app.renderScript()
# print [self.app.script]
self.assertEqual(self.app.script, u'\nmkdir -p /data/project/id;\n\nln -s /var/data/540ef712ea55aa2db8a4cfea4782c74d.bam /data/project/id/samplename.bam;\n\n')
def test_build(self):
test_case = u'\nmkdir -p /data/project/id;\n\nln -s /var/data/540ef712ea55aa2db8a4cfea4782c74d.bam /data/project/id/samplename.bam;\n\n'
self.app.build(None, '/dev/null')
self.assertEqual(self.app.script, test_case)
self.app.newParameters('test/test_app/test_parameter.yaml')
self.app.build('test/test_app/test_parameter.yaml', '/dev/null')
self.assertEqual(self.app.script, test_case)
def test_dumpYaml(self):
self.app.setParameters()
self.app.renderScript()
# pdb.set_trace()
# self.app.dumpYaml(self.app.script, None)
if __name__ == '__main__':
unittest.main()
|
|
import argparse
import glob
import cv2
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, BatchNormalization, Lambda, Activation
from keras.layers import Conv2D, MaxPooling2D, concatenate, Input
from keras.callbacks import TensorBoard
from keras.models import load_model, Model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
import tqdm
from moviepy.editor import VideoFileClip
from scipy.ndimage import label
from sklearn.model_selection import train_test_split
import matplotlib
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import json
from PIL import Image, ImageDraw, ImageFont
import multiprocessing
matplotlib.style.use('ggplot')
import logging
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
# The background is set with 40 plus the number of the color, and the foreground with 30.
# These are the ANSI escape sequences needed to get colored output.
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
def formatter_message(message, use_color = True):
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
COLORS = {
'WARNING': YELLOW,
'INFO': WHITE,
'DEBUG': BLUE,
'CRITICAL': YELLOW,
'ERROR': RED
}
class ColoredFormatter(logging.Formatter):
def __init__(self, msg, use_color = True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
levelname = record.levelname
if self.use_color and levelname in COLORS:
levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + levelname + RESET_SEQ
record.levelname = levelname_color
return logging.Formatter.format(self, record)
# Custom logger class with multiple destinations
class ColoredLogger(logging.Logger):
FORMAT = "[$BOLD%(name)-20s$RESET][%(levelname)-18s] %(message)s ($BOLD%(filename)s$RESET:%(lineno)d)"
COLOR_FORMAT = formatter_message(FORMAT, True)
def __init__(self, name):
logging.Logger.__init__(self, name, logging.DEBUG)
color_formatter = ColoredFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(color_formatter)
self.addHandler(console)
return
logging.setLoggerClass(ColoredLogger)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
####################################################### MAIN CODE ########################################################
class Frames:
def __init__(self):
self._initialized = False
self._current_frame = 0
self._prev_bboxes = []
def init(self, img):
self._heatmap = np.zeros_like(img)
def _add_heat(self, bbox_list):
for box in bbox_list:
self._heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
return self._heatmap
def _apply_threshold(self, threshold):
self._heatmap[self._heatmap <= threshold] = 0
return self._heatmap
def get_labels(self, bboxes, threshold):
if len(self._prev_bboxes) > threshold:
# Then remove the last bbox list from the previous frames
self._prev_bboxes.pop(0)
for pbboxes in self._prev_bboxes:
self._add_heat(pbboxes)
self._add_heat(bboxes)
# Add the latest one
self._prev_bboxes.append(bboxes)
# Figure out the thresholded value
# self._apply_threshold(threshold)
labels = label(self._heatmap)
bboxes = []
# Iterate through all detected cars
for car_number in range(1, labels[1] + 1):
# Find pixels with each car_number label value
nonzero = (labels[0] == car_number).nonzero()
# Identify x and y values of those pixels
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Define a bounding box based on min/max x and y
bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
bboxes.append(bbox)
# Get a viewable heatmap
heatmap = np.clip(self._heatmap, 0, 255)
heatmap[heatmap[:, :, 0] > 0] += 100
heatmap[:, :, 1] = 0
heatmap[:, :, 2] = 0
return bboxes, heatmap
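# Intended use of Frames: call init(img) once, then get_labels(bboxes, threshold) per
# frame to accumulate a multi-frame heatmap and return merged boxes. Note that the
# pipeline below only calls frames.init(); the get_labels() path is currently commented
# out in favor of a simpler rolling-average heatmap.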
frames = Frames()
def overlay_text(image, text, pos=(0, 0), color=(255, 255, 255)):
image = Image.fromarray(image)
draw = ImageDraw.Draw(image)
font = ImageFont.truetype("./fonts/liberation-sans.ttf", 64)
draw.text(pos, text, color, font=font)
image = np.asarray(image)
return image
def overlay_image(img1, img2):
img1[0:img2.shape[0], 0:img2.shape[1]] = img2[:, :]
return img1
# Here is your draw_boxes function from the previous exercise
def draw_boxes(img, bboxes, color=(0, 255, 0), thick=2):
# Make a copy of the image
imcopy = np.copy(img)
for bbox in bboxes:
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
class LeNet:
@staticmethod
def build(width, height, depth, weightsPath=None):
model = Sequential()
# First set Conv Layers
model.add(Conv2D(8, (3, 3), padding='valid', input_shape=(width, height, depth), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(BatchNormalization())
# 2nd set Conv layers
model.add(Conv2D(16, (3, 3), padding='valid', input_shape=(width, height, depth), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(BatchNormalization())
model.add(Conv2D(32, (3, 3), padding='valid', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), padding='valid', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# Set of FC => Relu layers
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
# Softmax classifier
model.add(Dense(1))
model.add(Activation('sigmoid'))
if weightsPath is not None:
model.load_weights(weightsPath)
return model
class SimpleInception:
@staticmethod
def build(width, height, depth, weightsPath=None):
input_img = Input(shape=(width, height, depth))
# First set Conv Layers
tower_1 = Conv2D(32, (1, 1), padding='same', activation='relu')(input_img)
tower_1 = Conv2D(32, (3, 3), padding='same', activation='relu')(tower_1)
tower_2 = Conv2D(32, (1, 1), padding='same', activation='relu')(input_img)
tower_2 = Conv2D(32, (5, 5), padding='same', activation='relu')(tower_2)
tower_3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(input_img)
tower_3 = Conv2D(32, (1, 1), padding='same', activation='relu')(tower_3)
concat = concatenate([tower_1, tower_2, tower_3], axis=3)
# Set of FC => Relu layers
flatten = Flatten()(concat)
dense1 = (Dense(256)(flatten))
activation1 = Activation('relu')(dense1)
dropout1 = Dropout(0.5)(activation1)
# Softmax classifier
dense2 = Dense(1)(dropout1)
output = Activation('sigmoid')(dense2)
model = Model(inputs=input_img, outputs=output)
if weightsPath is not None:
model.load_weights(weightsPath)
return model
def read_image(filename):
logger.debug("Reading an image")
img = mpimg.imread(filename)
return img
def create_training_data():
logger.info("Creating Training Data")
vehicles = []
for filename in tqdm.tqdm(glob.iglob('training/vehicles/**/*.png', recursive=True)):
img = read_image(filename)
vehicles.append(img)
nonvehicles = []
for filename in tqdm.tqdm(glob.iglob('training/non-vehicles/**/*.png', recursive=True)):
img = read_image(filename)
nonvehicles.append(img)
return vehicles, nonvehicles
def train_model(vehicles, non_vehicles):
generator = ImageDataGenerator( featurewise_center=True,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=20.,
width_shift_range=0.4,
height_shift_range=0.4,
shear_range=0.2,
zoom_range=0.2,
channel_shift_range=0.1,
fill_mode='nearest',
horizontal_flip=True,
vertical_flip=False,
rescale=1.2,
preprocessing_function=None)
logger.info("Training the Model")
vehicles_labels = np.ones(len(vehicles))
non_vehicles_labels = np.zeros(len(non_vehicles))
labels = np.hstack((vehicles_labels, non_vehicles_labels))
data = np.array(vehicles + non_vehicles)
if len(vehicles[0].shape) == 3:
width, height, depth = vehicles[0].shape[1], vehicles[0].shape[0], vehicles[0].shape[2]
else:
width, height, depth = vehicles[0].shape[1], vehicles[0].shape[0], 1
# model = LeNet.build(width, height, depth)
model = SimpleInception.build(width, height, depth)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
trainData, testData, trainLabels, testLabels = train_test_split(data, labels, random_state=20)
filepath = "inception.best.h5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
generator.fit(trainData)
hist = model.fit_generator(generator.flow(trainData, trainLabels, batch_size=16),
steps_per_epoch= int(len(trainData) / 16),
epochs=30,
verbose=1,
validation_data=(testData, testLabels),
callbacks=[TensorBoard(log_dir='logs'), checkpoint])
print("[INFO] dumping weights to file...")
model.save("inception.h5", overwrite=True)
model.save_weights("inception_weights.hdf5", overwrite=True)
fp = open("history_inception.json", 'w')
json.dump(hist.history, fp)
def generate_sliding_windows_old(img, window_sizes):
height, width = img.shape[0], img.shape[1]
# x_start = width // 2 - 100
x_start = 0
x_stop = width
y_start = height // 2 + 20
y_stop = height - 70
current_x = x_start
current_y = y_start
# Towards the bottom of the image use bigger bounding boxes
window_list = []
for (window_size, overlap) in window_sizes:
while current_x < x_stop:
end_x = current_x + window_size[0]
while current_y < y_stop:
end_y = current_y + window_size[1]
window_list.append(((int(current_x), int(current_y)), (int(end_x), int(end_y))))
current_y = end_y - window_size[1] * overlap[1]
# At this point reset the x and update the y
current_y = y_start
current_x = end_x - (window_size[0] * overlap[0])
return window_list
def slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None],
xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
# If x and/or y start/stop positions not defined, set to image size
if x_start_stop[0] == None:
x_start_stop[0] = 0
if x_start_stop[1] == None:
x_start_stop[1] = img.shape[1]
if y_start_stop[0] == None:
y_start_stop[0] = 0
if y_start_stop[1] == None:
y_start_stop[1] = img.shape[0]
# Compute the span of the region to be searched
xspan = x_start_stop[1] - x_start_stop[0]
yspan = y_start_stop[1] - y_start_stop[0]
# Compute the number of pixels per step in x/y
    nx_pix_per_step = int(xy_window[0] * (1 - xy_overlap[0]))
    ny_pix_per_step = int(xy_window[1] * (1 - xy_overlap[1]))
    # Compute the number of windows in x/y
    nx_buffer = int(xy_window[0] * (xy_overlap[0]))
    ny_buffer = int(xy_window[1] * (xy_overlap[1]))
    nx_windows = int((xspan - nx_buffer) / nx_pix_per_step)
    ny_windows = int((yspan - ny_buffer) / ny_pix_per_step)
# Initialize a list to append window positions to
window_list = []
# Loop through finding x and y window positions
# Note: you could vectorize this step, but in practice
# you'll be considering windows one by one with your
# classifier, so looping makes sense
for ys in range(ny_windows):
for xs in range(nx_windows):
# Calculate window position
startx = xs * nx_pix_per_step + x_start_stop[0]
endx = startx + xy_window[0]
starty = ys * ny_pix_per_step + y_start_stop[0]
endy = starty + xy_window[1]
# Append window position to list
window_list.append(((startx, starty), (endx, endy)))
# Return the list of windows
return window_list
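# Rough sizing sketch (illustrative numbers, not this project's actual search region):
# for a 1280x720 frame with xy_window=(64, 64) and xy_overlap=(0.5, 0.5) the step is
# 32 px, giving int((1280 - 32) / 32) = 39 windows in x and int((720 - 32) / 32) = 21
# in y, i.e. 819 windows at a single scale.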
def generate_sliding_windows(img):
n_win_size = 3
min_size = (30, 30)
max_size = (120, 120)
roi_upper = 380
roi_lower = 650
win_step_w = int((max_size[0] - min_size[0]) / n_win_size)
win_step_h = int((max_size[1] - min_size[1]) / n_win_size)
window_sizes = [(min_size[0] + i * win_step_w, min_size[1] + i * win_step_h) for i in range(n_win_size + 1)]
all_windows = []
for win_size in window_sizes:
windows = slide_window(img, x_start_stop=[None, None], y_start_stop=[roi_upper, roi_lower],
xy_window=win_size, xy_overlap=(0.5, 0.5))
all_windows += windows
f = open('all_windows_mine.csv', 'w')
for w in all_windows:
f.write(str(w))
f.write("\n")
f.close()
return all_windows
def crop_and_predict(idx, img, window, spatial_size):
global model
cropped = img[window[0][1]:window[1][1], window[0][0]:window[1][0]]
# cv2.imwrite("cropped/" + str(idx) + ".png", cropped)
cropped = cv2.resize(cropped, spatial_size)
cropped = np.array([cropped])
return (window, cropped)
def add_heat(heatmap, bbox_list):
# Iterate through list of bboxes
for box in bbox_list:
# Add += 1 for all pixels inside each bbox
# Assuming each "box" takes the form ((x1, y1), (x2, y2))
heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
# Return updated heatmap
    return heatmap
def apply_threshold(heatmap, threshold):
# Zero out pixels below the threshold
heatmap[heatmap <= threshold] = 0
# Return thresholded map
return heatmap
def draw_labeled_bboxes(img, labels):
# Iterate through all detected cars
for car_number in range(1, labels[1]+1):
# Find pixels with each car_number label value
nonzero = (labels[0] == car_number).nonzero()
# Identify x and y values of those pixels
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Define a bounding box based on min/max x and y
bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
# Draw the box on the image
cv2.rectangle(img, bbox[0], bbox[1], (0,255,0), 5)
# Return the image
return img
counter = 0
window_sizes = [[(30, 30), (0.5, 0.5)],
[(60, 60), (0.5, 0.5)],
[(90, 90), (0.5, 0.5)],
[(120, 120), (0.5, 0.5)]]
window_list = None
last_heat_map = None
use_n_frames = 3
def prediction_pipeline(img):
global counter, model, frames, window_list, last_heat_map, use_n_frames
# Normalize the image
logger.debug("Scaling the image colors to 0-1")
# window_list = generate_sliding_windows(img)
if window_list is None:
window_list = generate_sliding_windows(img)
frames.init(img)
spatial_size = (64, 64)
prediction_images = []
original_cropped_images = []
for idx, window in enumerate(window_list):
# In numpy the x & Y directions are reversed
cropped = img[window[0][1]:window[1][1], window[0][0]:window[1][0]]
cropped_copy = np.copy(cropped)
original_cropped_images.append(cropped_copy)
cropped = cv2.resize(cropped, spatial_size)
cropped = cropped / 255
prediction_images.append(cropped)
prediction_images = np.array(prediction_images)
prediction = np.round(model.predict(prediction_images))
found_cars = [window_list[i] for i in np.where(prediction == 1)[0]]
found_car_idx = set([i for i in np.where(prediction == 1)[0]])
for idx, window in enumerate(window_list):
fname_prefix = "not_car"
if idx in found_car_idx:
fname_prefix = "car"
mpimg.imsave("cropped/frame_" + str(counter) + "_" + str(idx) + "_" + fname_prefix + ".png", original_cropped_images[idx])
# Now filter out the False positives
# Define heatmap
    heat = np.zeros_like(img[:, :, 0]).astype(float)
# Add heat to each box in box list
heat = add_heat(heat, found_cars)
# Apply threshold to help remove false positives
heat = apply_threshold(heat, 1)
# Visualize the heatmap when displaying
heatmap = np.clip(heat, 0, 255)
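    # Smooth detections over time: keep a short rolling buffer of recent heatmaps
    # (bounded by use_n_frames) with the newest frame first, and average the buffer
    # before labeling.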
heatmap = heatmap.reshape(1, *heatmap.shape)
if last_heat_map is None:
last_heat_map = heatmap
heatmap = last_heat_map[0]
else:
last_heat_map = last_heat_map[:use_n_frames, :]
last_heat_map = np.concatenate([heatmap, last_heat_map])
heatmap = last_heat_map.mean(axis=0)
# Find final boxes from heatmap using label function
labels = label(heatmap)
new_img = draw_labeled_bboxes(np.copy(img), labels)
# found_cars, heatmap = frames.get_labels(found_cars, threshold=3)
heatmap = cv2.resize(heatmap, (heatmap.shape[1] // 4, heatmap.shape[0] // 4))
heatmap = np.dstack((heatmap, heatmap, heatmap)) * 255
heatmap[:, :, 1] = 0
heatmap[:, :, 2] = 0
new_img = overlay_image(new_img, heatmap)
# found_cars = window_list
# new_img = draw_boxes(original, window_list)
# new_img = draw_boxes(original, bboxes, color=(0, 1, 0), thick=2)
mpimg.imsave('video_imgs/' + str(counter) + ".png", new_img)
counter += 1
return new_img
def detection_on_video():
global model
# Load the model
# model = LeNet.build(64, 64, 3, 'weights.h5')
# model = load_model('lenet.h5')
model = load_model('inception.best.h5')
filename = 'project_video.mp4'
# clip = VideoFileClip(filename).subclip(21, 23)
clip = VideoFileClip(filename)
output_clip = clip.fl_image(prediction_pipeline)
output_clip.write_videofile("output_" + filename, audio=False)
def argument_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--action", help="Perform Training", type=str, choices=["train", "test", "test_img"], default="train")
parser.add_argument("--file", help="File to perform action on", type=str)
args = parser.parse_args()
return args
def main():
global model
args = argument_parser()
if args.action == "train":
vehicles, nonvehicles = create_training_data()
train_model(vehicles, nonvehicles)
elif args.action == "test":
detection_on_video()
elif args.action == "test_img":
model = load_model('inception.best.h5')
img = read_image(args.file)
new_img = prediction_pipeline(img) / 255
fig, ax = plt.subplots(1, 1, figsize=(20, 20))
ax.imshow(new_img)
ax.axis('off')
plt.show()
if __name__ == "__main__":
main()
|
|
# Copyright 2018, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import threading
import time
import types as stdlib_types
import mock
import pytest
from google.api_core import bidi
from google.api_core import exceptions
from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.subscriber import client
from google.cloud.pubsub_v1.subscriber import message
from google.cloud.pubsub_v1.subscriber import scheduler
from google.cloud.pubsub_v1.subscriber._protocol import dispatcher
from google.cloud.pubsub_v1.subscriber._protocol import heartbeater
from google.cloud.pubsub_v1.subscriber._protocol import leaser
from google.cloud.pubsub_v1.subscriber._protocol import messages_on_hold
from google.cloud.pubsub_v1.subscriber._protocol import requests
from google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager
from google.cloud.pubsub_v1.subscriber import exceptions as subscriber_exceptions
from google.cloud.pubsub_v1.subscriber import futures
from google.pubsub_v1 import types as gapic_types
import grpc
from google.rpc import status_pb2
from google.rpc import code_pb2
from google.rpc import error_details_pb2
@pytest.mark.parametrize(
"exception,expected_cls",
[
(ValueError("meep"), ValueError),
(
mock.create_autospec(grpc.RpcError, instance=True),
exceptions.GoogleAPICallError,
),
({"error": "RPC terminated"}, Exception),
("something broke", Exception),
],
)
def test__wrap_as_exception(exception, expected_cls):
assert isinstance(
streaming_pull_manager._wrap_as_exception(exception), expected_cls
)
def test__wrap_callback_errors_no_error():
msg = mock.create_autospec(message.Message, instance=True)
callback = mock.Mock()
on_callback_error = mock.Mock()
streaming_pull_manager._wrap_callback_errors(callback, on_callback_error, msg)
callback.assert_called_once_with(msg)
msg.nack.assert_not_called()
on_callback_error.assert_not_called()
def test__wrap_callback_errors_error():
callback_error = ValueError("meep")
msg = mock.create_autospec(message.Message, instance=True)
callback = mock.Mock(side_effect=callback_error)
on_callback_error = mock.Mock()
streaming_pull_manager._wrap_callback_errors(callback, on_callback_error, msg)
msg.nack.assert_called_once()
on_callback_error.assert_called_once_with(callback_error)
def test_constructor_and_default_state():
manager = streaming_pull_manager.StreamingPullManager(
mock.sentinel.client, mock.sentinel.subscription
)
# Public state
assert manager.is_active is False
assert manager.flow_control == types.FlowControl()
assert manager.dispatcher is None
assert manager.leaser is None
assert manager.ack_histogram is not None
assert manager.ack_deadline == 10
assert manager.load == 0
# Private state
assert manager._client == mock.sentinel.client
assert manager._subscription == mock.sentinel.subscription
assert manager._scheduler is not None
assert manager._messages_on_hold is not None
assert manager._client_id is not None
def test_constructor_with_options():
manager = streaming_pull_manager.StreamingPullManager(
mock.sentinel.client,
mock.sentinel.subscription,
flow_control=mock.sentinel.flow_control,
scheduler=mock.sentinel.scheduler,
)
assert manager.flow_control == mock.sentinel.flow_control
assert manager._scheduler == mock.sentinel.scheduler
def make_manager(**kwargs):
client_ = mock.create_autospec(client.Client, instance=True)
scheduler_ = mock.create_autospec(scheduler.Scheduler, instance=True)
return streaming_pull_manager.StreamingPullManager(
client_, "subscription-name", scheduler=scheduler_, **kwargs
)
def complete_modify_ack_deadline_calls(dispatcher):
def complete_futures(*args, **kwargs):
modack_requests = args[0]
for req in modack_requests:
if req.future:
req.future.set_result(subscriber_exceptions.AcknowledgeStatus.SUCCESS)
dispatcher.modify_ack_deadline.side_effect = complete_futures
def fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10):
"""Add a simplified fake add() method to a leaser instance.
The fake add() method actually increases the leaser's internal message count
by one for each message, and the total bytes by ``assumed_msg_size`` for
each message (regardless of the actual message size).
"""
def fake_add(self, items):
self.message_count += len(items)
self.bytes += len(items) * assumed_msg_size
leaser.message_count = init_msg_count
leaser.bytes = init_msg_count * assumed_msg_size
leaser.add = stdlib_types.MethodType(fake_add, leaser)
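# Illustration of the fake: with init_msg_count=11 and assumed_msg_size=10 the leaser
# starts at message_count=11 / bytes=110, and adding two messages (of any real size)
# bumps it to message_count=13 / bytes=130.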
def test__obtain_ack_deadline_no_custom_flow_control_setting():
from google.cloud.pubsub_v1.subscriber._protocol import histogram
manager = make_manager()
# Make sure that min_duration_per_lease_extension and
# max_duration_per_lease_extension is disabled.
manager._flow_control = types.FlowControl(
min_duration_per_lease_extension=0, max_duration_per_lease_extension=0
)
deadline = manager._obtain_ack_deadline(maybe_update=True)
assert deadline == histogram.MIN_ACK_DEADLINE
# When we get some historical data, the deadline is adjusted.
manager.ack_histogram.add(histogram.MIN_ACK_DEADLINE * 2)
deadline = manager._obtain_ack_deadline(maybe_update=True)
assert deadline == histogram.MIN_ACK_DEADLINE * 2
# Adding just a single additional data point does not yet change the deadline.
manager.ack_histogram.add(histogram.MIN_ACK_DEADLINE)
deadline = manager._obtain_ack_deadline(maybe_update=True)
assert deadline == histogram.MIN_ACK_DEADLINE * 2
def test__obtain_ack_deadline_with_max_duration_per_lease_extension():
from google.cloud.pubsub_v1.subscriber._protocol import histogram
manager = make_manager()
manager._flow_control = types.FlowControl(
max_duration_per_lease_extension=histogram.MIN_ACK_DEADLINE + 1
)
manager.ack_histogram.add(histogram.MIN_ACK_DEADLINE * 3) # make p99 value large
# The deadline configured in flow control should prevail.
deadline = manager._obtain_ack_deadline(maybe_update=True)
assert deadline == histogram.MIN_ACK_DEADLINE + 1
def test__obtain_ack_deadline_with_min_duration_per_lease_extension():
from google.cloud.pubsub_v1.subscriber._protocol import histogram
manager = make_manager()
manager._flow_control = types.FlowControl(
min_duration_per_lease_extension=histogram.MAX_ACK_DEADLINE
)
manager.ack_histogram.add(histogram.MIN_ACK_DEADLINE) # make p99 value small
# The deadline configured in flow control should prevail.
deadline = manager._obtain_ack_deadline(maybe_update=True)
assert deadline == histogram.MAX_ACK_DEADLINE
def test__obtain_ack_deadline_with_max_duration_per_lease_extension_too_low():
from google.cloud.pubsub_v1.subscriber._protocol import histogram
manager = make_manager()
manager._flow_control = types.FlowControl(
max_duration_per_lease_extension=histogram.MIN_ACK_DEADLINE - 1
)
# The deadline configured in flow control should be adjusted to the minimum allowed.
deadline = manager._obtain_ack_deadline(maybe_update=True)
assert deadline == histogram.MIN_ACK_DEADLINE
def test__obtain_ack_deadline_with_min_duration_per_lease_extension_too_high():
from google.cloud.pubsub_v1.subscriber._protocol import histogram
manager = make_manager()
manager._flow_control = types.FlowControl(
min_duration_per_lease_extension=histogram.MAX_ACK_DEADLINE + 1
)
# The deadline configured in flow control should be adjusted to the maximum allowed.
deadline = manager._obtain_ack_deadline(maybe_update=True)
assert deadline == histogram.MAX_ACK_DEADLINE
def test__obtain_ack_deadline_with_exactly_once_enabled():
manager = make_manager()
manager._flow_control = types.FlowControl(
min_duration_per_lease_extension=0 # leave as default value
)
manager._exactly_once_enabled = True
manager.ack_histogram.add(
10
) # reduce p99 value below 60s min for exactly_once subscriptions
deadline = manager._obtain_ack_deadline(maybe_update=True)
    # Since the 60-second minimum ack_deadline for exactly_once subscriptions is
    # higher than the histogram value, the deadline should be 60 seconds.
assert deadline == 60
def test__obtain_ack_deadline_with_min_duration_per_lease_extension_with_exactly_once_enabled():
from google.cloud.pubsub_v1.subscriber._protocol import histogram
manager = make_manager()
manager._flow_control = types.FlowControl(
min_duration_per_lease_extension=histogram.MAX_ACK_DEADLINE
)
manager._exactly_once_enabled = True
manager.ack_histogram.add(histogram.MIN_ACK_DEADLINE) # make p99 value small
# The deadline configured in flow control should prevail.
deadline = manager._obtain_ack_deadline(maybe_update=True)
# User-defined custom min ack_deadline value takes precedence over
# exactly_once default of 60 seconds.
assert deadline == histogram.MAX_ACK_DEADLINE
def test__obtain_ack_deadline_no_value_update():
manager = make_manager()
# Make sure that max_duration_per_lease_extension is disabled.
manager._flow_control = types.FlowControl(max_duration_per_lease_extension=0)
manager.ack_histogram.add(21)
deadline = manager._obtain_ack_deadline(maybe_update=True)
assert deadline == 21
for _ in range(5):
manager.ack_histogram.add(35) # Gather some new ACK data.
deadline = manager._obtain_ack_deadline(maybe_update=False)
assert deadline == 21 # still the same
# Accessing the value through the ack_deadline property has no side effects either.
assert manager.ack_deadline == 21
# Updating the ack deadline is reflected on ack_deadline wrapper, too.
deadline = manager._obtain_ack_deadline(maybe_update=True)
assert manager.ack_deadline == deadline == 35
def test_client_id():
manager1 = make_manager()
request1 = manager1._get_initial_request(stream_ack_deadline_seconds=10)
client_id_1 = request1.client_id
assert client_id_1
manager2 = make_manager()
request2 = manager2._get_initial_request(stream_ack_deadline_seconds=10)
client_id_2 = request2.client_id
assert client_id_2
assert client_id_1 != client_id_2
def test_streaming_flow_control():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
request = manager._get_initial_request(stream_ack_deadline_seconds=10)
assert request.max_outstanding_messages == 10
assert request.max_outstanding_bytes == 1000
def test_streaming_flow_control_use_legacy_flow_control():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000),
use_legacy_flow_control=True,
)
request = manager._get_initial_request(stream_ack_deadline_seconds=10)
assert request.max_outstanding_messages == 0
assert request.max_outstanding_bytes == 0
def test_maybe_pause_consumer_wo_consumer_set():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
manager.maybe_pause_consumer() # no raise
# Ensure load > 1
_leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
_leaser.message_count = 100
_leaser.bytes = 10000
manager.maybe_pause_consumer() # no raise
def test_lease_load_and_pause():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
manager._leaser = leaser.Leaser(manager)
manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
manager._consumer.is_paused = False
# This should mean that our messages count is at 10%, and our bytes
# are at 15%; load should return the higher (0.15), and shouldn't cause
# the consumer to pause.
manager.leaser.add(
[requests.LeaseRequest(ack_id="one", byte_size=150, ordering_key="")]
)
assert manager.load == 0.15
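    # (Assuming load is the larger of the per-message and per-byte ratios:
    # max(1 / 10, 150 / 1000) == 0.15.)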
manager.maybe_pause_consumer()
manager._consumer.pause.assert_not_called()
# After this message is added, the messages should be higher at 20%
# (versus 16% for bytes).
manager.leaser.add(
[requests.LeaseRequest(ack_id="two", byte_size=10, ordering_key="")]
)
assert manager.load == 0.2
# Returning a number above 100% is fine, and it should cause this to pause.
manager.leaser.add(
[requests.LeaseRequest(ack_id="three", byte_size=1000, ordering_key="")]
)
assert manager.load == 1.16
manager.maybe_pause_consumer()
manager._consumer.pause.assert_called_once()
def test_drop_and_resume():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
manager._leaser = leaser.Leaser(manager)
manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
manager._consumer.is_paused = True
# Add several messages until we're over the load threshold.
manager.leaser.add(
[
requests.LeaseRequest(ack_id="one", byte_size=750, ordering_key=""),
requests.LeaseRequest(ack_id="two", byte_size=250, ordering_key=""),
]
)
assert manager.load == 1.0
# Trying to resume now should have no effect as we're over the threshold.
manager.maybe_resume_consumer()
manager._consumer.resume.assert_not_called()
    # Drop the 250 byte message, which should put us under the resume
# threshold.
manager.leaser.remove(
[requests.DropRequest(ack_id="two", byte_size=250, ordering_key="")]
)
manager.maybe_resume_consumer()
manager._consumer.resume.assert_called_once()
def test_resume_not_paused():
manager = make_manager()
manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
manager._consumer.is_paused = False
    # Resuming should have no effect if the consumer is not actually paused.
manager.maybe_resume_consumer()
manager._consumer.resume.assert_not_called()
def test_maybe_resume_consumer_wo_consumer_set():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
manager.maybe_resume_consumer() # no raise
def test__maybe_release_messages_on_overload():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
msg = mock.create_autospec(message.Message, instance=True, ack_id="ack", size=11)
manager._messages_on_hold.put(msg)
manager._on_hold_bytes = msg.size
# Ensure load is exactly 1.0 (to verify that >= condition is used)
_leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
_leaser.message_count = 10
_leaser.bytes = 1000 + msg.size
manager._maybe_release_messages()
assert manager._messages_on_hold.size == 1
manager._leaser.add.assert_not_called()
manager._scheduler.schedule.assert_not_called()
def test__maybe_release_messages_below_overload():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
manager._callback = mock.sentinel.callback
# Init leaser message count to 11, so that when subtracting the 3 messages
# that are on hold, there is still room for another 2 messages before the
# max load is hit.
_leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
fake_leaser_add(_leaser, init_msg_count=11, assumed_msg_size=10)
messages = [
mock.create_autospec(message.Message, instance=True, ack_id="ack_foo", size=10),
mock.create_autospec(message.Message, instance=True, ack_id="ack_bar", size=10),
mock.create_autospec(message.Message, instance=True, ack_id="ack_baz", size=10),
]
for msg in messages:
manager._messages_on_hold.put(msg)
manager._on_hold_bytes = 3 * 10
# the actual call of MUT
manager._maybe_release_messages()
assert manager._messages_on_hold.size == 1
msg = manager._messages_on_hold.get()
assert msg.ack_id == "ack_baz"
schedule_calls = manager._scheduler.schedule.mock_calls
assert len(schedule_calls) == 2
for _, call_args, _ in schedule_calls:
assert call_args[0] == mock.sentinel.callback
assert isinstance(call_args[1], message.Message)
assert call_args[1].ack_id in ("ack_foo", "ack_bar")
def test__maybe_release_messages_negative_on_hold_bytes_warning(caplog):
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
manager._callback = lambda msg: msg # pragma: NO COVER
msg = mock.create_autospec(message.Message, instance=True, ack_id="ack", size=17)
manager._messages_on_hold.put(msg)
manager._on_hold_bytes = 5 # too low for some reason
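    # Presumably releasing the 17-byte message drives the counter to 5 - 17 = -12,
    # which is the value the warning assertions below look for before it is reset to 0.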
_leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
_leaser.message_count = 3
_leaser.bytes = 150
with caplog.at_level(logging.WARNING):
manager._maybe_release_messages()
expected_warnings = [
record.message.lower()
for record in caplog.records
if "unexpectedly negative" in record.message
]
assert len(expected_warnings) == 1
assert "on hold bytes" in expected_warnings[0]
assert "-12" in expected_warnings[0]
assert manager._on_hold_bytes == 0 # should be auto-corrected
def test_send_unary_ack():
manager = make_manager()
ack_reqs_dict = {
"ack_id1": requests.AckRequest(
ack_id="ack_id1", byte_size=0, time_to_ack=20, ordering_key="", future=None
),
"ack_id2": requests.AckRequest(
ack_id="ack_id2", byte_size=0, time_to_ack=20, ordering_key="", future=None
),
}
manager.send_unary_ack(ack_ids=["ack_id1", "ack_id2"], ack_reqs_dict=ack_reqs_dict)
manager._client.acknowledge.assert_called_once_with(
subscription=manager._subscription, ack_ids=["ack_id1", "ack_id2"]
)
def test_send_unary_modack():
manager = make_manager()
ack_reqs_dict = {
"ack_id3": requests.ModAckRequest(ack_id="ack_id3", seconds=60, future=None),
"ack_id4": requests.ModAckRequest(ack_id="ack_id4", seconds=60, future=None),
"ack_id5": requests.ModAckRequest(ack_id="ack_id5", seconds=60, future=None),
}
manager.send_unary_modack(
modify_deadline_ack_ids=["ack_id3", "ack_id4", "ack_id5"],
modify_deadline_seconds=[10, 20, 20],
ack_reqs_dict=ack_reqs_dict,
)
manager._client.modify_ack_deadline.assert_has_calls(
[
mock.call(
subscription=manager._subscription,
ack_ids=["ack_id3"],
ack_deadline_seconds=10,
),
mock.call(
subscription=manager._subscription,
ack_ids=["ack_id4", "ack_id5"],
ack_deadline_seconds=20,
),
],
any_order=True,
)
def test_send_unary_ack_api_call_error(caplog):
caplog.set_level(logging.DEBUG)
manager = make_manager()
error = exceptions.GoogleAPICallError("The front fell off")
manager._client.acknowledge.side_effect = error
ack_reqs_dict = {
"ack_id1": requests.AckRequest(
ack_id="ack_id1", byte_size=0, time_to_ack=20, ordering_key="", future=None
),
"ack_id2": requests.AckRequest(
ack_id="ack_id2", byte_size=0, time_to_ack=20, ordering_key="", future=None
),
}
manager.send_unary_ack(ack_ids=["ack_id1", "ack_id2"], ack_reqs_dict=ack_reqs_dict)
assert "The front fell off" in caplog.text
def test_send_unary_modack_api_call_error(caplog):
caplog.set_level(logging.DEBUG)
manager = make_manager()
error = exceptions.GoogleAPICallError("The front fell off")
manager._client.modify_ack_deadline.side_effect = error
ack_reqs_dict = {
"ack_id1": requests.AckRequest(
ack_id="ack_id1",
byte_size=0,
time_to_ack=20,
ordering_key="",
future=futures.Future(),
),
"ack_id2": requests.AckRequest(
ack_id="ack_id2",
byte_size=0,
time_to_ack=20,
ordering_key="",
future=futures.Future(),
),
}
manager.send_unary_modack(
modify_deadline_ack_ids=["ack_id_string"],
modify_deadline_seconds=[0],
ack_reqs_dict=ack_reqs_dict,
)
assert "The front fell off" in caplog.text
def test_send_unary_ack_retry_error(caplog):
caplog.set_level(logging.DEBUG)
manager, _, _, _, _, _ = make_running_manager()
error = exceptions.RetryError(
"Too long a transient error", cause=Exception("Out of time!")
)
manager._client.acknowledge.side_effect = error
future1 = futures.Future()
future2 = futures.Future()
ack_reqs_dict = {
"ack_id1": requests.AckRequest(
ack_id="ack_id1",
byte_size=0,
time_to_ack=20,
ordering_key="",
future=future1,
),
"ack_id2": requests.AckRequest(
ack_id="ack_id2",
byte_size=0,
time_to_ack=20,
ordering_key="",
future=future2,
),
}
with pytest.raises(exceptions.RetryError):
manager.send_unary_ack(
ack_ids=["ack_id1", "ack_id2"], ack_reqs_dict=ack_reqs_dict
)
assert "RetryError while sending unary RPC" in caplog.text
assert "signaled streaming pull manager shutdown" in caplog.text
assert isinstance(future1.exception(), subscriber_exceptions.AcknowledgeError)
assert (
future1.exception().error_code is subscriber_exceptions.AcknowledgeStatus.OTHER
)
assert isinstance(future2.exception(), subscriber_exceptions.AcknowledgeError)
assert (
future2.exception().error_code is subscriber_exceptions.AcknowledgeStatus.OTHER
)
def test_send_unary_modack_retry_error(caplog):
caplog.set_level(logging.DEBUG)
manager, _, _, _, _, _ = make_running_manager()
error = exceptions.RetryError(
"Too long a transient error", cause=Exception("Out of time!")
)
manager._client.modify_ack_deadline.side_effect = error
future = futures.Future()
ack_reqs_dict = {
"ackid1": requests.ModAckRequest(ack_id="ackid1", seconds=60, future=future)
}
with pytest.raises(exceptions.RetryError):
manager.send_unary_modack(
modify_deadline_ack_ids=["ackid1"],
modify_deadline_seconds=[0],
ack_reqs_dict=ack_reqs_dict,
)
assert "RetryError while sending unary RPC" in caplog.text
assert "signaled streaming pull manager shutdown" in caplog.text
assert isinstance(future.exception(), subscriber_exceptions.AcknowledgeError)
assert (
future.exception().error_code is subscriber_exceptions.AcknowledgeStatus.OTHER
)
def test_heartbeat():
manager = make_manager()
manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)
manager._rpc.is_active = True
result = manager.heartbeat()
manager._rpc.send.assert_called_once_with(gapic_types.StreamingPullRequest())
assert result
def test_heartbeat_inactive():
manager = make_manager()
manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)
manager._rpc.is_active = False
    result = manager.heartbeat()
    manager._rpc.send.assert_not_called()
    assert not result
def test_heartbeat_stream_ack_deadline_seconds():
manager = make_manager()
manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)
manager._rpc.is_active = True
# Send new ack deadline with next heartbeat.
manager._send_new_ack_deadline = True
result = manager.heartbeat()
manager._rpc.send.assert_called_once_with(
gapic_types.StreamingPullRequest(stream_ack_deadline_seconds=10)
)
assert result
# Set to false after a send is initiated.
assert not manager._send_new_ack_deadline
@mock.patch("google.api_core.bidi.ResumableBidiRpc", autospec=True)
@mock.patch("google.api_core.bidi.BackgroundConsumer", autospec=True)
@mock.patch("google.cloud.pubsub_v1.subscriber._protocol.leaser.Leaser", autospec=True)
@mock.patch(
"google.cloud.pubsub_v1.subscriber._protocol.dispatcher.Dispatcher", autospec=True
)
@mock.patch(
"google.cloud.pubsub_v1.subscriber._protocol.heartbeater.Heartbeater", autospec=True
)
def test_open(heartbeater, dispatcher, leaser, background_consumer, resumable_bidi_rpc):
manager = make_manager()
with mock.patch.object(
type(manager), "ack_deadline", new=mock.PropertyMock(return_value=18)
):
manager.open(mock.sentinel.callback, mock.sentinel.on_callback_error)
heartbeater.assert_called_once_with(manager)
heartbeater.return_value.start.assert_called_once()
assert manager._heartbeater == heartbeater.return_value
dispatcher.assert_called_once_with(manager, manager._scheduler.queue)
dispatcher.return_value.start.assert_called_once()
assert manager._dispatcher == dispatcher.return_value
leaser.assert_called_once_with(manager)
leaser.return_value.start.assert_called_once()
assert manager.leaser == leaser.return_value
background_consumer.assert_called_once_with(manager._rpc, manager._on_response)
background_consumer.return_value.start.assert_called_once()
assert manager._consumer == background_consumer.return_value
resumable_bidi_rpc.assert_called_once_with(
start_rpc=manager._client.streaming_pull,
initial_request=mock.ANY,
should_recover=manager._should_recover,
should_terminate=manager._should_terminate,
throttle_reopen=True,
)
initial_request_arg = resumable_bidi_rpc.call_args.kwargs["initial_request"]
assert initial_request_arg.func == manager._get_initial_request
assert initial_request_arg.args[0] == 18
assert not manager._client.get_subscription.called
resumable_bidi_rpc.return_value.add_done_callback.assert_called_once_with(
manager._on_rpc_done
)
assert manager._rpc == resumable_bidi_rpc.return_value
manager._consumer.is_active = True
assert manager.is_active is True
def test_open_already_active():
manager = make_manager()
manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
manager._consumer.is_active = True
with pytest.raises(ValueError, match="already open"):
manager.open(mock.sentinel.callback, mock.sentinel.on_callback_error)
def test_open_has_been_closed():
manager = make_manager()
manager._closed = True
with pytest.raises(ValueError, match="closed"):
manager.open(mock.sentinel.callback, mock.sentinel.on_callback_error)
def make_running_manager(**kwargs):
manager = make_manager(**kwargs)
manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
manager._consumer.is_active = True
manager._dispatcher = mock.create_autospec(dispatcher.Dispatcher, instance=True)
manager._leaser = mock.create_autospec(leaser.Leaser, instance=True)
manager._heartbeater = mock.create_autospec(heartbeater.Heartbeater, instance=True)
return (
manager,
manager._consumer,
manager._dispatcher,
manager._leaser,
manager._heartbeater,
manager._scheduler,
)
def await_manager_shutdown(manager, timeout=None):
# NOTE: This method should be called after manager.close(), i.e. after the shutdown
# thread has been created and started.
shutdown_thread = manager._regular_shutdown_thread
if shutdown_thread is None: # pragma: NO COVER
raise Exception("Shutdown thread does not exist on the manager instance.")
shutdown_thread.join(timeout=timeout)
if shutdown_thread.is_alive(): # pragma: NO COVER
pytest.fail("Shutdown not completed in time.")
def test_close():
(
manager,
consumer,
dispatcher,
leaser,
heartbeater,
scheduler,
) = make_running_manager()
manager.close()
await_manager_shutdown(manager, timeout=3)
consumer.stop.assert_called_once()
leaser.stop.assert_called_once()
dispatcher.stop.assert_called_once()
heartbeater.stop.assert_called_once()
scheduler.shutdown.assert_called_once()
assert manager.is_active is False
def test_close_inactive_consumer():
(
manager,
consumer,
dispatcher,
leaser,
heartbeater,
scheduler,
) = make_running_manager()
consumer.is_active = False
manager.close()
await_manager_shutdown(manager, timeout=3)
consumer.stop.assert_not_called()
leaser.stop.assert_called_once()
dispatcher.stop.assert_called_once()
heartbeater.stop.assert_called_once()
scheduler.shutdown.assert_called_once()
def test_close_idempotent():
manager, _, _, _, _, scheduler = make_running_manager()
manager.close()
manager.close()
await_manager_shutdown(manager, timeout=3)
assert scheduler.shutdown.call_count == 1
class FakeDispatcher(object):
def __init__(self, manager, error_callback):
self._manager = manager
self._error_callback = error_callback
self._thread = None
self._stop = False
def start(self):
self._thread = threading.Thread(target=self._do_work)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._stop = True
self._thread.join()
self._thread = None
def _do_work(self):
while not self._stop:
try:
self._manager.leaser.add([mock.Mock()])
except Exception as exc: # pragma: NO COVER
self._error_callback(exc)
time.sleep(0.1)
# also try to interact with the leaser after the stop flag has been set
try:
self._manager.leaser.remove([mock.Mock()])
except Exception as exc: # pragma: NO COVER
self._error_callback(exc)
def test_close_no_dispatcher_error():
manager, _, _, _, _, _ = make_running_manager()
error_callback = mock.Mock(name="error_callback")
dispatcher = FakeDispatcher(manager=manager, error_callback=error_callback)
manager._dispatcher = dispatcher
dispatcher.start()
manager.close()
await_manager_shutdown(manager, timeout=3)
error_callback.assert_not_called()
def test_close_callbacks():
manager, _, _, _, _, _ = make_running_manager()
callback = mock.Mock()
manager.add_close_callback(callback)
manager.close(reason="meep")
await_manager_shutdown(manager, timeout=3)
callback.assert_called_once_with(manager, "meep")
def test_close_blocking_scheduler_shutdown():
manager, _, _, _, _, _ = make_running_manager(await_callbacks_on_shutdown=True)
scheduler = manager._scheduler
manager.close()
await_manager_shutdown(manager, timeout=3)
scheduler.shutdown.assert_called_once_with(await_msg_callbacks=True)
def test_close_nonblocking_scheduler_shutdown():
manager, _, _, _, _, _ = make_running_manager(await_callbacks_on_shutdown=False)
scheduler = manager._scheduler
manager.close()
await_manager_shutdown(manager, timeout=3)
scheduler.shutdown.assert_called_once_with(await_msg_callbacks=False)
def test_close_nacks_internally_queued_messages():
nacked_messages = []
def fake_nack(self):
nacked_messages.append(self.data)
MockMsg = functools.partial(mock.create_autospec, message.Message, instance=True)
messages = [MockMsg(data=b"msg1"), MockMsg(data=b"msg2"), MockMsg(data=b"msg3")]
for msg in messages:
msg.nack = stdlib_types.MethodType(fake_nack, msg)
manager, _, _, _, _, _ = make_running_manager()
dropped_by_scheduler = messages[:2]
manager._scheduler.shutdown.return_value = dropped_by_scheduler
manager._messages_on_hold._messages_on_hold.append(messages[2])
manager.close()
await_manager_shutdown(manager, timeout=3)
assert sorted(nacked_messages) == [b"msg1", b"msg2", b"msg3"]
def test__get_initial_request():
manager = make_manager()
manager._leaser = mock.create_autospec(leaser.Leaser, instance=True)
manager._leaser.ack_ids = ["1", "2"]
initial_request = manager._get_initial_request(123)
assert isinstance(initial_request, gapic_types.StreamingPullRequest)
assert initial_request.subscription == "subscription-name"
assert initial_request.stream_ack_deadline_seconds == 123
assert initial_request.modify_deadline_ack_ids == ["1", "2"]
assert initial_request.modify_deadline_seconds == [10, 10]
def test__get_initial_request_wo_leaser():
manager = make_manager()
manager._leaser = None
initial_request = manager._get_initial_request(123)
assert isinstance(initial_request, gapic_types.StreamingPullRequest)
assert initial_request.subscription == "subscription-name"
assert initial_request.stream_ack_deadline_seconds == 123
assert initial_request.modify_deadline_ack_ids == []
assert initial_request.modify_deadline_seconds == []
def test__on_response_delivery_attempt():
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
# Set up the messages.
response = gapic_types.StreamingPullResponse(
received_messages=[
gapic_types.ReceivedMessage(
ack_id="fack",
message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
),
gapic_types.ReceivedMessage(
ack_id="back",
message=gapic_types.PubsubMessage(data=b"bar", message_id="2"),
delivery_attempt=6,
),
]
)
# adjust message bookkeeping in leaser
fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=42)
manager._on_response(response)
schedule_calls = scheduler.schedule.mock_calls
assert len(schedule_calls) == 2
msg1 = schedule_calls[0][1][1]
assert msg1.delivery_attempt is None
msg2 = schedule_calls[1][1][1]
assert msg2.delivery_attempt == 6
def test__on_response_modifies_ack_deadline():
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
# Set up the messages.
response = gapic_types.StreamingPullResponse(
received_messages=[
gapic_types.ReceivedMessage(
ack_id="ack_1",
message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
),
gapic_types.ReceivedMessage(
ack_id="ack_2",
message=gapic_types.PubsubMessage(data=b"bar", message_id="2"),
),
]
)
# adjust message bookkeeping in leaser
fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=80)
    # Actually run the method and check that the correct MODACK value is used.
with mock.patch.object(
type(manager), "ack_deadline", new=mock.PropertyMock(return_value=18)
):
manager._on_response(response)
dispatcher.modify_ack_deadline.assert_called_once_with(
[
requests.ModAckRequest("ack_1", 18, None),
requests.ModAckRequest("ack_2", 18, None),
]
)
def test__on_response_modifies_ack_deadline_with_exactly_once_min_lease():
# exactly_once is disabled by default.
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
complete_modify_ack_deadline_calls(dispatcher)
# make p99 value smaller than exactly_once min lease
manager.ack_histogram.add(10)
# adjust message bookkeeping in leaser
fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=42)
# Set up the response with the first set of messages and exactly_once not
# enabled.
response1 = gapic_types.StreamingPullResponse(
received_messages=[
gapic_types.ReceivedMessage(
ack_id="ack_1",
message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
),
gapic_types.ReceivedMessage(
ack_id="ack_2",
message=gapic_types.PubsubMessage(data=b"bar", message_id="2"),
),
],
subscription_properties=gapic_types.StreamingPullResponse.SubscriptionProperties(
exactly_once_delivery_enabled=False
),
)
# Set up the response with the second set of messages and exactly_once enabled.
response2 = gapic_types.StreamingPullResponse(
received_messages=[
gapic_types.ReceivedMessage(
ack_id="ack_3",
message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
),
gapic_types.ReceivedMessage(
ack_id="ack_4",
message=gapic_types.PubsubMessage(data=b"bar", message_id="2"),
),
],
subscription_properties=gapic_types.StreamingPullResponse.SubscriptionProperties(
exactly_once_delivery_enabled=True
),
)
# exactly_once is still disabled b/c subscription_properties says so
manager._on_response(response1)
# expect mod-acks are called with histogram-based lease value
assert len(dispatcher.modify_ack_deadline.mock_calls) == 1
call = dispatcher.modify_ack_deadline.mock_calls[0]
assert call.args[0] == [
requests.ModAckRequest("ack_1", 10, None),
requests.ModAckRequest("ack_2", 10, None),
]
# exactly_once should be enabled after this request b/c subscription_properties says so
manager._on_response(response2)
# expect mod-acks called with 60 sec min lease value for exactly_once subscriptions
# ignore the futures here
assert len(dispatcher.modify_ack_deadline.mock_calls) == 2
call = dispatcher.modify_ack_deadline.mock_calls[1]
modack_reqs = call.args[0]
assert modack_reqs[0].ack_id == "ack_3"
assert modack_reqs[0].seconds == 60
assert modack_reqs[1].ack_id == "ack_4"
assert modack_reqs[1].seconds == 60
def test__on_response_send_ack_deadline_after_enabling_exactly_once():
# exactly_once is disabled by default.
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
complete_modify_ack_deadline_calls(dispatcher)
# set up an active RPC
manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)
manager._rpc.is_active = True
# make p99 value smaller than exactly_once min lease
manager.ack_histogram.add(10)
# adjust message bookkeeping in leaser
fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=42)
    # Set up the response with a message and exactly_once enabled.
response2 = gapic_types.StreamingPullResponse(
received_messages=[
gapic_types.ReceivedMessage(
ack_id="ack_1",
message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
)
],
subscription_properties=gapic_types.StreamingPullResponse.SubscriptionProperties(
exactly_once_delivery_enabled=True
),
)
# exactly_once should be enabled after this request b/c subscription_properties says so
# when exactly_once is enabled or disabled, we send a new ack_deadline via
# the heartbeat
# should satisfy assertion 1
manager._on_response(response2)
# simulate periodic heartbeat trigger
heartbeat_request_sent = manager.heartbeat()
assert heartbeat_request_sent
# heartbeat request is sent with the 60 sec min lease value for exactly_once subscriptions
manager._rpc.send.assert_called_once_with(
gapic_types.StreamingPullRequest(stream_ack_deadline_seconds=60)
)
def test__on_response_no_leaser_overload():
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
# Set up the messages.
response = gapic_types.StreamingPullResponse(
received_messages=[
gapic_types.ReceivedMessage(
ack_id="fack",
message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
),
gapic_types.ReceivedMessage(
ack_id="back",
message=gapic_types.PubsubMessage(data=b"bar", message_id="2"),
),
]
)
# adjust message bookkeeping in leaser
fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=42)
# Actually run the method and prove that modack and schedule
# are called in the expected way.
manager._on_response(response)
dispatcher.modify_ack_deadline.assert_called_once_with(
[
requests.ModAckRequest("fack", 10, None),
requests.ModAckRequest("back", 10, None),
]
)
schedule_calls = scheduler.schedule.mock_calls
assert len(schedule_calls) == 2
for call in schedule_calls:
assert call[1][0] == mock.sentinel.callback
assert isinstance(call[1][1], message.Message)
    # the leaser load limit was not hit, so no messages had to be put on hold
assert manager._messages_on_hold.size == 0
def test__on_response_with_leaser_overload():
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
# Set up the messages.
response = gapic_types.StreamingPullResponse(
received_messages=[
gapic_types.ReceivedMessage(
ack_id="fack",
message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
),
gapic_types.ReceivedMessage(
ack_id="back",
message=gapic_types.PubsubMessage(data=b"bar", message_id="2"),
),
gapic_types.ReceivedMessage(
ack_id="zack",
message=gapic_types.PubsubMessage(data=b"baz", message_id="3"),
),
]
)
# Adjust message bookkeeping in leaser. Pick 999 messages, which is just below
# the default FlowControl.max_messages limit.
fake_leaser_add(leaser, init_msg_count=999, assumed_msg_size=10)
# Actually run the method and prove that modack and schedule
# are called in the expected way.
manager._on_response(response)
# all messages should be added to the lease management and have their ACK
# deadline extended, even those not dispatched to callbacks
dispatcher.modify_ack_deadline.assert_called_once_with(
[
requests.ModAckRequest("fack", 10, None),
requests.ModAckRequest("back", 10, None),
requests.ModAckRequest("zack", 10, None),
]
)
# one message should be scheduled, the flow control limits allow for it
schedule_calls = scheduler.schedule.mock_calls
assert len(schedule_calls) == 1
call_args = schedule_calls[0][1]
assert call_args[0] == mock.sentinel.callback
assert isinstance(call_args[1], message.Message)
assert call_args[1].message_id == "1"
# the rest of the messages should have been put on hold
assert manager._messages_on_hold.size == 2
while True:
msg = manager._messages_on_hold.get()
if msg is None:
break
else:
assert isinstance(msg, message.Message)
assert msg.message_id in ("2", "3")
def test__on_response_none_data(caplog):
caplog.set_level(logging.DEBUG)
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
# adjust message bookkeeping in leaser
fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10)
manager._on_response(response=None)
scheduler.schedule.assert_not_called()
assert "callback invoked with None" in caplog.text
def test__on_response_with_ordering_keys():
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
# Set up the messages.
response = gapic_types.StreamingPullResponse(
received_messages=[
gapic_types.ReceivedMessage(
ack_id="fack",
message=gapic_types.PubsubMessage(
data=b"foo", message_id="1", ordering_key=""
),
),
gapic_types.ReceivedMessage(
ack_id="back",
message=gapic_types.PubsubMessage(
data=b"bar", message_id="2", ordering_key="key1"
),
),
gapic_types.ReceivedMessage(
ack_id="zack",
message=gapic_types.PubsubMessage(
data=b"baz", message_id="3", ordering_key="key1"
),
),
]
)
# Make leaser with zero initial messages, so we don't test lease management
# behavior.
fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10)
# Actually run the method and prove that modack and schedule are called in
# the expected way.
manager._on_response(response)
# All messages should be added to the lease management and have their ACK
# deadline extended, even those not dispatched to callbacks.
dispatcher.modify_ack_deadline.assert_called_once_with(
[
requests.ModAckRequest("fack", 10, None),
requests.ModAckRequest("back", 10, None),
requests.ModAckRequest("zack", 10, None),
]
)
    # The first two messages should be scheduled; the third should be put on
    # hold because it is blocked until the second message, which has the same
    # ordering key, completes.
schedule_calls = scheduler.schedule.mock_calls
assert len(schedule_calls) == 2
call_args = schedule_calls[0][1]
assert call_args[0] == mock.sentinel.callback
assert isinstance(call_args[1], message.Message)
assert call_args[1].message_id == "1"
call_args = schedule_calls[1][1]
assert call_args[0] == mock.sentinel.callback
assert isinstance(call_args[1], message.Message)
assert call_args[1].message_id == "2"
# Message 3 should have been put on hold.
assert manager._messages_on_hold.size == 1
# No messages available because message 2 (with "key1") has not completed yet.
assert manager._messages_on_hold.get() is None
# Complete message 2 (with "key1").
manager.activate_ordering_keys(["key1"])
# Completing message 2 should release message 3.
schedule_calls = scheduler.schedule.mock_calls
assert len(schedule_calls) == 3
call_args = schedule_calls[2][1]
assert call_args[0] == mock.sentinel.callback
assert isinstance(call_args[1], message.Message)
assert call_args[1].message_id == "3"
# No messages available in the queue.
assert manager._messages_on_hold.get() is None
def test__on_response_enable_exactly_once():
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
complete_modify_ack_deadline_calls(dispatcher)
# Set up the messages.
response = gapic_types.StreamingPullResponse(
received_messages=[
gapic_types.ReceivedMessage(
ack_id="fack",
message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
)
],
subscription_properties=gapic_types.StreamingPullResponse.SubscriptionProperties(
exactly_once_delivery_enabled=True
),
)
# adjust message bookkeeping in leaser
fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=42)
# exactly_once should be enabled
manager._on_response(response)
assert manager._exactly_once_delivery_enabled()
# new deadline for exactly_once subscriptions should be used
assert manager.ack_deadline == 60
def test__on_response_disable_exactly_once():
from google.cloud.pubsub_v1.subscriber._protocol import histogram
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
manager._flow_control = types.FlowControl(
min_duration_per_lease_extension=histogram.MIN_ACK_DEADLINE
)
# enable exactly_once
manager._exactly_once_enabled = True
# Set up the messages.
response = gapic_types.StreamingPullResponse(
received_messages=[
gapic_types.ReceivedMessage(
ack_id="fack",
message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
)
],
subscription_properties=gapic_types.StreamingPullResponse.SubscriptionProperties(
exactly_once_delivery_enabled=False
),
)
# adjust message bookkeeping in leaser
fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=42)
# exactly_once should be disabled
manager._on_response(response)
assert not manager._exactly_once_enabled
# The deadline configured in flow control should be used, not the
# exactly_once minimum since exactly_once has been disabled.
deadline = manager._obtain_ack_deadline(maybe_update=True)
assert deadline == histogram.MIN_ACK_DEADLINE
def test__on_response_exactly_once_immediate_modacks_fail():
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
def complete_futures_with_error(*args, **kwargs):
modack_requests = args[0]
for req in modack_requests:
req.future.set_exception(
subscriber_exceptions.AcknowledgeError(
subscriber_exceptions.AcknowledgeStatus.SUCCESS, None
)
)
dispatcher.modify_ack_deadline.side_effect = complete_futures_with_error
# Set up the messages.
response = gapic_types.StreamingPullResponse(
received_messages=[
gapic_types.ReceivedMessage(
ack_id="fack",
message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
)
],
subscription_properties=gapic_types.StreamingPullResponse.SubscriptionProperties(
exactly_once_delivery_enabled=True
),
)
# adjust message bookkeeping in leaser
fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=42)
# exactly_once should be enabled
manager._on_response(response)
    # exceptions are logged, but otherwise have no effect
def test__should_recover_true():
manager = make_manager()
details = "UNAVAILABLE. Service taking nap."
exc = exceptions.ServiceUnavailable(details)
assert manager._should_recover(exc) is True
def test__should_recover_false():
manager = make_manager()
exc = TypeError("wahhhhhh")
assert manager._should_recover(exc) is False
def test__should_terminate_true():
manager = make_manager()
details = "Cancelled. Go away, before I taunt you a second time."
exc = exceptions.Cancelled(details)
assert manager._should_terminate(exc) is True
def test__should_terminate_false():
manager = make_manager()
exc = TypeError("wahhhhhh")
assert manager._should_terminate(exc) is False
@mock.patch("threading.Thread", autospec=True)
def test__on_rpc_done(thread):
manager = make_manager()
manager._on_rpc_done(mock.sentinel.error)
thread.assert_called_once_with(
name=mock.ANY, target=manager._shutdown, kwargs={"reason": mock.ANY}
)
_, kwargs = thread.call_args
reason = kwargs["kwargs"]["reason"]
assert isinstance(reason, Exception)
assert reason.args == (mock.sentinel.error,) # Exception wraps the original error
def test_activate_ordering_keys():
manager = make_manager()
manager._messages_on_hold = mock.create_autospec(
messages_on_hold.MessagesOnHold, instance=True
)
manager.activate_ordering_keys(["key1", "key2"])
manager._messages_on_hold.activate_ordering_keys.assert_called_once_with(
["key1", "key2"], mock.ANY
)
def test_activate_ordering_keys_stopped_scheduler():
manager = make_manager()
manager._messages_on_hold = mock.create_autospec(
messages_on_hold.MessagesOnHold, instance=True
)
manager._scheduler = None
manager.activate_ordering_keys(["key1", "key2"])
manager._messages_on_hold.activate_ordering_keys.assert_not_called()
@mock.patch("grpc_status.rpc_status.from_call")
@mock.patch("google.protobuf.any_pb2.Any.Unpack")
def test_get_ack_errors_unable_to_unpack(from_call, unpack):
st = status_pb2.Status()
st.code = code_pb2.Code.INTERNAL
st.message = "qmsg"
error_info = error_details_pb2.ErrorInfo()
error_info.metadata["ack_1"] = "error1"
st.details.add().Pack(error_info)
mock_gprc_call = mock.Mock(spec=grpc.Call)
exception = exceptions.InternalServerError(
"msg", errors=(), response=mock_gprc_call
)
from_call.return_value = st
# Unpack() failed
unpack.return_value = None
assert not streaming_pull_manager._get_ack_errors(exception)
@mock.patch("grpc_status.rpc_status.from_call")
def test_get_ack_errors_no_response_obj(from_call):
exception = exceptions.InternalServerError("msg", errors=(), response=None)
# No response obj
assert not streaming_pull_manager._get_ack_errors(exception)
@mock.patch("grpc_status.rpc_status.from_call")
def test_get_ack_errors_from_call_returned_none(from_call):
mock_gprc_call = mock.Mock(spec=grpc.Call)
exception = exceptions.InternalServerError(
"msg", errors=(), response=mock_gprc_call
)
from_call.return_value = None
# rpc_status.from_call() returned None
assert not streaming_pull_manager._get_ack_errors(exception)
@mock.patch("grpc_status.rpc_status.from_call")
def test_get_ack_errors_value_error_thrown(from_call):
mock_gprc_call = mock.Mock(spec=grpc.Call)
exception = exceptions.InternalServerError(
"msg", errors=(), response=mock_gprc_call
)
from_call.side_effect = ValueError("val error msg")
# ValueError thrown, so return None
assert not streaming_pull_manager._get_ack_errors(exception)
@mock.patch("grpc_status.rpc_status.from_call")
def test_get_ack_errors_no_error_details(from_call):
st = status_pb2.Status()
st.code = code_pb2.Code.INTERNAL
st.message = "qmsg"
mock_gprc_call = mock.Mock(spec=grpc.Call)
exception = exceptions.InternalServerError(
"msg", errors=(), response=mock_gprc_call
)
from_call.side_effect = None
from_call.return_value = st
# status has no details to extract exactly-once error info from
assert not streaming_pull_manager._get_ack_errors(exception)
@mock.patch("grpc_status.rpc_status.from_call")
def test_get_ack_errors_detail_not_error_info(from_call):
st = status_pb2.Status()
st.code = code_pb2.Code.INTERNAL
st.message = "qmsg"
# pack a dummy status instead of an ErrorInfo
dummy_status = status_pb2.Status()
st.details.add().Pack(dummy_status)
mock_gprc_call = mock.Mock(spec=grpc.Call)
exception = exceptions.InternalServerError(
"msg", errors=(), response=mock_gprc_call
)
from_call.side_effect = None
from_call.return_value = st
assert not streaming_pull_manager._get_ack_errors(exception)
@mock.patch("grpc_status.rpc_status.from_call")
def test_get_ack_errors_happy_case(from_call):
st = status_pb2.Status()
st.code = code_pb2.Code.INTERNAL
st.message = "qmsg"
error_info = error_details_pb2.ErrorInfo()
error_info.metadata["ack_1"] = "error1"
st.details.add().Pack(error_info)
mock_gprc_call = mock.Mock(spec=grpc.Call)
exception = exceptions.InternalServerError(
"msg", errors=(), response=mock_gprc_call
)
from_call.side_effect = None
from_call.return_value = st
# happy case - errors returned in a map
ack_errors = streaming_pull_manager._get_ack_errors(exception)
assert ack_errors
assert ack_errors["ack_1"] == "error1"
def test_process_requests_no_requests():
# no requests so no items in results lists
ack_reqs_dict = {}
errors_dict = {}
requests_completed, requests_to_retry = streaming_pull_manager._process_requests(
None, ack_reqs_dict, errors_dict
)
assert not requests_completed
assert not requests_to_retry
def test_process_requests_error_dict_is_none():
# it's valid to pass in `None` for `errors_dict`
ack_reqs_dict = {}
errors_dict = None
requests_completed, requests_to_retry = streaming_pull_manager._process_requests(
None, ack_reqs_dict, errors_dict
)
assert not requests_completed
assert not requests_to_retry
def test_process_requests_no_errors_has_no_future():
# no errors so request should be completed, even with no future
ack_reqs_dict = {
"ackid1": requests.AckRequest(
ack_id="ackid1", byte_size=0, time_to_ack=20, ordering_key="", future=None
)
}
errors_dict = {}
requests_completed, requests_to_retry = streaming_pull_manager._process_requests(
None, ack_reqs_dict, errors_dict
)
assert requests_completed[0].ack_id == "ackid1"
assert not requests_to_retry
def test_process_requests_no_errors():
# no errors so request and its future should be completed
future = futures.Future()
ack_reqs_dict = {
"ackid1": requests.AckRequest(
ack_id="ackid1", byte_size=0, time_to_ack=20, ordering_key="", future=future
)
}
errors_dict = {}
requests_completed, requests_to_retry = streaming_pull_manager._process_requests(
None, ack_reqs_dict, errors_dict
)
assert requests_completed[0].ack_id == "ackid1"
assert future.result() == subscriber_exceptions.AcknowledgeStatus.SUCCESS
assert not requests_to_retry
def test_process_requests_permanent_error_raises_exception():
# a permanent error raises an exception
future = futures.Future()
ack_reqs_dict = {
"ackid1": requests.AckRequest(
ack_id="ackid1", byte_size=0, time_to_ack=20, ordering_key="", future=future
)
}
errors_dict = {"ackid1": "PERMANENT_FAILURE_INVALID_ACK_ID"}
requests_completed, requests_to_retry = streaming_pull_manager._process_requests(
None, ack_reqs_dict, errors_dict
)
assert requests_completed[0].ack_id == "ackid1"
with pytest.raises(subscriber_exceptions.AcknowledgeError) as exc_info:
future.result()
assert (
exc_info.value.error_code
== subscriber_exceptions.AcknowledgeStatus.INVALID_ACK_ID
)
assert not requests_to_retry
def test_process_requests_transient_error_returns_request_for_retrying():
# a transient error returns the request in `requests_to_retry`
future = futures.Future()
ack_reqs_dict = {
"ackid1": requests.AckRequest(
ack_id="ackid1", byte_size=0, time_to_ack=20, ordering_key="", future=future
)
}
errors_dict = {"ackid1": "TRANSIENT_FAILURE_INVALID_ACK_ID"}
requests_completed, requests_to_retry = streaming_pull_manager._process_requests(
None, ack_reqs_dict, errors_dict
)
assert not requests_completed
assert requests_to_retry[0].ack_id == "ackid1"
assert not future.done()
def test_process_requests_unknown_error_raises_exception():
# an unknown error raises an exception
future = futures.Future()
ack_reqs_dict = {
"ackid1": requests.AckRequest(
ack_id="ackid1", byte_size=0, time_to_ack=20, ordering_key="", future=future
)
}
errors_dict = {"ackid1": "unknown_error"}
requests_completed, requests_to_retry = streaming_pull_manager._process_requests(
None, ack_reqs_dict, errors_dict
)
assert requests_completed[0].ack_id == "ackid1"
with pytest.raises(subscriber_exceptions.AcknowledgeError) as exc_info:
future.result()
assert exc_info.value.error_code == subscriber_exceptions.AcknowledgeStatus.OTHER
assert exc_info.value.info == "unknown_error"
assert not requests_to_retry
def test_process_requests_retriable_error_status_returns_request_for_retrying():
# a retriable error status returns the request in `requests_to_retry`
retriable_errors = [
code_pb2.DEADLINE_EXCEEDED,
code_pb2.RESOURCE_EXHAUSTED,
code_pb2.ABORTED,
code_pb2.INTERNAL,
code_pb2.UNAVAILABLE,
]
for retriable_error in retriable_errors:
future = futures.Future()
ack_reqs_dict = {
"ackid1": requests.AckRequest(
ack_id="ackid1",
byte_size=0,
time_to_ack=20,
ordering_key="",
future=future,
)
}
st = status_pb2.Status()
st.code = retriable_error
(
requests_completed,
requests_to_retry,
) = streaming_pull_manager._process_requests(st, ack_reqs_dict, None)
assert not requests_completed
assert requests_to_retry[0].ack_id == "ackid1"
assert not future.done()
def test_process_requests_permission_denied_error_status_raises_exception():
# a permission-denied error status raises an exception
future = futures.Future()
ack_reqs_dict = {
"ackid1": requests.AckRequest(
ack_id="ackid1", byte_size=0, time_to_ack=20, ordering_key="", future=future
)
}
st = status_pb2.Status()
st.code = code_pb2.Code.PERMISSION_DENIED
requests_completed, requests_to_retry = streaming_pull_manager._process_requests(
st, ack_reqs_dict, None
)
assert requests_completed[0].ack_id == "ackid1"
with pytest.raises(subscriber_exceptions.AcknowledgeError) as exc_info:
future.result()
assert (
exc_info.value.error_code
== subscriber_exceptions.AcknowledgeStatus.PERMISSION_DENIED
)
assert exc_info.value.info is None
assert not requests_to_retry
def test_process_requests_failed_precondition_error_status_raises_exception():
# a failed-precondition error status raises an exception
future = futures.Future()
ack_reqs_dict = {
"ackid1": requests.AckRequest(
ack_id="ackid1", byte_size=0, time_to_ack=20, ordering_key="", future=future
)
}
st = status_pb2.Status()
st.code = code_pb2.Code.FAILED_PRECONDITION
requests_completed, requests_to_retry = streaming_pull_manager._process_requests(
st, ack_reqs_dict, None
)
assert requests_completed[0].ack_id == "ackid1"
with pytest.raises(subscriber_exceptions.AcknowledgeError) as exc_info:
future.result()
assert (
exc_info.value.error_code
== subscriber_exceptions.AcknowledgeStatus.FAILED_PRECONDITION
)
assert exc_info.value.info is None
assert not requests_to_retry
def test_process_requests_other_error_status_raises_exception():
# an unrecognized error status raises an exception
future = futures.Future()
ack_reqs_dict = {
"ackid1": requests.AckRequest(
ack_id="ackid1", byte_size=0, time_to_ack=20, ordering_key="", future=future
)
}
st = status_pb2.Status()
st.code = code_pb2.Code.OUT_OF_RANGE
requests_completed, requests_to_retry = streaming_pull_manager._process_requests(
st, ack_reqs_dict, None
)
assert requests_completed[0].ack_id == "ackid1"
with pytest.raises(subscriber_exceptions.AcknowledgeError) as exc_info:
future.result()
assert exc_info.value.error_code == subscriber_exceptions.AcknowledgeStatus.OTHER
assert not requests_to_retry
def test_process_requests_mixed_success_and_failure_acks():
# mixed success and failure (acks)
future1 = futures.Future()
future2 = futures.Future()
future3 = futures.Future()
ack_reqs_dict = {
"ackid1": requests.AckRequest(
ack_id="ackid1",
byte_size=0,
time_to_ack=20,
ordering_key="",
future=future1,
),
"ackid2": requests.AckRequest(
ack_id="ackid2",
byte_size=0,
time_to_ack=20,
ordering_key="",
future=future2,
),
"ackid3": requests.AckRequest(
ack_id="ackid3",
byte_size=0,
time_to_ack=20,
ordering_key="",
future=future3,
),
}
errors_dict = {
"ackid1": "PERMANENT_FAILURE_INVALID_ACK_ID",
"ackid2": "TRANSIENT_FAILURE_INVALID_ACK_ID",
}
requests_completed, requests_to_retry = streaming_pull_manager._process_requests(
None, ack_reqs_dict, errors_dict
)
# message with ack_id 'ackid1' fails with an exception
assert requests_completed[0].ack_id == "ackid1"
with pytest.raises(subscriber_exceptions.AcknowledgeError) as exc_info:
future1.result()
assert (
exc_info.value.error_code
== subscriber_exceptions.AcknowledgeStatus.INVALID_ACK_ID
)
# message with ack_id 'ackid2' is to be retried
assert requests_to_retry[0].ack_id == "ackid2"
assert not requests_to_retry[0].future.done()
# message with ack_id 'ackid3' succeeds
assert requests_completed[1].ack_id == "ackid3"
assert future3.result() == subscriber_exceptions.AcknowledgeStatus.SUCCESS
def test_process_requests_mixed_success_and_failure_modacks():
# mixed success and failure (modacks)
future1 = futures.Future()
future2 = futures.Future()
future3 = futures.Future()
ack_reqs_dict = {
"ackid1": requests.ModAckRequest(ack_id="ackid1", seconds=60, future=future1),
"ackid2": requests.ModAckRequest(ack_id="ackid2", seconds=60, future=future2),
"ackid3": requests.ModAckRequest(ack_id="ackid3", seconds=60, future=future3),
}
errors_dict = {
"ackid1": "PERMANENT_FAILURE_INVALID_ACK_ID",
"ackid2": "TRANSIENT_FAILURE_INVALID_ACK_ID",
}
requests_completed, requests_to_retry = streaming_pull_manager._process_requests(
None, ack_reqs_dict, errors_dict
)
# message with ack_id 'ackid1' fails with an exception
assert requests_completed[0].ack_id == "ackid1"
with pytest.raises(subscriber_exceptions.AcknowledgeError) as exc_info:
future1.result()
assert (
exc_info.value.error_code
== subscriber_exceptions.AcknowledgeStatus.INVALID_ACK_ID
)
# message with ack_id 'ackid2' is to be retried
assert requests_to_retry[0].ack_id == "ackid2"
assert not requests_to_retry[0].future.done()
# message with ack_id 'ackid3' succeeds
assert requests_completed[1].ack_id == "ackid3"
assert future3.result() == subscriber_exceptions.AcknowledgeStatus.SUCCESS
|
|
# -*- coding: utf-8 -*-
"""
Taggers wrapping the neural networks.
"""
import logging
import numpy as np
from itertools import izip
from . import utils
from . import config
from . import attributes
from .metadata import Metadata
from .pos import POSReader
from .srl import SRLReader
from .parse import DependencyReader
from .network import Network, ConvolutionalNetwork, ConvolutionalDependencyNetwork
def load_network(md):
"""
Loads the network from the default file and returns it.
"""
logger = logging.getLogger("Logger")
is_srl = md.task.startswith('srl') and md.task != 'srl_predicates'
logger.info('Loading network')
if is_srl:
net_class = ConvolutionalNetwork
elif md.task.endswith('dependency'):
net_class = ConvolutionalDependencyNetwork
else:
net_class = Network
nn = net_class.load_from_file(md.paths[md.network])
logger.info('Done')
return nn
def create_reader(md, gold_file=None):
"""
Creates a TextReader object for the given task and loads its dictionary.
:param md: a metadata object describing the task
:param gold_file: path to a file with gold standard data, if
the reader will be used for testing.
"""
logger = logging.getLogger('Logger')
logger.info('Loading text reader...')
if md.task == 'pos':
tr = POSReader(md, filename=gold_file)
elif 'dependency' in md.task:
labeled = md.task.startswith('labeled')
tr = DependencyReader(md, filename=gold_file, labeled=labeled)
elif md.task.startswith('srl'):
        tr = SRLReader(md, filename=gold_file,
                       only_boundaries=(md.task == 'srl_boundary'),
                       only_classify=(md.task == 'srl_classify'),
                       only_predicates=(md.task == 'srl_predicates'))
else:
raise ValueError("Unknown task: %s" % md.task)
logger.info('Done')
return tr
def _group_arguments(tokens, predicate_positions, boundaries, labels):
"""
    Groups the words pertaining to each argument and returns a list with one
    (predicate, arg_structure) tuple per predicate (see the example sketched in
    the comment after this function).
"""
arg_structs = []
for predicate_position, pred_boundaries, pred_labels in izip(predicate_positions,
boundaries,
labels):
structure = {}
for token, boundary_tag in izip(tokens, pred_boundaries):
if boundary_tag == 'O':
continue
elif boundary_tag == 'B':
argument_tokens = [token]
elif boundary_tag == 'I':
argument_tokens.append(token)
elif boundary_tag == 'E':
argument_tokens.append(token)
tag = pred_labels.pop(0)
structure[tag] = argument_tokens
else:
# boundary_tag == 'S'
tag = pred_labels.pop(0)
structure[tag] = [token]
predicate = tokens[predicate_position]
arg_structs.append((predicate, structure))
return arg_structs
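# Illustrative sketch of _group_arguments with made-up data: for
# tokens=['John', 'saw', 'the', 'dog'], predicate_positions=[1],
# boundaries=[['S', 'O', 'B', 'E']] and labels=[['A0', 'A1']], it returns
# [('saw', {'A0': ['John'], 'A1': ['the', 'dog']})].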
class SRLAnnotatedSentence(object):
"""
Class storing a sentence with annotated semantic roles.
It stores a list with the sentence tokens, called `tokens`, and a list of tuples
    in the format `(predicate, arg_structure)`. Each `arg_structure` is a dict mapping
    semantic roles to the words that constitute them. This is used instead of a two-level
dictionary because one sentence may have more than one occurrence of the same
predicate.
This class is used only for storing data.
"""
def __init__(self, tokens, arg_structures):
"""
Creates an instance of a sentence with SRL data.
:param tokens: a list of strings
:param arg_structures: a list of tuples in the format (predicate, mapping).
Each predicate is a string and each mapping is a dictionary mapping role labels
to the words that constitute it.
"""
self.tokens = tokens
self.arg_structures = arg_structures
class ParsedSentence(object):
"""
Class for storing a sentence with dependency parsing annotation.
It stores a list of tokens, the dependency heads, dependency labels and POS tags
    if the parser used them. Each dependency head is the index of that token's
    head; -1 means the token depends directly on the root.
"""
def __init__(self, tokens, heads, labels, pos=None):
"""
Constructor.
:param tokens: list of strings
:param heads: list of integers (-1 means dependency to root, others
are token indices)
:param labels: list of strings
:param pos: None or list of strings
"""
self.tokens = tokens
self.heads = heads
self.labels = labels
self.pos = pos
def __len__(self):
return len(self.tokens)
def to_conll(self):
"""
Return a string representation of the sentence in CoNLL X format.
Each line has:
[number starting from 1] token _ POS POS _ head label
        Token numbers start from 1; the root is referred to as 0.
POS is only available if the original parser used it.
"""
result = []
for i in range(len(self.tokens)):
token = self.tokens[i]
head = self.heads[i] + 1
label = self.labels[i]
pos = self.pos[i] if self.pos else '_'
line = u'{id}\t{token}\t_\t{pos}\t{pos}\t_\t{head}\t{label}'
result.append(line.format(id=i+1, pos=pos, head=head, label=label, token=token))
return '\n'.join(result)
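# Example of the tab-separated CoNLL-X layout produced by ParsedSentence.to_conll();
# the tokens, POS tags, heads and labels below are made up for illustration:
#   1   The     _   DT   DT   _   2   det
#   2   dog     _   NN   NN   _   3   nsubj
#   3   barked  _   VBD  VBD  _   0   root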
class Tagger(object):
"""
Base class for taggers. It should not be instantiated.
"""
def __init__(self, data_dir=None, language='en'):
"""Creates a tagger and loads data preemptively"""
asrt_msg = "nlpnet data directory is not set. \
If you don't have the trained models, download them from http://nilc.icmc.usp.br/nlpnet/models.html"
if data_dir is None:
assert config.data_dir is not None, asrt_msg
self.paths = config.FILES
else:
self.paths = config.get_config_paths(data_dir)
self.data_dir = data_dir
self.language = language
self._load_data()
def _load_data(self):
"""Implemented by subclasses"""
pass
class SRLTagger(Tagger):
"""
An SRLTagger loads the models and performs SRL on text.
    It works in three stages: predicate identification, argument detection and
argument classification.
"""
def _load_data(self):
"""Loads data for SRL"""
# load boundary identification network and reader
md_boundary = Metadata.load_from_file('srl_boundary', self.paths)
self.boundary_nn = load_network(md_boundary)
self.boundary_reader = create_reader(md_boundary)
self.boundary_reader.create_converter()
self.boundary_itd = self.boundary_reader.get_inverse_tag_dictionary()
# same for arg classification
md_classify = Metadata.load_from_file('srl_classify', self.paths)
self.classify_nn = load_network(md_classify)
self.classify_reader = create_reader(md_classify)
self.classify_reader.create_converter()
self.classify_itd = self.classify_reader.get_inverse_tag_dictionary()
# predicate detection
md_pred = Metadata.load_from_file('srl_predicates', self.paths)
self.pred_nn = load_network(md_pred)
self.pred_reader = create_reader(md_pred)
self.pred_reader.create_converter()
def find_predicates(self, tokens):
"""
Finds out which tokens are predicates.
:param tokens: a list of attribute.Token elements
:returns: the indices of predicate tokens
"""
sent_codified = np.array([self.pred_reader.converter.convert(token)
for token in tokens])
answer = np.array(self.pred_nn.tag_sentence(sent_codified))
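        # the predicate network tags every token; nonzero()[0] keeps the indices
        # of the tokens whose answer is non-zero, i.e. the predicates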
return answer.nonzero()[0]
    def tag(self, text, no_repeats=False):
"""
Runs the SRL process on the given text.
:param text: unicode or str encoded in utf-8.
:param no_repeats: whether to prevent repeated argument labels
:returns: a list of SRLAnnotatedSentence objects
"""
tokens = utils.tokenize(text, self.language)
result = []
for sent in tokens:
            tagged = self.tag_tokens(sent, no_repeats=no_repeats)
result.append(tagged)
return result
def tag_tokens(self, tokens, no_repeats=False):
"""
Runs the SRL process on the given tokens.
:param tokens: a list of tokens (as strings)
:param no_repeats: whether to prevent repeated argument labels
        :returns: an SRLAnnotatedSentence object whose `arg_structures` attribute
        holds (predicate, arg_structure) tuples, where each arg_structure is a
        dictionary mapping argument labels to the words they include.
"""
if self.language == 'pt':
tokens_obj = [attributes.Token(utils.clean_text(t, False)) for t in tokens]
else:
tokens_obj = [attributes.Token(t) for t in tokens]
converted_bound = np.array([self.boundary_reader.converter.convert(t)
for t in tokens_obj])
converted_class = np.array([self.classify_reader.converter.convert(t)
for t in tokens_obj])
pred_positions = self.find_predicates(tokens_obj)
# first, argument boundary detection
# the answer includes all predicates
answers = self.boundary_nn.tag_sentence(converted_bound, pred_positions)
boundaries = [[self.boundary_itd[x] for x in pred_answer]
for pred_answer in answers]
arg_limits = [utils.boundaries_to_arg_limits(pred_boundaries)
for pred_boundaries in boundaries]
# now, argument classification
answers = self.classify_nn.tag_sentence(converted_class,
pred_positions, arg_limits,
allow_repeats=not no_repeats)
arguments = [[self.classify_itd[x] for x in pred_answer]
for pred_answer in answers]
structures = _group_arguments(tokens, pred_positions, boundaries, arguments)
return SRLAnnotatedSentence(tokens, structures)
class DependencyParser(Tagger):
"""A Dependency Parser based on a neural network tagger."""
def __init__(self, *args, **kwargs):
"""
Set the data directory for the POS tagger, if one is used,
and call the parent constructor.
"""
super(DependencyParser, self).__init__(*args, **kwargs)
def _load_data(self):
"""Loads data for Dependency Parsing"""
md_udep = Metadata.load_from_file('unlabeled_dependency', paths=self.paths)
self.unlabeled_nn = load_network(md_udep)
self.unlabeled_reader = create_reader(md_udep)
md_ldep = Metadata.load_from_file('labeled_dependency', paths=self.paths)
self.labeled_nn = load_network(md_ldep)
self.labeled_reader = create_reader(md_ldep)
self.itd = self.labeled_reader.get_inverse_tag_dictionary()
self.use_pos = md_udep.use_pos or md_ldep.use_pos
if self.use_pos:
self.pos_tagger = POSTagger(self.data_dir, language=self.language)
def parse(self, text):
"""
        Splits the given text into sentences and determines their
dependency trees. If you want to provide your own tokenized
text, use `parse_sentence` instead.
:param text: a string
:returns: a list of ParsedSentence's
"""
sentences = utils.tokenize(text, self.language)
result = []
for sent in sentences:
parsed = self.parse_sentence(sent)
result.append(parsed)
return result
def tag_tokens(self, tokens):
"""
Parse the given sentence. This function is just an alias for
`parse_sentence`.
"""
return self.parse_sentence(tokens)
def parse_sentence(self, tokens):
"""
Parse the given sentence. It must be already tokenized; if you
want nlpnet to tokenize the text, use the method `parse` instead.
:param tokens: a list of strings
:return: a ParsedSentence instance
"""
original_tokens = tokens
tokens_obj = []
        # if the parser uses POS as a feature, have a tagger tag it first
if self.use_pos:
tokens = self.pos_tagger.tag_tokens(tokens, return_tokens=True)
for token in tokens:
if self.use_pos:
# if we tagged for POS, each item is a tuple
word, pos = token
else:
                pos = None
                word = token
tokens_obj.append(attributes.Token(word, pos=pos))
converted_tokens = self.unlabeled_reader.codify_sentence(tokens_obj)
heads = self.unlabeled_nn.tag_sentence(converted_tokens)
# the root is returned having a value == len(sentence)
root = heads.argmax()
heads[root] = root
converted_tokens = self.labeled_reader.codify_sentence(tokens_obj)
label_codes = self.labeled_nn.tag_sentence(converted_tokens, heads)
labels = [self.itd[code] for code in label_codes]
# to the final answer, signal the root with -1
heads[root] = -1
if self.use_pos:
# unzip
pos_tags = zip(*tokens)[1]
else:
pos_tags = None
parsed = ParsedSentence(original_tokens, heads, labels, pos_tags)
return parsed
def tag(self, text):
"""
Parse the given text. This is just an alias for the
`parse` method.
"""
return self.parse(text)
class POSTagger(Tagger):
"""A POSTagger loads the models and performs POS tagging on text."""
def _load_data(self):
"""Loads data for POS"""
md = Metadata.load_from_file('pos', self.paths)
self.nn = load_network(md)
self.reader = create_reader(md)
self.reader.create_converter()
self.itd = self.reader.get_inverse_tag_dictionary()
def tag(self, text):
"""
Tags the given text.
        :param text: a string or unicode object. Strings are assumed to be utf-8 encoded.
:returns: a list of lists (sentences with tokens).
Each sentence has (token, tag) tuples.
"""
tokens = utils.tokenize(text, self.language)
result = []
for sent in tokens:
tagged = self.tag_tokens(sent, return_tokens=True)
result.append(tagged)
return result
def tag_tokens(self, tokens, return_tokens=False):
"""
Tags a given list of tokens.
Tokens should be produced with the nlpnet tokenizer in order to
match the entries in the vocabulary. If you have non-tokenized text,
use POSTagger.tag(text).
:param tokens: a list of strings
:param return_tokens: if True, includes the tokens in the return,
as a list of tuples (token, tag).
:returns: a list of strings (the tags)
"""
converter = self.reader.converter
converted_tokens = np.array([converter.convert(token)
for token in tokens])
answer = self.nn.tag_sentence(converted_tokens)
tags = [self.itd[tag] for tag in answer]
if return_tokens:
return zip(tokens, tags)
return tags
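# Usage sketch (assumes the trained nlpnet models have been downloaded and that
# `data_dir` points at them; the sentence and the resulting tag labels are
# illustrative, not actual model output):
#
#   tagger = POSTagger(data_dir='/path/to/nlpnet-data', language='en')
#   tagger.tag(u'The dog barked.')
#   # -> [[(u'The', u'DT'), (u'dog', u'NN'), (u'barked', u'VBD'), (u'.', u'.')]]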
|
|
"""
parted - Command
================
This module provides processing for the ``parted`` command. The output is parsed
by the ``PartedL`` class. Attributes are provided for each field of the disk,
along with a list of ``Partition`` objects, one for each partition in the output.
Typical content of the ``parted -l`` command output
looks like::
Model: ATA TOSHIBA MG04ACA4 (scsi)
Disk /dev/sda: 4001GB
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags: pmbr_boot
Number Start End Size File system Name Flags
1 1049kB 2097kB 1049kB bios_grub
2 2097kB 526MB 524MB xfs
3 526MB 4001GB 4000GB lvm
The columns may vary depending upon the type of device.
Note:
The examples in this module may be executed with the following command:
``python -m insights.parsers.parted``
Examples:
>>> parted_data = '''
... Model: ATA TOSHIBA MG04ACA4 (scsi)
... Disk /dev/sda: 4001GB
... Sector size (logical/physical): 512B/512B
... Partition Table: gpt
... Disk Flags: pmbr_boot
...
... Number Start End Size File system Name Flags
... 1 1049kB 2097kB 1049kB bios_grub
... 2 2097kB 526MB 524MB xfs
... 3 526MB 4001GB 4000GB lvm
... '''.strip()
>>> from insights.tests import context_wrap
>>> shared = {PartedL: PartedL(context_wrap(parted_data))}
>>> parted_info = shared[PartedL]
>>> parted_info.data
{'partition_table': 'gpt', 'sector_size': '512B/512B', 'disk_flags': 'pmbr_boot', 'partitions': [{'end': '2097kB', 'name': 'bios_grub', 'number': '1', 'start': '1049kB', 'flags': 'bios_grub', 'file_system': 'bios_grub', 'size': '1049kB'}, {'start': '2097kB', 'size': '524MB', 'end': '526MB', 'number': '2', 'file_system': 'xfs'}, {'end': '4001GB', 'name': 'lvm', 'number': '3', 'start': '526MB', 'flags': 'lvm', 'file_system': 'lvm', 'size': '4000GB'}], 'model': 'ATA TOSHIBA MG04ACA4 (scsi)', 'disk': '/dev/sda', 'size': '4001GB'}
>>> parted_info.data['model']
'ATA TOSHIBA MG04ACA4 (scsi)'
>>> parted_info.disk
'/dev/sda'
>>> parted_info.logical_sector_size
'512B'
>>> parted_info.physical_sector_size
'512B'
>>> parted_info.boot_partition
>>> parted_info.data['disk_flags']
'pmbr_boot'
>>> len(parted_info.partitions)
3
>>> parted_info.partitions[0].data
{'end': '2097kB', 'name': 'bios_grub', 'number': '1', 'start': '1049kB', 'flags': 'bios_grub', 'file_system': 'bios_grub', 'size': '1049kB'}
>>> parted_info.partitions[0].number
'1'
>>> parted_info.partitions[0].start
'1049kB'
>>> parted_info.partitions[0].end
'2097kB'
>>> parted_info.partitions[0].size
'1049kB'
>>> parted_info.partitions[0].file_system
'bios_grub'
>>> parted_info.partitions[0].type
>>> parted_info.partitions[0].flags
'bios_grub'
"""
from .. import Parser, parser
from ..parsers import ParseException
class Partition(object):
"""Class to contain information for one partition.
Represents the values from one row of the partition information from the
``parted`` command. Column names have been converted to lowercase and are
provided as attributes. Column names may vary so the ``get`` method may
be used to check for the presence of a column.
Attributes:
data (dict): Dictionary of partition information keyed by column names
in lowercase.
"""
def __init__(self, data):
self.data = data
@property
def number(self):
"""str: Partition number."""
return self.data.get('number')
@property
def start(self):
"""str: Starting location for the partition."""
return self.data.get('start')
@property
def end(self):
"""str: Ending location for the partition."""
return self.data.get('end')
@property
def size(self):
"""str: Size of the partition."""
return self.data.get('size')
@property
def file_system(self):
"""str: File system type."""
return self.data.get('file_system')
@property
def type(self):
"""str: File system type."""
return self.data.get('type')
@property
def flags(self):
"""str: Partition flags."""
return self.data.get('flags')
def get(self, item):
"""Get information for column ``item`` or ``None`` if not present."""
return self.data.get(item)
@parser("parted_-l")
class PartedL(Parser):
"""Class to represent attributes of the ``parted`` command output.
The columns may vary depending upon the type of device.
Attributes:
data (dict): Dictionary of information returned by ``parted`` command.
Raises:
ParseException: Raised if ``parted`` output indicates "error" or
"warning" in first line, or if "disk" field is not present, or if
there is an error parsing the data.
ValueError: Raised if there is an error parsing the partition table.
"""
@property
def partitions(self):
"""list: List of ``Partition`` objects for each partition."""
return self._partitions
@property
def disk(self):
"""str: Disk information."""
return self.data['disk']
@property
def logical_sector_size(self):
"""str: Logical part of sector size."""
if self._sector_size:
return self._sector_size[0]
@property
def physical_sector_size(self):
"""str: Physical part of sector size."""
if self._sector_size:
return self._sector_size[1]
@property
def boot_partition(self):
"""Partition: Returns a ``Partition`` object if `boot` is found in
partition flags. ``None`` is returned otherwise."""
return self._boot_partition
def get(self, item):
"""Returns a value for the specified ``item`` key."""
return self.data.get(item)
def parse_content(self, content):
# If device was not present output is error message
if content[0].startswith("Error") or content[0].startswith("Warning"):
raise ParseException("PartedL content indicates an error %s" % content[0])
dev_info = {}
table_lines = []
for line in content:
if not line.strip():
continue
if ':' in line:
label_value = line.split(':')
label = label_value[0].strip().lower()
                value = label_value[1] if len(label_value) == 2 else None
                value = value.strip() if value and value.strip() else None
if value:
# Single word labels
if ' ' not in label:
dev_info[label] = value
else:
if label.startswith("disk") and '/' in label:
disk_parts = label.split()
dev_info['disk'] = disk_parts[1].strip()
dev_info['size'] = value
elif label.startswith("sector"):
dev_info['sector_size'] = value
else:
label = label.replace(' ', '_')
dev_info[label] = value
else:
table_lines.append(line)
if 'disk' not in dev_info:
raise ParseException("PartedL unable to locate Disk in content")
partitions = []
if table_lines:
line = table_lines[0].replace('File system', 'File_system')
cols = line.strip().split()
columns = {}
for n in cols:
columns[n] = {'name': n.lower()}
columns[n]['start'] = line.find(n)
columns[n]['end'] = columns[n]['start'] + len(n)
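            # e.g. for a header such as "Number  Start   End  Size ..." this
            # records where each column name starts in the header line; each data
            # row is then read from that offset and the first whitespace-delimited
            # word becomes the column's value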
for line in table_lines[1:]:
line = line.rstrip()
part = {}
for col in columns.values():
if len(line) > col['start']:
val = line[col['start']:]
val = val.strip().split(None, 1)[0]
part[col['name']] = val
if part:
partitions.append(part)
self._partitions = []
self._boot_partition = None
self._sector_size = None
self.data = {}
if dev_info:
if partitions:
dev_info['partitions'] = partitions
for part in partitions:
self._partitions.append(Partition(part))
if 'flags' in part and 'boot' in part['flags']:
self._boot_partition = Partition(part)
self.data = dev_info
if 'sector_size' in self.data:
self._sector_size = self.data['sector_size'].split('/', 1)
if len(self._sector_size) != 2:
self._sector_size = None
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012, Roboterclub Aachen e.V.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Roboterclub Aachen e.V. nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY ROBOTERCLUB AACHEN E.V. ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ROBOTERCLUB AACHEN E.V. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
import os
import sys
import builder_base
import numpy as np
from pyx import *
from pyx.connector import arc, curve
sys.path = [os.path.join(os.path.dirname(__file__), '..')] + sys.path
from xmlparser.utils import SingleAssignDictionary
class SystemLayoutBuilder(builder_base.Builder):
"""
Generate a visual map of all containers, components and events.
A common call would be like:
# 2012
$ python system_layout.py ../../../../roboter/2012_captain/software/global/xml/robot.xml -o /tmp -s simulator -s "drive simulation" -s "External"
# 2015
$ python system_layout.py ../../../../season/common/robot.xml --dtdpath ../../../tools/system_design/xml/dtd -o /tmp
$ python system_layout.py ../../../../season/common/robot.xml --dtdpath ../../../tools/system_design/xml/dtd -o /tmp -s "drive big simulation" -s "drive little simulation" -s "External" -s "simulator" -s "drive big"
A PDF called system.pdf is generated in /tmp/system.pdf.
If you want to display callable actions for each component add the -a switch.
"""
VERSION = "0.1"
def get_y(self, event):
"""
Get the vertical coordinate for an event line
"""
return 8.0 - (self.eventsSorted.index(event) * 0.1)
def get_component_width(self, component):
"""
Get the horizontal width of a component.
"""
# width of text
text_width = text.text(0, 0, r"\bf %s" % component, self.textattrs).width.x
# number of arrows
nr = len(component.events.subscribe) + len(component.events.publish)
# Add actions if desired
if self.options.actions:
nr = nr + len(component.actions)
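        # the component must be wide enough for both its (scaled) name and one
        # 0.1-unit slot per event/action arrow; e.g. a component with 4 published
        # and 3 subscribed events needs at least 0.1 * 7 = 0.7 units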
return max((text_width * 15) + 0.1, 0.1 * nr)
def get_container_width(self, container):
"""
Get the horizontal width of a container.
"""
width = 0
for component in container.components:
# include actions from abstract component
component = component.flattened()
width = width + self.get_component_width(component) + 0.1
return width
def get_graph_width(self, tree):
width = 0
for container in tree.container:
# skip containers that the user asked to skip
if self.options.skipList is not None and container.name in self.options.skipList:
continue
width = width + self.get_container_width(container) + 0.1
return width
def generate(self):
# check the commandline options
if not self.options.outpath:
raise builder_base.BuilderException("You need to provide an output path!")
# Initialise
unit.set(uscale=7)
text.set(mode="latex")
text.preamble(r"\usepackage{helvet}")
text.preamble(r"\renewcommand*\familydefault{\sfdefault}")
text.preamble(r"\normalfont")
c = canvas.canvas()
### Object to draw
eventArrowsSource = []
eventCirclesSource = []
eventTextSource = []
eventCirclesSinkless = []
eventCirclesSourcless = []
eventArrowsSink = []
eventCirclesSink = []
eventTextSink = []
actionArrows = []
actionText = []
### Text Attributes
self.textattrs = [text.halign.center, text.vshift.middlezero]
self.textcontainerattrs = [text.halign.left, text.vshift.middlezero]
self.texteventattrs = [text.halign.left, text.vshift(-0.6)]
self.texteventrotattrs = [text.halign.left, text.vshift(-0.6), trafo.rotate(90), color.rgb.red]
self.texteventsubscribedrotattrs = [text.halign.left, text.vshift(-0.6), trafo.rotate(90), color.rgb.blue]
self.textactionrotattrs = [text.halign.left, text.vshift(-0.6), trafo.rotate(90), color.rgb(0.0, 0.5, 0.1)]
# raster for design
debug = False
if debug:
for x in np.arange(0, 20, 0.05):
c.stroke(path.line(x, 10, x, 0),
[style.linewidth.thin, color.gray(0.8)])
for y in np.arange(0, 10, 0.05):
c.stroke(path.line(0, y, 20, y),
[style.linewidth.thin, color.gray(0.8)])
for x in np.arange(0, 20, 0.5):
c.stroke(path.line(x, 10, x, 0),
[style.linewidth.thin, color.gray(0.5)])
for y in np.arange(0, 10, 0.5):
c.stroke(path.line(0, y, 20, y),
[style.linewidth.thin, color.gray(0.5)])
# Sorted events
# Sort events by publishing component. Makes graph more structured.
self.eventsSorted = []
print("Analysing containers:")
for container in self.tree.container:
print " * " + container.name
print("Done. Creating graph")
for container in self.tree.container:
if container.name is None:
continue
if self.options.skipList is not None and container.name in self.options.skipList:
continue
for component in container.components:
# include actions from abstract component
component = component.flattened()
for event in component.events.publish:
# add only once
if not (event in self.eventsSorted):
self.eventsSorted.append(event)
# Add sourceless events to the end
for event in self.tree.events:
if not (event in self.eventsSorted):
self.eventsSorted.append(event)
# Draw light gray horizontal lines for all events
graph_width = self.get_graph_width(self.tree) + 2.1
for event in self.tree.events:
event_y = self.get_y(event)
c.stroke(path.line(0, event_y, graph_width, event_y),
[style.linewidth.THick, color.grey(0.90)])
# Draw Id and name of event
A = text.text(0, event_y, r"\bf \texttt{[0x%02x]} %s" % (event.id, event.name), self.texteventattrs)
c.insert(A)
# Draw type of Event
if event.type is not None:
A = text.text(1.1, event_y, r"%s" % (event.type.name), self.texteventattrs)
c.insert(A)
# Legend
A = text.text( 0, 8.1, r"\textbf{%s}" % ("Id"), self.texteventattrs)
c.insert(A)
A = text.text(0.175, 8.1, r"\textbf{%s}" % ("Name"), self.texteventattrs)
c.insert(A)
A = text.text(1.1, 8.1, r"\textbf{%s}" % ("Type"), self.texteventattrs)
c.insert(A)
sinklessDict = dict()
sourcelessDict = dict()
# Write sink and sourceless events
for event in self.tree.events:
sourceless = True
sinkless = True
for component in self.tree.components:
# include actions from abstract component
component = component.flattened()
if self.options.skipList is not None and component.name in self.options.skipList:
continue
for event_cmp in component.events.publish:
if event_cmp == event:
sourceless = False
for event_cmp in component.events.subscribe:
if event_cmp == event:
sinkless = False
if sourceless:
A = text.text(1.8, self.get_y(event), r"\bf sourceless", self.texteventattrs)
c.insert(A)
sourcelessDict[event.name] = sourceless
if sinkless:
A = text.text(2.1, self.get_y(event), r"\bf sinkless", self.texteventattrs)
c.insert(A)
sinklessDict[event.name] = sinkless
# Empty dictionaries used to find the leftmost and rightmost x positions
# between which the thick line for each event must be drawn.
eventsLeft = SingleAssignDictionary('EventLeft')
eventsRight = dict()
# Write what was skipped
c.insert(text.text(1, 9.8, r"\bf System Design generated from base file", self.textattrs))
c.insert(text.text(1, 9.7, r"\bf %s" % self.xmlfile.replace('_', '\_'), self.textattrs))
c.insert(text.text(1, 9.5, r"\bf Skipped containers:", self.textattrs))
y = 9.40
if self.options.skipList is not None:
for s in self.options.skipList:
A = text.text(1, y, r"\bf %s" % s, self.textattrs)
c.insert(A)
y = y - .1
# Draw containers at the top
container_x = 2.5
for container in self.tree.container:
# skip containers that the user asked to skip
if self.options.skipList is not None and container.name in self.options.skipList:
continue
A = text.text(container_x + 0.15, 9.90, r"\bf %s" % container, self.textcontainerattrs)
c.draw(path.rect(container_x + 0.05, 9.65, self.get_container_width(container), 0.30),
[deco.stroked(), deco.filled([color.grey(0.85)])])
c.insert(A)
component_x = container_x
container_x = container_x + self.get_container_width(container) + 0.1
for component in container.components:
# include actions from abstract component
component = component.flattened()
# A = text.text(0, event_y, r"\bf \texttt{[0x%02x]} %s" % (event.id, event.name), self.texteventattrs)
A = text.text(component_x + self.get_component_width(component)/2 + 0.1, 9.8, r"\bf \texttt{[0x%02x]} %s" % (component.id, component.name), self.textattrs)
c.draw(path.rect(component_x + 0.1, 9.75, self.get_component_width(component), 0.10),
[deco.stroked(), deco.filled([color.grey(0.65)])])
c.insert(A)
event_x = component_x + 0.15
component_x = component_x + self.get_component_width(component) + 0.1
# Draw vertical lines for published events
for event in component.events.publish:
eventArrowsSource.append(path.line(event_x, 9.75, event_x, self.get_y(event)))
eventCirclesSource.append([event_x, 9.75])
if sinklessDict[event.name]:
eventCirclesSinkless.append([event_x, self.get_y(event)])
# write name of event
A = text.text(event_x, 8.1, r"\bf \texttt{[0x%02x]} %s" % (event.id, event.name), self.texteventrotattrs)
c.insert(A)
# Store leftmost position
try:
eventsLeft[event.name] = event_x
except Exception:
# SingleAssignDictionary keeps only the first (leftmost) assignment.
pass
# this could be the rightmost position (plain dict, so later columns overwrite it)
eventsRight[event.name] = event_x
event_x = event_x + 0.1
for event in component.events.subscribe:
eventArrowsSink.append(path.line(event_x, self.get_y(event), event_x, 9.75))
if sourcelessDict[event.name]:
eventCirclesSourceless.append([event_x, self.get_y(event)])
else:
eventCirclesSink.append([event_x, self.get_y(event)])
A = text.text(event_x, 8.1, r"\bf \texttt{[0x%02x]} %s" % (event.id, event.name), self.texteventsubscribedrotattrs)
c.insert(A)
# Store leftmost position
try:
eventsLeft[event.name] = event_x
except Exception:
# SingleAssignDictionary keeps only the first (leftmost) assignment.
pass
eventsRight[event.name] = event_x
event_x = event_x + 0.1
### Draw actions if selected
if self.options.actions:
action_x = event_x
for action in component.actions:
c.stroke(path.line(action_x, 8.05, action_x, 9.75),
[style.linewidth.THick, color.rgb(0.0, 0.5, 0.1),
deco.earrow([deco.stroked([color.rgb(0.0, 0.5, 0.1), style.linejoin.round]),
deco.filled([color.rgb(0.0, 0.5, 0.1)])], size=0.05)])
A = text.text(action_x, 8.1, r"\bf %s" % action.name, self.textactionrotattrs)
c.insert(A)
action_x = action_x + 0.1
### Now we know the horizontal extent of each thick event line
# horizontal lines for events
# eventsLeft = SingleAssignDictionary('EventLeft')
# eventsRight = SingleAssignDictionary('EventRight')
for event in self.tree.events:
event_y = self.get_y(event)
try:
event_x_left = eventsLeft[event.name]
event_x_right = eventsRight[event.name]
c.stroke(path.line(event_x_left, event_y, event_x_right, event_y),
[style.linewidth.THick, color.grey(0.40)])
except Exception:
# Event was never placed (e.g. all of its publishers and subscribers were skipped).
pass
### Draw everything in the right order
for p in eventArrowsSource:
c.stroke(p, [style.linewidth.THick, color.rgb.red,
deco.earrow([deco.stroked([color.rgb.red, style.linejoin.round]),
deco.filled([color.rgb.red])], size=0.05)])
for p in eventArrowsSink:
c.stroke(p, [style.linewidth.THick, color.rgb.blue,
deco.earrow([deco.stroked([color.rgb.blue, style.linejoin.round]),
deco.filled([color.rgb.blue])], size=0.05)])
for x, y in eventCirclesSource:
c.fill(path.circle(x, y, 0.02), [color.rgb.red])
for x, y in eventCirclesSink:
c.fill(path.circle(x, y, 0.02), [color.rgb.blue])
for x, y in eventCirclesSinkless:
c.stroke(path.circle(x, y, 0.02), [style.linewidth.THick, color.rgb.red])
for x, y in eventCirclesSourceless:
c.stroke(path.circle(x, y, 0.02), [style.linewidth.THick, color.rgb.blue])
### Write the PDF
if os.path.splitext(self.options.outpath)[1] == '':
file = os.path.join(self.options.outpath, 'system.pdf')
else:
file = self.options.outpath
c.writePDFfile(file)
print "The system layout is shown in ", file
def setup(self, optparser):
optparser.add_option(
"-a", "--actions",
action="store_true",
dest = "actions",
default = False,
help = "Also display actions of components [optional]")
optparser.add_option(
"-s", "--skip",
action = "append",
dest = "skipList",
help = "Skip containers, e.g. simulator.")
if __name__ == '__main__':
SystemLayoutBuilder().run()
|
|
from freeipa_base_action_test_case import FreeIPABaseActionTestCase
import ipa_action
from ipa_command_args_options import IPA_COMMAND_ARGS_OPTIONS
import copy
import mock
import requests
class TestActionsIpaAction(FreeIPABaseActionTestCase):
__test__ = True
action_cls = ipa_action.IpaAction
def test_init(self):
action = self.get_action_instance({})
self.assertIsInstance(action, ipa_action.IpaAction)
self.assertIsInstance(action.session, requests.Session)
def test__resolve_connection_from_config(self):
action = self.get_action_instance(self.config_good)
connection_name = 'base'
connection_config = self.config_good['connections'][connection_name]
connection_expected = {'connection': connection_name}
connection_expected.update(connection_config)
connection_result = action._resolve_connection(connection=connection_name)
self.assertEqual(connection_result, connection_expected)
def test__resolve_connection_from_config_missing(self):
action = self.get_action_instance(self.config_good)
connection_name = 'this_connection_doesnt_exist'
with self.assertRaises(KeyError):
action._resolve_connection(connection=connection_name)
def test__resolve_connection_from_config_defaults(self):
action = self.get_action_instance(self.config_good)
connection_name = 'base'
connection_config = self.config_good['connections'][connection_name]
connection_expected = {'connection': connection_name}
connection_expected.update(connection_config)
connection_result = action._resolve_connection(connection=connection_name)
self.assertEqual(connection_result, connection_expected)
def test__resolve_connection_from_kwargs(self):
action = self.get_action_instance(self.config_blank)
kwargs = {'connection': None,
'server': 'kwargs_server',
'username': 'kwargs_user',
'password': 'kwargs_password'}
connection_expected = copy.deepcopy(kwargs)
connection_result = action._resolve_connection(**kwargs)
self.assertEqual(connection_result, connection_expected)
def test__resolve_connection_from_kwargs_defaults(self):
action = self.get_action_instance(self.config_blank)
kwargs = {'connection': None,
'server': 'kwargs_server',
'username': 'kwargs_user',
'password': 'kwargs_password'}
connection_expected = copy.deepcopy(kwargs)
connection_result = action._resolve_connection(**kwargs)
self.assertEqual(connection_result, connection_expected)
def test__resolve_connection_from_kwargs_extras(self):
action = self.get_action_instance(self.config_blank)
kwargs = {'connection': None,
'server': 'kwargs_server',
'username': 'kwargs_user',
'password': 'kwargs_password',
"extra_key1": "extra_value1",
"extra_key2": 234}
connection_expected = copy.deepcopy(kwargs)
connection_result = action._resolve_connection(**kwargs)
self.assertEqual(connection_result, connection_expected)
def test__resolve_connection_kwargs_overwrites_config(self):
action = self.get_action_instance(self.config_good)
connection_name = 'full'
connection_config = self.config_good['connections'][connection_name]
kwargs = {'connection': connection_name,
'server': 'kwargs_server',
'username': 'kwargs_user'}
connection_expected = copy.deepcopy(kwargs)
connection_expected['password'] = connection_config['password']
connection_expected['verify_ssl'] = connection_config['verify_ssl']
connection_result = action._resolve_connection(**kwargs)
self.assertEqual(connection_result, connection_expected)
def test__validate_connection(self):
action = self.get_action_instance(self.config_blank)
connection = {}
for key in ipa_action.CONNECTION_OPTIONS:
connection[key] = "dummy value"
result = action._validate_connection(connection)
self.assertTrue(result)
def test__validate_connection_missing_raises(self):
action = self.get_action_instance(self.config_blank)
connection = {}
with self.assertRaises(KeyError):
action._validate_connection(connection)
def test__validate_connection_none_raises(self):
action = self.get_action_instance(self.config_blank)
connection = {}
for key in ipa_action.CONNECTION_OPTIONS:
connection[key] = None
with self.assertRaises(KeyError):
action._validate_connection(connection)
def test__ipa_url(self):
action = self.get_action_instance(self.config_blank)
expected = "https://server.domain.tld/ipa/api/host_add"
result = action._ipa_url("server.domain.tld", "/api/host_add")
self.assertEqual(result, expected)
def test__ipa_url_no_endpoint(self):
action = self.get_action_instance(self.config_blank)
expected = "https://server.domain.tld/ipa"
result = action._ipa_url("server.domain.tld")
self.assertEqual(result, expected)
def test__login_success(self):
# setup
action = self.get_action_instance(self.config_good)
connection = self.config_good['connections']['base']
expected_session = "session123"
mock_response = mock.Mock(cookies={'ipa_session': expected_session},
reason=None,
status_code=200)
mock_session = mock.Mock()
mock_session.post.return_value = mock_response
action.session = mock_session
url = 'https://{0}/ipa/session/login_password'.format(connection['server'])
headers = {"referer": 'https://{0}/ipa'.format(connection['server']),
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "text/plain"}
payload = 'user={0}&password={1}'.format(connection['username'],
connection['password'])
# execute
result = action._login(connection)
# verify
mock_session.post.assert_called_with(url,
headers=headers,
data=payload)
self.assertEqual(result, expected_session)
def test__login_error(self):
# setup
action = self.get_action_instance(self.config_good)
connection = self.config_good['connections']['base']
mock_response = mock.Mock(cookies={'xxx': ''},
reason=None,
status_code=200)
mock_session = mock.Mock()
mock_session.post.return_value = mock_response
action.session = mock_session
url = 'https://{0}/ipa/session/login_password'.format(connection['server'])
headers = {"referer": 'https://{0}/ipa'.format(connection['server']),
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "text/plain"}
payload = 'user={0}&password={1}'.format(connection['username'],
connection['password'])
# execute
with self.assertRaises(RuntimeError):
action._login(connection)
# verify
mock_session.post.assert_called_with(url,
headers=headers,
data=payload)
def test__login_http_error(self):
# setup
action = self.get_action_instance(self.config_good)
connection = self.config_good['connections']['base']
mock_response = mock.Mock(cookies={'xxx': ''},
reason=None,
status_code=401)
mock_session = mock.Mock()
mock_session.post.return_value = mock_response
action.session = mock_session
url = 'https://{0}/ipa/session/login_password'.format(connection['server'])
headers = {"referer": 'https://{0}/ipa'.format(connection['server']),
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "text/plain"}
payload = 'user={0}&password={1}'.format(connection['username'],
connection['password'])
# execute
with self.assertRaises(requests.HTTPError):
action._login(connection)
# verify
mock_session.post.assert_called_with(url,
headers=headers,
data=payload)
def test__create_payload(self):
action = self.get_action_instance(self.config_good)
method = 'hostgroup_show'
args = IPA_COMMAND_ARGS_OPTIONS[method]['args']
options = IPA_COMMAND_ARGS_OPTIONS[method]['options']
kwargs = {
'method': method,
}
expected_args = []
for i, a in enumerate(args):
kwargs[a] = i
expected_args.append(i)
expected_options = {}
for i, o in enumerate(options):
kwargs[o] = i
expected_options[o] = i
result = action._create_payload(**kwargs)
self.assertEqual(result, {
"id": 0,
"method": method,
"params": [
expected_args,
expected_options,
],
})
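# The payload asserted above illustrates the JSON-RPC shape used throughout
# these tests: {"id": 0, "method": <method>, "params": [<positional args>, <options dict>]},
# i.e. positional arguments first, keyword options second.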
def test__create_payload_api_version(self):
action = self.get_action_instance(self.config_good)
method = 'hostgroup_show'
args = IPA_COMMAND_ARGS_OPTIONS[method]['args']
options = IPA_COMMAND_ARGS_OPTIONS[method]['options']
kwargs = {
'method': method,
}
expected_args = []
for i, a in enumerate(args):
kwargs[a] = i
expected_args.append(i)
expected_options = {}
for i, o in enumerate(options):
kwargs[o] = i
expected_options[o] = i
expected_options['version'] = 99
result = action._create_payload(api_version=99, **kwargs)
self.assertEqual(result, {
"id": 0,
"method": method,
"params": [
expected_args,
expected_options,
],
})
def test__execute_success(self):
# setup
action = self.get_action_instance(self.config_good)
connection = self.config_good['connections']['base']
server = connection['server']
session = "session123"
method = 'hostgroup_show'
args = IPA_COMMAND_ARGS_OPTIONS[method]['args']
options = IPA_COMMAND_ARGS_OPTIONS[method]['options']
kwargs = {
'method': method,
}
expected_args = []
for i, a in enumerate(args):
kwargs[a] = i
expected_args.append(i)
expected_options = {}
for i, o in enumerate(options):
kwargs[o] = i
expected_options[o] = i
expected_dict = {'result': 'value'}
mock_response = mock.Mock(reason=None,
status_code=200)
mock_response.json.return_value = expected_dict
mock_session = mock.Mock()
mock_session.post.return_value = mock_response
action.session = mock_session
url = 'https://{0}/ipa/session/json'.format(server)
headers = {"referer": 'https://{0}/ipa'.format(server),
"Content-Type": "application/json",
"Accept": "application/json"}
payload = {"id": 0,
"method": kwargs['method'],
"params": [expected_args,
expected_options]}
# execute
result = action._execute(session, server, **kwargs)
# verify
mock_session.post.assert_called_with(url,
headers=headers,
json=payload,
cookies={'ipa_session': session})
self.assertEqual(result, (True, expected_dict))
def test__execute_error(self):
# setup
action = self.get_action_instance(self.config_good)
connection = self.config_good['connections']['base']
server = connection['server']
session = "session123"
method = 'hostgroup_show'
args = IPA_COMMAND_ARGS_OPTIONS[method]['args']
options = IPA_COMMAND_ARGS_OPTIONS[method]['options']
kwargs = {
'method': method,
}
expected_args = []
for i, a in enumerate(args):
kwargs[a] = i
expected_args.append(i)
expected_options = {}
for i, o in enumerate(options):
kwargs[o] = i
expected_options[o] = i
expected_dict = {'error': 'value'}
expected_result = (False, expected_dict)
mock_response = mock.Mock(reason=None,
status_code=200)
mock_response.json.return_value = expected_dict
mock_session = mock.Mock()
mock_session.post.return_value = mock_response
action.session = mock_session
url = 'https://{0}/ipa/session/json'.format(server)
headers = {"referer": 'https://{0}/ipa'.format(server),
"Content-Type": "application/json",
"Accept": "application/json"}
payload = {"id": 0,
"method": kwargs['method'],
"params": [expected_args,
expected_options]}
# execute
result = action._execute(session, server, **kwargs)
# verify
mock_session.post.assert_called_with(url,
headers=headers,
json=payload,
cookies={'ipa_session': session})
self.assertEqual(result, expected_result)
def test__execute_http_error(self):
# setup
action = self.get_action_instance(self.config_good)
connection = self.config_good['connections']['base']
server = connection['server']
session = "session123"
method = 'hostgroup_show'
args = IPA_COMMAND_ARGS_OPTIONS[method]['args']
options = IPA_COMMAND_ARGS_OPTIONS[method]['options']
kwargs = {
'method': method,
}
expected_args = []
for i, a in enumerate(args):
kwargs[a] = i
expected_args.append(i)
expected_options = {}
for i, o in enumerate(options):
kwargs[o] = i
expected_options[o] = i
mock_response = mock.Mock(reason=None,
status_code=400)
mock_session = mock.Mock()
mock_session.post.return_value = mock_response
action.session = mock_session
url = 'https://{0}/ipa/session/json'.format(server)
headers = {"referer": 'https://{0}/ipa'.format(server),
"Content-Type": "application/json",
"Accept": "application/json"}
payload = {"id": 0,
"method": kwargs['method'],
"params": [expected_args,
expected_options]}
# execute
with self.assertRaises(requests.HTTPError):
action._execute(session, server, **kwargs)
# verify
mock_session.post.assert_called_with(url,
headers=headers,
json=payload,
cookies={'ipa_session': session})
@mock.patch('ipa_action.IpaAction._execute')
def test__get_api_version(self, mock__execute):
# setup
action = self.get_action_instance(self.config_blank)
ping_good = True
data = {'result': {'summary': "API version 1.234"}}
mock__execute.return_value = (ping_good, data)
session = 'session123'
server = 'server.domain.tld'
# execute
result = action._get_api_version(session, server)
# verify
self.assertEqual(result, "1.234")
@mock.patch('ipa_action.IpaAction._execute')
def test__get_api_version_missing(self, mock__execute):
# setup
action = self.get_action_instance(self.config_blank)
ping_good = True
data = {'result': {}}
mock__execute.return_value = (ping_good, data)
session = 'session123'
server = 'server.domain.tld'
# execute
result = action._get_api_version(session, server)
# verify
self.assertEqual(result, None)
@mock.patch('ipa_action.IpaAction._execute')
def test__get_api_version_bad_ping(self, mock__execute):
# setup
action = self.get_action_instance(self.config_blank)
ping_good = False
data = {'result': {'summary': "API version 1.234"}}
mock__execute.return_value = (ping_good, data)
session = 'session123'
server = 'server.domain.tld'
# execute
result = action._get_api_version(session, server)
# verify
self.assertEqual(result, None)
@mock.patch('ipa_action.IpaAction._execute')
def test__get_api_version_bad_regex(self, mock__execute):
# setup
action = self.get_action_instance(self.config_blank)
ping_good = True
data = {'result': {'summary': "API version bad string 1.234"}}
mock__execute.return_value = (ping_good, data)
session = 'session123'
server = 'server.domain.tld'
# execute
result = action._get_api_version(session, server)
# verify
self.assertEqual(result, None)
def test_run_login_existing_session(self):
# setup
action = self.get_action_instance(self.config_blank)
kwargs_dict = {'method': 'login',
'session': 'session123',
'server': 'server.domain.tld',
'username': 'test',
'password': 'abc123',
'verify_ssl': False}
# execute
result = action.run(**kwargs_dict)
# verify
self.assertEqual(result, 'session123')
self.assertEqual(action.session.verify, False)
@mock.patch('ipa_action.IpaAction._login')
def test_run_login_missing_session(self, mock__login):
# setup
action = self.get_action_instance(self.config_blank)
kwargs_dict = {'method': 'login',
'server': 'server.domain.tld',
'session': None,
'username': 'username123',
'password': 'password123',
'verify_ssl': True}
mock__login.return_value = 'session123'
# execute
result = action.run(**kwargs_dict)
# verify
self.assertEqual(result, 'session123')
self.assertEqual(action.session.verify, True)
@mock.patch('ipa_action.IpaAction._get_api_version')
@mock.patch('ipa_action.IpaAction._execute')
def test_run_execute(self, mock__execute, mock__get_api_version):
# setup
action = self.get_action_instance(self.config_blank)
kwargs = {'method': 'host_add',
'session': 'session123',
'server': 'server.domain.tld',
'username': 'username123',
'password': 'password123',
'verify_ssl': True}
mock__get_api_version.return_value = '1.234'
mock__execute.return_value = (True, {'data': 'value'})
# execute
result = action.run(**kwargs)
# verify
self.assertEqual(result, (True, {'data': 'value'}))
self.assertEqual(action.session.verify, True)
mock__get_api_version.assert_called_with('session123', 'server.domain.tld')
mock__execute.assert_called_with('session123',
'server.domain.tld',
method='host_add',
api_version='1.234',
username='username123',
password='password123')
|
|
#!/usr/bin/env python
__author__ = 'heroico'
########################################################################################################################
# Gathers statistics on allele information.
# Produces a csv with the following columns:
# rsid,chromosome,wdb_ref_allele,wdb_eff_allele,legend_ref_allele,legend_eff_allele,legend_type,gwas_ref_allele,gwas_eff_allele,gwas_OR_BETA
#
# TODO: needs maintenance
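#
# A typical invocation might look like this (script name and paths are
# illustrative; the real defaults live in the argparse setup at the bottom):
#   python allele_statistics.py --weight_db DGN-WB_0.5.db \
#       --phase_folder data/1000GP_Phase3 \
#       --gwas_dosage_folder data/T1D-GWAS \
#       --output_file results/allele_stats.csv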
import logging
import os
import metax.ThousandGenomesUtilities as ThousandGenomesUtilities
import metax.GWASUtilities as GWASUtilities
import metax.Utilities as Utilities
import metax.WeightDBUtilities as WeightDBUtilities
import metax.Logging as Logging
class AlleleStats(object):
def __init__(self, rsid, chromosome, weight_db_entry):
self.rsid = rsid
self.chromosome = chromosome
self.weight_db_ref_allele = weight_db_entry.ref_allele
self.weight_db_eff_allele = weight_db_entry.eff_allele
self.gwas_ref_allele = "NA"
self.gwas_eff_allele = "NA"
self.gwas_OR_BETA = "NA"
self.legend_type = "NA"
self.legend_ref_allele = "NA"
self.legend_eff_allele = "NA"
def getGWASEntryData(self, gwas_entry):
if gwas_entry:
self.gwas_ref_allele = gwas_entry[GWASUtilities.GWASSNPInfoLineCollector.A1]
self.gwas_eff_allele = gwas_entry[GWASUtilities.GWASSNPInfoLineCollector.A2]
self.gwas_OR_BETA = gwas_entry[GWASUtilities.GWASSNPInfoLineCollector.OR_BETA]
else:
self.gwas_ref_allele = "NA"
self.gwas_eff_allele = "NA"
self.gwas_OR_BETA = "NA"
def getLegendData(self, type, a0, a1):
self.legend_type = type if type is not None else "NA"
self.legend_ref_allele = a0 if a0 is not None else "NA"
self.legend_eff_allele = a1 if a1 is not None else "NA"
def toCSVLine(self):
values = (self.rsid, self.chromosome,
self.weight_db_ref_allele, self.weight_db_eff_allele,
self.legend_ref_allele, self.legend_eff_allele, self.legend_type,
self.gwas_ref_allele, self.gwas_eff_allele, self.gwas_OR_BETA)
line = "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" % values
return line
@classmethod
def CSVHeader(cls):
return "rsid,chromosome,wdb_ref_allele,wdb_eff_allele,legend_ref_allele,legend_eff_allele,legend_type,gwas_ref_allele,gwas_eff_allele,gwas_OR_BETA\n"
class ProcessAlleleStatistics(object):
def __init__(self, args):
self.data_folder = args.data_folder
self.weight_db = args.weight_db
self.db_path = os.path.join(self.data_folder, args.weight_db)
self.data_folder_phase = args.phase_folder
self.data_folder_gwas_dosage = args.gwas_dosage_folder
self.output_file = args.output_file
def run(self):
if os.path.exists(self.output_file):
logging.info("File %s already exists, delete it if you want it calculated again", self.output_file)
return
logging.info("Opening %s", self.weight_db)
weight_db_logic = WeightDBUtilities.WeightDBEntryLogic(self.db_path)
CHROMOSOMES = ["chr"+str(x) for x in xrange(1, 23)]
dosage_names = Utilities.dosageNamesFromFolder(self.data_folder_gwas_dosage)
legend_names = Utilities.legendNamesFromFolder(self.data_folder_phase)
findings={}
for chromosome in CHROMOSOMES:
logging.info("Processing chromosome %s", chromosome)
dosage_name = Utilities.removeNameWithPatterns(dosage_names, [chromosome+"."])
dosage = self.loadDosageFile(self.data_folder_gwas_dosage, dosage_name)
self.processDosage(chromosome, weight_db_logic, dosage, findings)
legend_name = Utilities.removeNameEndingWith(legend_names, chromosome)
self.processLegendName(chromosome, weight_db_logic, dosage, findings, legend_name)
with open(self.output_file, "w") as file:
file.write(AlleleStats.CSVHeader())
def sortByChromosome(finding):
return finding.chromosome
entries = sorted(findings.values(), key=sortByChromosome)
for finding in entries:
line = finding.toCSVLine()
file.write(line)
def loadDosageFile(self, base_path, name):
callback = GWASUtilities.GWASSNPInfoLineCollector()
dosage_loader = GWASUtilities.GWASDosageFileLoader(base_path, name, callback)
keyed_data_set = dosage_loader.load()
return keyed_data_set
def processDosage(self, chromosome, weight_db_logic, dosage, findings):
ok = 0
for rsid, dosage_entry in dosage.values_by_key.iteritems():
weight_db_entry = weight_db_logic.anEntryWithRSID(rsid)
if not weight_db_entry:
logging.log(7, "%s in dosage not in weights", rsid)
continue
a1 = dosage_entry[GWASUtilities.GWASSNPInfoLineCollector.A1]
a2 = dosage_entry[GWASUtilities.GWASSNPInfoLineCollector.A2]
OR = dosage_entry[GWASUtilities.GWASSNPInfoLineCollector.OR]
if not weight_db_entry.ref_allele == a1 or \
not weight_db_entry.eff_allele == a2 or \
OR == "NA":
logging.log(7, "%s in dosage is problematic (%s, %s)(%s, %s, %s)", rsid, weight_db_entry.ref_allele, weight_db_entry.eff_allele, a1, a2, OR)
finding = appropriateFinding(findings, rsid, chromosome, weight_db_entry)
finding.getGWASEntryData(dosage_entry)
continue
ok += 1
logging.log(8,"After processing dosage, %d snps were found to be ok", ok)
def processLegendName(self, chromosome, weight_db_logic, dosage, findings, legend_name):
class LegendCallback(object):
def __init__(self):
pass
def __call__(self, i, comps):
id = comps[ThousandGenomesUtilities.ILTF.ID]
id_components = id.split(':')
rsid = id_components[0]
weight_db_entry = weight_db_logic.anEntryWithRSID(rsid)
if not weight_db_entry:
logging.log(8, "rsid %s from legend not in db, %s", rsid, id)
return
type = comps[ThousandGenomesUtilities.ILTF.TYPE]
a0 = comps[ThousandGenomesUtilities.ILTF.A0]
a1 = comps[ThousandGenomesUtilities.ILTF.A1]
if rsid in findings:
finding = findings[rsid]
finding.getLegendData(type, a0, a1)
move_on = True
if not type == "Biallelic_SNP":
logging.log(8, "%s %s Not biallelic: %s", chromosome, id, type)
move_on = False
else:
if (a0 == 'T' and a1 == 'A') or \
(a0 == 'A' and a1 == 'T') or \
(a0 == 'C' and a1 == 'G') or \
(a0 == 'G' and a1 == 'C'):
logging.log(8, "%s %s Problematic: %s, %s", chromosome, id, a0, a1)
move_on = False
if not weight_db_entry.ref_allele == a0 or \
not weight_db_entry.eff_allele == a1:
logging.log(8, "%s %s Different alleles %s %s", chromosome, id, a0, a1)
move_on = False
if not move_on:
finding = appropriateFinding(findings, rsid, chromosome, weight_db_entry)
finding.getLegendData(type,a0,a1)
dosage_entry = None
if rsid in dosage.values_by_key:
dosage_entry = dosage.values_by_key[rsid]
finding.getGWASEntryData(dosage_entry)
callback = LegendCallback()
loader = ThousandGenomesUtilities.LEGENDLoader(self.data_folder_phase, legend_name)
loader.iterateOverFileLegends(callback)
def appropriateFinding(findings, rsid, chromosome, weight_db_entry):
finding = None
if rsid in findings:
finding = findings[rsid]
else:
finding = AlleleStats(rsid, chromosome, weight_db_entry)
findings[rsid] = finding
return finding
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Build correlations from PHASE3 data and weights database.')
parser.add_argument("--data_folder",
help="higher level data folder",
default="data")
parser.add_argument("--weight_db",
help="name of weight db in data folder",
default="DGN-WB_0.5.db")
parser.add_argument("--phase_folder",
help="name of folder containing PHASE 3 data",
default="data/1000GP_Phase3")
parser.add_argument("--gwas_dosage_folder",
help="name of folder containing dosage data",
default="data/T1D-GWAS")
parser.add_argument("--output_file",
help="name of file to dump results in",
default="results/allele_stats.csv")
args = parser.parse_args()
Logging.configureLogging(logging.INFO)
work = ProcessAlleleStatistics(args)
work.run()
|
|
"""
Basic tests of DatasetAssembler.
Some features are tested in other sibling test files, such as alternative
naming conventions.
"""
from datetime import datetime, timezone
from pathlib import Path
from pprint import pprint
from textwrap import dedent
from uuid import UUID
import numpy
import pytest
from ruamel import yaml
from eodatasets3 import DatasetAssembler, DatasetPrepare, namer, serialise
from eodatasets3.images import GridSpec
from eodatasets3.model import DatasetDoc
from tests import assert_file_structure
from tests.common import assert_expected_eo3_path, assert_same
def test_dea_style_package(
l1_ls8_dataset: DatasetDoc, l1_ls8_dataset_path: Path, tmp_path: Path
):
out = tmp_path
[blue_geotiff_path] = l1_ls8_dataset_path.rglob("L*_B2.TIF")
with DatasetAssembler(out, naming_conventions="dea") as p:
# We add a source dataset, asking to inherit the common properties (eg. platform, instrument, datetime)
p.add_source_path(l1_ls8_dataset_path, auto_inherit_properties=True)
# It's a GA product of "numerus-unus" ("the number one").
p.producer = "ga.gov.au"
p.product_family = "ones"
p.dataset_version = "3.0.0"
# Known properties are normalised (see tests at bottom of file)
p.platform = "LANDSAT_8" # to: 'landsat-8'
p.processed = "2016-03-04 14:23:30Z" # into a date.
p.maturity = "FINAL" # lowercased
p.properties["eo:off_nadir"] = "34" # into a number
# Write a measurement from a numpy array, using the source dataset's grid spec.
p.write_measurement_numpy(
"ones",
numpy.ones((60, 60), numpy.int16),
GridSpec.from_dataset_doc(l1_ls8_dataset),
nodata=-999,
)
# Copy a measurement from an input file (it will write a COG with DEA naming conventions)
p.write_measurement("blue", blue_geotiff_path)
# Alternatively, all measurements could be added by reference rather than copied:
# p.note_measurement("external_blue", blue_geotiff_path)
# (See an example of referencing in eodatasets3/prepare/landsat_l1_prepare.py )
# Write a thumbnail using the given bands as r/g/b.
p.write_thumbnail("ones", "ones", "blue")
# Write a singleband thumbnail using a bit flag
p.write_thumbnail_singleband("blue", bit=1, kind="singleband")
# Write a singleband thumbnail using a lookuptable
p.write_thumbnail_singleband(
"blue", lookup_table={1: (0, 0, 255)}, kind="singleband_lut"
)
# Note any software versions important to this created data.
p.note_software_version(
"numerus-unus-processor",
"https://github.com/GeoscienceAustralia/eo-datasets",
"1.2.3",
)
# p.done() will validate the dataset and write it to the destination atomically.
dataset_id, metadata_path = p.done()
assert isinstance(dataset_id, UUID), "Expected a random UUID to be assigned"
out = tmp_path / "ga_ls8c_ones_3/090/084/2016/01/21"
assert out == metadata_path.parent
assert_file_structure(
out,
{
"ga_ls8c_ones_3-0-0_090084_2016-01-21_final.odc-metadata.yaml": "",
"ga_ls8c_ones_3-0-0_090084_2016-01-21_final_blue.tif": "",
"ga_ls8c_ones_3-0-0_090084_2016-01-21_final_ones.tif": "",
"ga_ls8c_ones_3-0-0_090084_2016-01-21_final_thumbnail.jpg": "",
"ga_ls8c_ones_3-0-0_090084_2016-01-21_final.proc-info.yaml": "",
"ga_ls8c_ones_3-0-0_090084_2016-01-21_final.sha1": "",
"ga_ls8c_ones_3-0-0_090084_2016-01-21_final_singleband-thumbnail.jpg": "",
"ga_ls8c_ones_3-0-0_090084_2016-01-21_final_singleband-lut-thumbnail.jpg": "",
},
)
# TODO: check sha1 checksum list.
assert_expected_eo3_path(
{
"$schema": "https://schemas.opendatacube.org/dataset",
"id": dataset_id,
"label": "ga_ls8c_ones_3-0-0_090084_2016-01-21_final",
"product": {
# This was added automatically because we chose 'dea' conventions.
"href": "https://collections.dea.ga.gov.au/product/ga_ls8c_ones_3",
"name": "ga_ls8c_ones_3",
},
"crs": "epsg:32655",
"geometry": {
"coordinates": [
[
[879_315.0, -3_714_585.0],
[641_985.0, -3_714_585.0],
[641_985.0, -3_953_115.0],
[879_315.0, -3_953_115.0],
[879_315.0, -3_714_585.0],
]
],
"type": "Polygon",
},
"grids": {
# Note that the two bands had identical grid specs, so it combined them into one grid.
"default": {
"shape": [60, 60],
"transform": [
3955.5,
0.0,
641_985.0,
0.0,
-3975.500_000_000_000_5,
-3_714_585.0,
0.0,
0.0,
1.0,
],
}
},
"measurements": {
"blue": {"path": "ga_ls8c_ones_3-0-0_090084_2016-01-21_final_blue.tif"},
"ones": {"path": "ga_ls8c_ones_3-0-0_090084_2016-01-21_final_ones.tif"},
},
"properties": {
"datetime": datetime(2016, 1, 21, 23, 50, 23, 54435),
"dea:dataset_maturity": "final",
"odc:dataset_version": "3.0.0",
"odc:file_format": "GeoTIFF",
"odc:processing_datetime": "2016-03-04T14:23:30",
"odc:producer": "ga.gov.au",
"odc:product_family": "ones",
# The remaining fields were inherited from the source dataset
# (because we set auto_inherit_properties=True, and they're in the whitelist)
"eo:platform": "landsat-8", # matching Stac's examples for capitalisation.
"eo:instrument": "OLI_TIRS", # matching Stac's examples for capitalisation.
"eo:cloud_cover": 93.22,
"eo:off_nadir": 34.0,
"eo:gsd": 15.0,
"eo:sun_azimuth": 74.007_443_8,
"eo:sun_elevation": 55.486_483,
"landsat:collection_category": "T1",
"landsat:collection_number": 1,
"landsat:landsat_product_id": "LC08_L1TP_090084_20160121_20170405_01_T1",
"landsat:landsat_scene_id": "LC80900842016021LGN02",
"landsat:wrs_path": 90,
"landsat:wrs_row": 84,
"odc:region_code": "090084",
},
"accessories": {
# It wrote a checksum file for all of our files.
"checksum:sha1": {
"path": "ga_ls8c_ones_3-0-0_090084_2016-01-21_final.sha1"
},
# We didn't add any extra processor metadata, so this just contains
# some software versions.
"metadata:processor": {
"path": "ga_ls8c_ones_3-0-0_090084_2016-01-21_final.proc-info.yaml"
},
# The thumbnails we made.
"thumbnail": {
"path": "ga_ls8c_ones_3-0-0_090084_2016-01-21_final_thumbnail.jpg"
},
"thumbnail:singleband": {
"path": "ga_ls8c_ones_3-0-0_090084_2016-01-21_final_singleband-thumbnail.jpg"
},
"thumbnail:singleband_lut": {
"path": "ga_ls8c_ones_3-0-0_090084_2016-01-21_final_singleband-lut-thumbnail.jpg"
},
},
"lineage": {"level1": ["a780754e-a884-58a7-9ac0-df518a67f59d"]},
},
metadata_path,
)
def test_minimal_package_with_product_name(tmp_path: Path, l1_ls8_folder: Path):
"""
You can specify an ODC product name manually to avoid most of the name generation.
"""
out = tmp_path / "out"
out.mkdir()
[blue_geotiff_path] = l1_ls8_folder.rglob("L*_B2.TIF")
with DatasetAssembler(out) as p:
p.datetime = datetime(2019, 7, 4, 13, 7, 5)
p.product_name = "loch_ness_sightings"
p.processed = datetime(2019, 7, 4, 13, 8, 7)
p.write_measurement("blue", blue_geotiff_path)
dataset_id, metadata_path = p.done()
assert dataset_id is not None
assert_file_structure(
out,
{
"loch_ness_sightings": {
"2019": {
"07": {
"04": {
# Set a dataset version to get rid of 'beta' label.
"loch_ness_sightings_2019-07-04.odc-metadata.yaml": "",
"loch_ness_sightings_2019-07-04.proc-info.yaml": "",
"loch_ness_sightings_2019-07-04_blue.tif": "",
"loch_ness_sightings_2019-07-04.sha1": "",
}
}
}
}
},
)
def test_in_memory_dataset(tmp_path: Path, l1_ls8_folder: Path):
"""
You can create metadata fully in-memory, without touching paths.
"""
out = tmp_path / "out"
out.mkdir()
[blue_geotiff_path] = l1_ls8_folder.rglob("L*_B2.TIF")
dataset_location = out / "my/custom/dataset/path/ls_whatever.stac-item.json"
p = DatasetPrepare(dataset_location=dataset_location)
p.datetime = datetime(2019, 7, 4, 13, 7, 5)
p.product_name = "loch_ness_sightings"
p.processed = datetime(2019, 7, 4, 13, 8, 7)
pretend_path = dataset_location.parent / "our_image_dont_read_it.tif"
p.note_measurement(
"blue",
pretend_path,
# We give it grid information, so it doesn't have to read it itself.
# (reading will fail if it tries, because the path is fake!)
grid=GridSpec.from_path(blue_geotiff_path),
pixels=numpy.ones((60, 60), numpy.int16),
nodata=-1,
)
dataset: DatasetDoc = p.to_dataset_doc()
doc: dict = serialise.to_doc(dataset)
# We're testing geometry calc in other tests.
assert doc["geometry"] is not None, "Expected geometry"
del doc["geometry"]
assert doc["id"] is not None, "Expected an id"
del doc["id"]
# Users can ask the generator for file names:
assert (
p.names.measurement_filename("red") == "loch_ness_sightings_2019-07-04_red.tif"
)
# The computed file paths are relative to our given dataset location.
out_url = out.as_uri()
assert (
p.names.resolve_file(p.names.measurement_filename("red"))
== f"{out_url}/my/custom/dataset/path/loch_ness_sightings_2019-07-04_red.tif"
)
pprint(doc)
assert_same(
{
"$schema": "https://schemas.opendatacube.org/dataset",
"label": "loch_ness_sightings_2019-07-04",
"crs": "epsg:32655",
"measurements": {"blue": {"path": "our_image_dont_read_it.tif"}},
"product": {"name": "loch_ness_sightings"},
"properties": {
"datetime": datetime(2019, 7, 4, 13, 7, 5, tzinfo=timezone.utc),
"odc:processing_datetime": datetime(
2019, 7, 4, 13, 8, 7, tzinfo=timezone.utc
),
"odc:product": "loch_ness_sightings",
},
"grids": {
"default": {
"shape": [60, 60],
"transform": [
3955.5,
0.0,
641985.0,
0.0,
-3975.5000000000005,
-3714585.0,
0.0,
0.0,
1.0,
],
}
},
"accessories": {},
"lineage": {},
},
doc,
)
def test_minimal_generated_naming_package(tmp_path: Path, l1_ls8_folder: Path):
"""
What's the minimum number of fields we can set and still generate file/product
names to produce a package?
"""
out = tmp_path / "out"
out.mkdir()
[blue_geotiff_path] = l1_ls8_folder.rglob("L*_B2.TIF")
with DatasetAssembler(out) as p:
p.datetime = datetime(2019, 7, 4, 13, 7, 5)
p.product_family = "quaternarius"
p.processed_now()
p.write_measurement("blue", blue_geotiff_path)
# A friendly __str__ for notebook/terminal users:
assert str(p) == dedent(
f"""
Assembling quaternarius (unfinished)
- 1 measurements: blue
- 4 properties: datetime, odc:file_format, odc:processing_datetime, odc:prod...
Writing to location: {out}/quaternarius/2019/07/04/quaternarius_2019-07-04.odc-metadata.yaml
"""
)
# p.done() will validate the dataset and write it to the destination atomically.
dataset_id, metadata_path = p.done()
assert dataset_id is not None
assert_file_structure(
out,
{
"quaternarius": {
"2019": {
"07": {
"04": {
# Set a dataset version to get rid of 'beta' label.
"quaternarius_2019-07-04.odc-metadata.yaml": "",
"quaternarius_2019-07-04.proc-info.yaml": "",
"quaternarius_2019-07-04_blue.tif": "",
"quaternarius_2019-07-04.sha1": "",
}
}
}
}
},
)
def test_generated_metadata_path(l1_ls7_tarball: Path):
"""
We can specify a dataset_location alone, such as a compressed tarball.
The metadata path will be a sibling file, and measurements will be read
from inside the tarball.
"""
with DatasetPrepare(
# Our location is a Path() to a local tar (with suffix '.tar')
dataset_location=l1_ls7_tarball
) as p:
p.datetime = datetime(2019, 7, 4, 13, 7, 5)
p.product_family = "tarred-product"
p.processed_now()
# When converting the Path() to a URL, it should convert tar paths to `tar://` scheme.
expected_dataset_location = f"tar:{l1_ls7_tarball.as_posix()}!/"
assert p.names.dataset_location == expected_dataset_location
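# For instance (path illustrative), a local Path('/data/LE07_scene.tar') turns
# into the dataset location 'tar:/data/LE07_scene.tar!/'.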
# relative-to-location means our resulting URL will be a file inside our tar location:
p.note_measurement(
"green",
"LE07_L1TP_104078_20130429_20161124_01_T1_B2.TIF",
relative_to_dataset_location=True,
)
dataset_id, metadata_path = p.done(embed_location=True)
# The generated metadata-path has the same name as the tar, but with ".odc-metadata.yaml" suffix.
assert (
metadata_path
== l1_ls7_tarball.parent
/ "LE07_L1TP_104078_20130429_20161124_01_T1.odc-metadata.yaml"
)
assert_expected_eo3_path(
{
"$schema": "https://schemas.opendatacube.org/dataset",
"label": "tarred_product_2019-07-04",
"product": {"name": "tarred_product"},
"location": expected_dataset_location,
"measurements": {
"green": {"path": "LE07_L1TP_104078_20130429_20161124_01_T1_B2.TIF"}
},
"accessories": {},
"lineage": {},
},
metadata_path,
# Tested in other tests. We only care about paths.
ignore_fields=["id", "properties", "geometry", "grids", "crs"],
)
def test_dataset_no_measurements(tmp_path: Path):
"""Can we make a dataset with no measurements? (eg. telemetry data)"""
with DatasetAssembler(tmp_path) as p:
# A custom label too.
p.label = "chipmonk_sightings_2019"
p.datetime = datetime(2019, 1, 1)
p.product_family = "chipmonk_sightings"
p.processed_now()
dataset_id, metadata_path = p.done()
with metadata_path.open("r") as f:
doc = yaml.YAML(typ="safe").load(f)
assert doc["label"] == "chipmonk_sightings_2019", "Couldn't override label field"
def test_dataset_given_properties(tmp_path: Path):
"""Can we give existing properties to the assembler?"""
properties = {
"datetime": datetime(2019, 1, 1),
"odc:product_family": "chipmonk_sightings",
"odc:processing_datetime": "2021-06-15T01:33:43.378850",
}
names = namer(properties=properties)
with DatasetAssembler(tmp_path, names=names) as p:
# It should have normalised properties!
assert p.processed == datetime(2021, 6, 15, 1, 33, 43, 378850, timezone.utc)
dataset_id, metadata_path = p.done()
relative_path = metadata_path.relative_to(tmp_path)
assert relative_path == Path(
"chipmonk_sightings/2019/01/01/chipmonk_sightings_2019-01-01.odc-metadata.yaml"
)
@pytest.mark.parametrize(
"inherit_geom",
[True, False],
ids=["inherit geom from dataset", "don't inherit geom"],
)
def test_add_source_dataset(tmp_path: Path, inherit_geom):
from eodatasets3 import serialise
p = DatasetAssembler(tmp_path, naming_conventions="dea_c3")
source_dataset = serialise.from_path(
Path(__file__).parent / "data/LC08_L1TP_089080_20160302_20170328_01_T1.yaml"
)
p.add_source_dataset(
source_dataset, auto_inherit_properties=True, inherit_geometry=inherit_geom
)
if inherit_geom:
assert p.geometry == source_dataset.geometry
else:
assert p.geometry is None
p.maturity = "interim"
p.collection_number = "3"
p.dataset_version = "1.6.0"
p.producer = "ga.gov.au"
p.processed = "1998-07-30T12:23:23"
p.product_family = "wofs"
p.write_measurement(
"water",
Path(__file__).parent
/ "data/wofs/ga_ls_wofs_3_099081_2020-07-26_interim_water_clipped.tif",
)
id, path = p.done()
output = serialise.from_path(path)
if inherit_geom:
# POLYGON((609615 -3077085, 378285 -3077085, 378285 -3310515, 609615 -3310515, 609615 -3077085))
assert output.geometry == source_dataset.geometry
else:
# POLYGON((684285 -3439275, 684285 -3444495, 689925 -3444495, 689925 -3439275, 684285 -3439275))
# Geometry is not set from the source dataset, but instead from the added wofs measurement
assert output.geometry is not None
assert output.geometry != source_dataset.geometry
|
|
# -*- coding: utf-8 -*-
"""
pyrseas.dbobject.operclass
~~~~~~~~~~~~~~~~~~~~~~~~~~
This module defines two classes: OperatorClass derived from
DbSchemaObject and OperatorClassDict derived from DbObjectDict.
"""
from pyrseas.dbobject import DbObjectDict, DbSchemaObject
from pyrseas.dbobject import commentable, ownable
class OperatorClass(DbSchemaObject):
"""An operator class"""
keylist = ['schema', 'name', 'index_method']
objtype = "OPERATOR CLASS"
single_extern_file = True
def extern_key(self):
"""Return the key to be used in external maps for this operator
:return: string
"""
return '%s %s using %s' % (self.objtype.lower(), self.name,
self.index_method)
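# For example (names illustrative), a btree operator class named int4_ops maps
# to the external key 'operator class int4_ops using btree'.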
def identifier(self):
"""Return a full identifier for an operator class
:return: string
"""
return "%s USING %s" % (self.qualname(), self.index_method)
def to_map(self, no_owner):
"""Convert operator class to a YAML-suitable format
:return: dictionary
"""
dct = self._base_map(no_owner)
if self.name == self.family:
del dct['family']
return dct
@commentable
@ownable
def create(self):
"""Return SQL statements to CREATE the operator class
:return: SQL statements
"""
dflt = ''
if hasattr(self, 'default') and self.default:
dflt = "DEFAULT "
clauses = []
for (strat, oper) in list(self.operators.items()):
clauses.append("OPERATOR %d %s" % (strat, oper))
for (supp, func) in list(self.functions.items()):
clauses.append("FUNCTION %d %s" % (supp, func))
if hasattr(self, 'storage'):
clauses.append("STORAGE %s" % self.storage)
return ["CREATE OPERATOR CLASS %s\n %sFOR TYPE %s USING %s "
"AS\n %s" % (
self.qualname(), dflt, self.type, self.index_method,
',\n ' .join(clauses))]
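# A hedged sketch of the SQL create() emits (class name, operators and
# functions are illustrative):
#   CREATE OPERATOR CLASS myschema.int4_ops
#       DEFAULT FOR TYPE integer USING btree AS
#       OPERATOR 1 <(integer,integer),
#       FUNCTION 1 btint4cmp(integer,integer)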
class OperatorClassDict(DbObjectDict):
"The collection of operator classes in a database"
cls = OperatorClass
query = \
"""SELECT nspname AS schema, opcname AS name, rolname AS owner,
amname AS index_method, opfname AS family,
opcintype::regtype AS type, opcdefault AS default,
opckeytype::regtype AS storage,
obj_description(o.oid, 'pg_opclass') AS description
FROM pg_opclass o JOIN pg_am a ON (opcmethod = a.oid)
JOIN pg_roles r ON (r.oid = opcowner)
JOIN pg_opfamily f ON (opcfamily = f.oid)
JOIN pg_namespace n ON (opcnamespace = n.oid)
WHERE (nspname != 'pg_catalog' AND nspname != 'information_schema')
AND o.oid NOT IN (
SELECT objid FROM pg_depend WHERE deptype = 'e'
AND classid = 'pg_opclass'::regclass)
ORDER BY nspname, opcname, amname"""
opquery = \
"""SELECT nspname AS schema, opcname AS name, amname AS index_method,
amopstrategy AS strategy, amopopr::regoperator AS operator
FROM pg_opclass o JOIN pg_am a ON (opcmethod = a.oid)
JOIN pg_namespace n ON (opcnamespace = n.oid), pg_amop ao,
pg_depend
WHERE refclassid = 'pg_opclass'::regclass
AND classid = 'pg_amop'::regclass AND objid = ao.oid
AND refobjid = o.oid
AND (nspname != 'pg_catalog' AND nspname != 'information_schema')
AND o.oid NOT IN (
SELECT objid FROM pg_depend WHERE deptype = 'e'
AND classid = 'pg_opclass'::regclass)
ORDER BY nspname, opcname, amname, amopstrategy"""
prquery = \
"""SELECT nspname AS schema, opcname AS name, amname AS index_method,
amprocnum AS support, amproc::regprocedure AS function
FROM pg_opclass o JOIN pg_am a ON (opcmethod = a.oid)
JOIN pg_namespace n ON (opcnamespace = n.oid), pg_amproc ap,
pg_depend
WHERE refclassid = 'pg_opclass'::regclass
AND classid = 'pg_amproc'::regclass AND objid = ap.oid
AND refobjid = o.oid
AND (nspname != 'pg_catalog' AND nspname != 'information_schema')
AND o.oid NOT IN (
SELECT objid FROM pg_depend WHERE deptype = 'e'
AND classid = 'pg_opclass'::regclass)
ORDER BY nspname, opcname, amname, amprocnum"""
def _from_catalog(self):
"""Initialize the dictionary of operator classes from the catalogs"""
for opclass in self.fetch():
if opclass.storage == '-':
del opclass.storage
self[opclass.key()] = OperatorClass(**opclass.__dict__)
opers = self.dbconn.fetchall(self.opquery)
self.dbconn.rollback()
for (sch, opc, idx, strat, oper) in opers:
opcls = self[(sch, opc, idx)]
if not hasattr(opcls, 'operators'):
opcls.operators = {}
opcls.operators.update({strat: oper})
funcs = self.dbconn.fetchall(self.prquery)
self.dbconn.rollback()
for (sch, opc, idx, supp, func) in funcs:
opcls = self[(sch, opc, idx)]
if not hasattr(opcls, 'functions'):
opcls.functions = {}
opcls.functions.update({supp: func})
def from_map(self, schema, inopcls):
"""Initalize the dictionary of operator classes from the input map
:param schema: schema owning the operator classes
:param inopcls: YAML map defining the operator classes
"""
for key in inopcls:
if not key.startswith('operator class ') or ' using ' not in key:
raise KeyError("Unrecognized object type: %s" % key)
pos = key.rfind(' using ')
opc = key[15:pos] # 15 = len('operator class ')
idx = key[pos + 7:] # 7 = len(' using ')
inopcl = inopcls[key]
self[(schema.name, opc, idx)] = opclass = OperatorClass(
schema=schema.name, name=opc, index_method=idx)
if not inopcl:
raise ValueError("Operator class '%s' has no specification" % opc)
for attr, val in list(inopcl.items()):
setattr(opclass, attr, val)
if 'oldname' in inopcl:
opclass.oldname = inopcl['oldname']
if 'description' in inopcl:
opclass.description = inopcl['description']
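# A hedged example of a YAML entry that from_map() above accepts (names and
# members are illustrative):
#   operator class int4_ops using btree:
#     type: integer
#     default: true
#     operators:
#       1: <(integer,integer)
#     functions:
#       1: btint4cmp(integer,integer)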
def diff_map(self, inopcls):
"""Generate SQL to transform existing operator classes
:param inopcls: a YAML map defining the new operator classes
:return: list of SQL statements
Compares the existing operator class definitions, as fetched
from the catalogs, to the input map and generates SQL
statements to transform the operator classes accordingly.
"""
stmts = []
# check input operator classes
for (sch, opc, idx) in inopcls:
inoper = inopcls[(sch, opc, idx)]
# does it exist in the database?
if (sch, opc, idx) not in self:
if not hasattr(inoper, 'oldname'):
# create new operator
stmts.append(inoper.create())
else:
stmts.append(self[(sch, opc, idx)].rename(inoper))
else:
# check operator objects
stmts.append(self[(sch, opc, idx)].diff_map(inoper))
# check existing operators
for (sch, opc, idx) in self:
oper = self[(sch, opc, idx)]
# if missing from the input map, mark it for dropping (only the attribute's presence matters; see _drop)
if (sch, opc, idx) not in inopcls:
oper.dropped = False
return stmts
def _drop(self):
"""Actually drop the operator classes
:return: SQL statements
"""
stmts = []
for (sch, opc, idx) in self:
oper = self[(sch, opc, idx)]
if hasattr(oper, 'dropped'):
stmts.append(oper.drop())
return stmts
|
|
import logging
from collections import defaultdict
from .. import Analysis
from .annotations import StackLocationAnnotation
from ..code_location import CodeLocation
from ..forward_analysis import ForwardAnalysis, FunctionGraphVisitor
from ... import BP, BP_AFTER
from ...keyed_region import KeyedRegion
from ...sim_variable import SimRegisterVariable, SimStackVariable, SimStackVariablePhi
l = logging.getLogger("angr.analyses.variable_recovery.variable_recovery")
class VariableRecoveryState(object):
"""
The abstract state of variable recovery analysis.
:ivar angr.knowledge.variable_manager.VariableManager variable_manager: The variable manager.
"""
def __init__(self, variable_manager, arch, func_addr, concrete_states, stack_region=None, register_region=None):
self.variable_manager = variable_manager # type: angr.knowledge.variable_manager.VariableManager
self.arch = arch
self.func_addr = func_addr
self._concrete_states = concrete_states
# self._state_per_instruction = { }
if stack_region is not None:
self.stack_region = stack_region
else:
self.stack_region = KeyedRegion()
if register_region is not None:
self.register_region = register_region
else:
self.register_region = KeyedRegion()
# register callbacks
self.register_callbacks(self.concrete_states)
def __repr__(self):
return "<VRAbstractState: %d register variables, %d stack variables>" % (len(self.register_region), len(self.stack_region))
@property
def concrete_states(self):
return self._concrete_states
@concrete_states.setter
def concrete_states(self, v):
self._concrete_states = v
def get_concrete_state(self, addr):
"""
:param addr:
:return:
"""
for s in self.concrete_states:
if s.ip._model_concrete.value == addr:
return s
return None
def copy(self):
state = VariableRecoveryState(self.variable_manager,
self.arch,
self.func_addr,
self._concrete_states,
stack_region=self.stack_region.copy(),
register_region=self.register_region.copy(),
)
return state
def register_callbacks(self, concrete_states):
"""
:param concrete_states:
:return:
"""
for concrete_state in concrete_states:
# clear existing breakpoints
# TODO: all breakpoints are removed. Fix this later by only removing breakpoints that we added
for bp_type in ('reg_read', 'reg_write', 'mem_read', 'mem_write', 'instruction'):
concrete_state.inspect._breakpoints[bp_type] = [ ]
concrete_state.inspect.add_breakpoint('reg_read', BP(when=BP_AFTER, enabled=True,
action=self._hook_register_read
)
)
concrete_state.inspect.add_breakpoint('reg_write', BP(enabled=True, action=self._hook_register_write))
concrete_state.inspect.add_breakpoint('mem_read', BP(when=BP_AFTER, enabled=True,
action=self._hook_memory_read
)
)
concrete_state.inspect.add_breakpoint('mem_write', BP(enabled=True, action=self._hook_memory_write))
def merge(self, other):
"""
Merge two abstract states.
:param VariableRecoveryState other: The other abstract state to merge.
:return: The merged abstract state.
:rtype: VariableRecoveryState
"""
# TODO: finish it
merged_concrete_states = [ self._concrete_states[0] ] # self._merge_concrete_states(other)
new_stack_region = self.stack_region.copy()
new_stack_region.merge(other.stack_region)
new_register_region = self.register_region.copy()
new_register_region.merge(other.register_region)
return VariableRecoveryState(self.variable_manager, self.arch, self.func_addr, merged_concrete_states,
stack_region=new_stack_region,
register_region=new_register_region
)
def _merge_concrete_states(self, other):
"""
:param VariableRecoveryState other:
:return: The concrete states of self, each merged with its counterpart in other (if any).
:rtype: list
"""
merged = [ ]
for s in self.concrete_states:
other_state = other.get_concrete_state(s.ip._model_concrete.value)
if other_state is not None:
s = s.merge(other_state)
merged.append(s)
return merged
#
# SimInspect callbacks
#
def _hook_register_read(self, state):
reg_read_offset = state.inspect.reg_read_offset
reg_read_length = state.inspect.reg_read_length
if reg_read_offset == state.arch.sp_offset and reg_read_length == state.arch.bits / 8:
# TODO: make sure the sp is not overwritten by something that we are not tracking
return
#if reg_read_offset == state.arch.bp_offset and reg_read_length == state.arch.bits / 8:
# # TODO:
var_offset = self._normalize_register_offset(reg_read_offset)
if var_offset not in self.register_region:
# the variable being read doesn't exist before
variable = SimRegisterVariable(reg_read_offset, reg_read_length,
ident=self.variable_manager[self.func_addr].next_variable_ident('register'),
region=self.func_addr,
)
self.register_region.add_variable(var_offset, variable)
# record this variable in variable manager
self.variable_manager[self.func_addr].add_variable('register', var_offset, variable)
def _hook_register_write(self, state):
reg_write_offset = state.inspect.reg_write_offset
if reg_write_offset == state.arch.sp_offset:
# it's updating stack pointer. skip
return
reg_write_expr = state.inspect.reg_write_expr
reg_write_length = len(reg_write_expr) / 8
# annotate it
# reg_write_expr = reg_write_expr.annotate(VariableSourceAnnotation.from_state(state))
state.inspect.reg_write_expr = reg_write_expr
# create the variable
variable = SimRegisterVariable(reg_write_offset, reg_write_length,
ident=self.variable_manager[self.func_addr].next_variable_ident('register'),
region=self.func_addr,
)
var_offset = self._normalize_register_offset(reg_write_offset)
self.register_region.set_variable(var_offset, variable)
# record this variable in variable manager
self.variable_manager[self.func_addr].add_variable('register', var_offset, variable)
# is it writing a pointer to a stack variable into the register?
# e.g. lea eax, [ebp-0x40]
stack_offset = self._addr_to_stack_offset(reg_write_expr)
if stack_offset is not None:
# it is!
# unfortunately we don't know the size. We use size None for now.
if stack_offset not in self.stack_region:
new_var = SimStackVariable(stack_offset, None, base='bp',
ident=self.variable_manager[self.func_addr].next_variable_ident('stack'),
region=self.func_addr,
)
self.stack_region.add_variable(stack_offset, new_var)
# record this variable in variable manager
self.variable_manager[self.func_addr].add_variable('stack', stack_offset, new_var)
base_offset = self.stack_region.get_base_addr(stack_offset)
assert base_offset is not None
for var in self.stack_region.get_variables_by_offset(stack_offset):
self.variable_manager[self.func_addr].reference_at(var, stack_offset - base_offset,
self._codeloc_from_state(state)
)
def _hook_memory_read(self, state):
mem_read_address = state.inspect.mem_read_address
mem_read_length = state.inspect.mem_read_length
stack_offset = self._addr_to_stack_offset(mem_read_address)
if stack_offset is None:
# it's not a stack access
# TODO:
pass
else:
if stack_offset not in self.stack_region:
# this stack offset is not covered by any existing stack variable
ident_sort = 'argument' if stack_offset > 0 else 'stack'
variable = SimStackVariable(stack_offset, mem_read_length, base='bp',
ident=self.variable_manager[self.func_addr].next_variable_ident(ident_sort),
region=self.func_addr,
)
self.stack_region.add_variable(stack_offset, variable)
# record this variable in variable manager
self.variable_manager[self.func_addr].add_variable('stack', stack_offset, variable)
base_offset = self.stack_region.get_base_addr(stack_offset)
assert base_offset is not None
existing_variables = self.stack_region.get_variables_by_offset(stack_offset)
if len(existing_variables) > 1:
# create a phi node for all other variables
ident_sort = 'argument' if stack_offset > 0 else 'stack'
variable = SimStackVariablePhi(
ident=self.variable_manager[self.func_addr].next_variable_ident(ident_sort),
region=self.func_addr,
variables=existing_variables,
)
self.stack_region.set_variable(stack_offset, variable)
self.variable_manager[self.func_addr].add_variable('stack', stack_offset, variable)
for variable in self.stack_region.get_variables_by_offset(stack_offset):
self.variable_manager[self.func_addr].read_from(variable, stack_offset - base_offset, self._codeloc_from_state(state))
def _hook_memory_write(self, state):
mem_write_address = state.inspect.mem_write_address
mem_write_expr = state.inspect.mem_write_expr
mem_write_length = len(mem_write_expr) / 8
stack_offset = self._addr_to_stack_offset(mem_write_address)
if stack_offset is None:
# it's not a stack access
# TODO:
pass
else:
# we always add a new variable to keep it SSA
variable = SimStackVariable(stack_offset, mem_write_length, base='bp',
ident=self.variable_manager[self.func_addr].next_variable_ident('stack'),
region=self.func_addr,
)
self.stack_region.set_variable(stack_offset, variable)
# record this variable in variable manager
self.variable_manager[self.func_addr].add_variable('stack', stack_offset, variable)
base_offset = self.stack_region.get_base_addr(stack_offset)
assert base_offset is not None
for variable in self.stack_region.get_variables_by_offset(stack_offset):
self.variable_manager[self.func_addr].write_to(variable, stack_offset - base_offset, self._codeloc_from_state(state))
#
# Util methods
#
def _normalize_register_offset(self, offset): #pylint:disable=no-self-use
# TODO:
return offset
@staticmethod
def _codeloc_from_state(state):
return CodeLocation(state.scratch.bbl_addr, state.scratch.stmt_idx, ins_addr=state.scratch.ins_addr)
def _to_signed(self, n):
if n >= 2 ** (self.arch.bits - 1):
# convert it to a negative number
return n - 2 ** self.arch.bits
return n
def _addr_to_stack_offset(self, addr):
"""
Convert an address to a stack offset.
:param claripy.ast.Base addr: The address to convert from.
:return: A stack offset if the addr comes from the stack pointer, or None if the address
does not come from the stack pointer.
"""
def _parse(addr):
if addr.op == '__add__':
# __add__ might have multiple arguments
parsed = [ _parse(arg) for arg in addr.args ]
annotated = [ True for annotated, _ in parsed if annotated is True ]
if len(annotated) != 1:
# either nothing is annotated, or more than one element is annotated
raise ValueError()
return True, sum([ offset for _, offset in parsed ])
elif addr.op == '__sub__':
# __sub__ might have multiple arguments
parsed = [ _parse(arg) for arg in addr.args ]
first_annotated, first_offset = parsed[0]
if first_annotated is False:
# the first argument is not annotated. we don't support it.
raise ValueError()
if any([ annotated for annotated, _ in parsed[1:] ]):
# more than one argument is annotated. we don't support it.
raise ValueError()
return True, first_offset - sum([ offset for _, offset in parsed[1:] ])
else:
anno = next(iter(anno for anno in addr.annotations if isinstance(anno, StackLocationAnnotation)), None)
if anno is None:
if addr.op == 'BVV':
return False, addr._model_concrete.value
raise ValueError()
return True, anno.offset
# find the annotated AST
try: annotated, offset = _parse(addr)
except ValueError: return None
if not annotated:
return None
return self._to_signed(offset)
class VariableRecovery(ForwardAnalysis, Analysis): #pylint:disable=abstract-method
"""
Recover "variables" from a function using forced execution.
    While variables play a very important role in programming, they do not really exist after compilation. However, we
    can still identify and recover their counterparts in binaries. It is worth noting that not every variable in source
    code can be identified in binaries, and not every recognized variable in binaries has a corresponding variable in
    the original source code. In short, there is no guarantee that the variables we identify/recognize in a binary
    are the same variables as in its source code.
This analysis uses heuristics to identify and recovers the following types of variables:
- Register variables.
- Stack variables.
- Heap variables.
- Global variables.
This analysis takes a function as input, and performs a data-flow analysis on nodes. It runs concrete execution on
every statement and hooks all register/memory accesses to discover all places that are accessing variables. It is
slow, but has a more accurate analysis result. For a fast but inaccurate variable recovery, you may consider using
VariableRecoveryFast.
    This analysis follows SSA: every write creates a new variable in registers or memory (stack, heap, etc.). Things
    may get tricky when overlapping accesses to variables exist (in memory only, as you cannot really have overlapping
    accesses to registers). In such cases, a new variable will be created, and this new variable will overlap with one
    or more existing variables. A decision procedure (which is pretty much TODO) is required at the end of this
    analysis to resolve the conflicts between overlapping variables.
"""
def __init__(self, func, max_iterations=20):
"""
:param knowledge.Function func: The function to analyze.
"""
function_graph_visitor = FunctionGraphVisitor(func)
ForwardAnalysis.__init__(self, order_jobs=True, allow_merging=True, allow_widening=False,
graph_visitor=function_graph_visitor)
self.function = func
self._node_to_state = { }
self.variable_manager = self.kb.variables
self._max_iterations = max_iterations
self._node_iterations = defaultdict(int)
self._analyze()
#
# Main analysis routines
#
def _pre_analysis(self):
pass
def _pre_job_handling(self, job):
pass
def _initial_abstract_state(self, node):
concrete_state = self.project.factory.blank_state(
addr=node.addr,
mode='fastpath' # we don't want to do any solving
)
# annotate the stack pointer
concrete_state.regs.sp = concrete_state.regs.sp.annotate(StackLocationAnnotation(8))
# give it enough stack space
concrete_state.regs.bp = concrete_state.regs.sp + 0x100000
return VariableRecoveryState(self.variable_manager, self.project.arch, self.function.addr, [ concrete_state ])
def _merge_states(self, node, *states):
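        # merge all incoming abstract states pairwise; e.g. [s0, s1, s2] folds into s0.merge(s1).merge(s2)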
if len(states) == 1:
return states[0]
return reduce(lambda s_0, s_1: s_0.merge(s_1), states[1:], states[0])
def _run_on_node(self, node, state):
"""
:param angr.Block node:
:param VariableRecoveryState state:
:return:
"""
l.debug('Analyzing block %#x, iteration %d.', node.addr, self._node_iterations[node])
concrete_state = state.get_concrete_state(node.addr)
if concrete_state is None:
# didn't find any state going to here
l.error("_run_on_node(): cannot find any state for address %#x.", node.addr)
return False, state
state = state.copy()
if self._node_iterations[node] >= self._max_iterations:
l.debug('Skip node %s as we have iterated %d times on it.', node, self._node_iterations[node])
return False, state
state.register_callbacks([ concrete_state ])
successors = self.project.factory.successors(concrete_state,
addr=node.addr,
size=node.size,
opt_level=0 # disable the optimization in order to have
# instruction-level analysis results
)
output_states = successors.all_successors
state.concrete_states = output_states
self._node_to_state[node.addr] = state
self._node_iterations[node] += 1
return True, state
def _intra_analysis(self):
pass
def _post_analysis(self):
# TODO: only re-assign variable names to those that are newly changed
self.variable_manager.initialize_variable_names()
for addr, state in self._node_to_state.iteritems():
self.variable_manager[self.function.addr].set_live_variables(addr,
state.register_region,
state.stack_region
)
from angr.analyses import AnalysesHub
AnalysesHub.register_default('VariableRecovery', VariableRecovery)
|
|
#!/usr/bin/python3
import logging
from argparse import ArgumentParser
from collections import deque
from time import sleep
import yaml
from mppsolar.libs.mqttbroker import MqttBroker
from mppsolar.outputs import output_results
from mppsolar.ports import get_port
from mppsolar.protocols import get_protocol
from mppsolar.version import __version__ # noqa: F401
# from mppsolar.io import get_port
# Set-up logger
log = logging.getLogger("")
FORMAT = "%(asctime)-15s:%(levelname)s:%(module)s:%(funcName)s@%(lineno)d: %(message)s"
logging.basicConfig(format=FORMAT)
class ConfigError(Exception):
pass
sample_config = """
port:
path: /dev/ttyUSB0
type: test
baud: 2400
protocol: PI30
mqttbroker:
name: null
port: 1883
user: null
pass: null
adhoc_commands:
topic: test/command_topic
outputs:
- name: screen
- name: mqtt
commands:
- command: QPIGS
gap: 10s
outputs:
- name: screen
- name: mqtt
- command: QPIRI
gap: 1m
outputs:
- name: screen
tag: testtag
filter: volt
"""
ADHOC_COMMANDS = deque([])
SPLIT_TOKEN = ","
def mqtt_callback(client, userdata, msg):
print(f"Received `{msg.payload}` on topic `{msg.topic}`")
newCommand = msg.payload
ADHOC_COMMANDS.append(newCommand)
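# Ad-hoc commands arrive via the mqtt broker subscription set up in main(); for example
# (the broker host below is an assumption, the topic comes from the sample config):
#     mosquitto_pub -h localhost -t test/command_topic -m QPIGS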
def main():
description = f"Power Device Monitoring Utility, version: {__version__}"
parser = ArgumentParser(description=description)
parser.add_argument(
"-C",
"--configFile",
nargs="?",
type=str,
help="Full location of config file",
const="/etc/mpp-solar/powermon.yml",
default=None,
)
parser.add_argument("-v", "--version", action="store_true", help="Display the version")
parser.add_argument(
"--generateConfigFile",
action="store_true",
help="Print a new config file based on options supplied (including the existing config file)",
)
parser.add_argument(
"--once",
action="store_true",
help="Only loop through config once",
)
parser.add_argument(
"-D",
"--debug",
action="store_true",
help="Enable Debug and above (i.e. all) messages",
)
parser.add_argument(
"-I", "--info", action="store_true", help="Enable Info and above level messages"
)
args = parser.parse_args()
# prog_name = parser.prog
# logging (DEBUG, INFO, WARNING, ERROR, CRITICAL)
# Turn on debug if needed
if args.debug:
log.setLevel(logging.DEBUG)
elif args.info:
log.setLevel(logging.INFO)
else:
# set default log level
log.setLevel(logging.WARNING)
logging.basicConfig()
# Display version if asked
log.info(description)
if args.version:
print(description)
return None
# Build configuration from defaults, config file and command line overrides
log.info(f"Using config file:{args.configFile}")
# build config - start with defaults
config = yaml.safe_load(sample_config)
# build config - update with details from config file
if args.configFile is not None:
with open("args.configFile", "r") as stream:
try:
config.update(yaml.safe_load(stream))
except yaml.YAMLError as exc:
print(exc)
# build config - override with any command line arguments
# TODO: command line overrides
# if generateConfigFile is true then print config out
if args.generateConfigFile:
print("# yaml config for powermon")
print("# default location /etc/mpp-solar/powermon.yml")
print(yaml.dump(config))
return
# debug dump config
log.debug(config)
# Build mqtt broker
# TODO disable if not defined
mqtt_broker = MqttBroker(
name=config["mqttbroker"]["name"],
port=config["mqttbroker"]["port"],
username=config["mqttbroker"]["user"],
password=config["mqttbroker"]["pass"],
)
log.debug(mqtt_broker)
# sub to command topic
# TODO disable if not defined
# TODO disable if mqttbroker is null
mqtt_broker.connect()
mqtt_broker.subscribe(config["adhoc_commands"]["topic"], mqtt_callback)
# connect to mqtt
mqtt_broker.start()
# get port
portconfig = config["port"].copy()
log.debug("portconfig", portconfig)
port = get_port(portconfig)
# port = get_port(porttype=porttype)
if not port:
log.error(f"No port for config '{portconfig}' found")
raise ConfigError(f"No port for config '{portconfig}' found")
# get protocol handler
protocol = get_protocol(protocol=config["protocol"])
loop = True
try:
# connect to port
port.connect()
while loop:
# loop through command list
for command in config["commands"]:
# process any adhoc commands first
log.debug(f"adhoc command list: {ADHOC_COMMANDS}")
while len(ADHOC_COMMANDS) > 0:
adhoc_command = ADHOC_COMMANDS.popleft().decode() # FIXME: decode to str #
log.info(f"Processing command: {adhoc_command}")
results = port.process_command(command=adhoc_command, protocol=protocol)
log.debug(f"results {results}")
# send to output processor(s)
# TODO sort outputs
output_results(
results=results, outputs=config["adhoc_commands"], mqtt_broker=mqtt_broker
)
# process 'normal' commands
log.info(f"Processing command: {command}")
results = port.process_command(command=command["command"], protocol=protocol)
log.debug(f"results {results}")
# send to output processor(s)
output_results(results=results, outputs=command, mqtt_broker=mqtt_broker)
# pause
pause_time = config["command_pause"]
log.debug(f"Sleeping for {pause_time}secs")
if args.once:
loop = False
else:
sleep(pause_time)
except KeyboardInterrupt:
print("KeyboardInterrupt")
finally:
# Disconnect port
port.disconnect()
# Disconnect mqtt
mqtt_broker.stop()
|
|
#!/usr/bin/python3
# db.py by Bill Weinman <http://bw.org/contact/>
# Copyright (c) 2010 The BearHeart Group, LLC
# created 2010-04-23
#
import sys, os
import sqlite3
from bwCGI import bwCGI
from bwDB import bwDB
from bwTL import tlFile
from bwConfig import configFile
__version__ = "1.1.3"
# namespace container for global variables
g = dict(
VERSION = 'db.py version {}'.format(__version__),
config_file = 'db.conf',
template_ext = '.html',
table_name = 'testimonial',
stacks = dict(
messages = [],
errors = [],
hiddens = []
)
)
def main():
init()
if 'a' in g['vars']: dispatch()
main_page()
def init():
g['cgi'] = bwCGI()
g['cgi'].send_header()
g['vars'] = g['cgi'].vars()
g['linkback'] = g['cgi'].linkback()
g['config'] = configFile(g['config_file']).recs()
g['tl'] = tlFile(None, showUnknowns = True)
g['db'] = bwDB( filename = g['config']['db'], table = g['table_name'] )
def dispatch():
v = g['vars']
a = v.getfirst('a')
if a == 'add':
add()
elif a == 'edit_del':
if 'edit' in v: edit()
elif 'delete' in v: delete_confirm()
else: error("invalid edit_del")
elif a == 'update':
if 'cancel' in v:
message('Edit canceled')
main_page()
else: update()
elif a == 'delete_do':
if 'cancel' in v:
message('Delete canceled')
main_page()
else: delete_do()
else:
error("unhandled jump: ", a)
main_page()
def main_page():
listrecs()
hidden('a', 'add')
page('main', 'Enter a new testimonial')
def listrecs():
''' display the database content '''
db = g['db']
v = g['vars']
sql_limit = int(g['config'].get('sql_limit', 25))
# how many records do we have?
count = db.countrecs()
message('There are {} records in the database. Add some more!'.format(count or 'no'))
# how many pages do we have?
numpages = count // int(sql_limit)
if count % int(sql_limit): numpages += 1
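    # e.g. with 26 records and sql_limit 25: 26 // 25 == 1, plus 1 for the remainder -> 2 pages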
# what page is this?
curpage = 0
if 'jumppage' in v:
curpage = int(v.getfirst('jumppage'))
elif 'nextpage' in v:
curpage = int(v.getfirst('pageno')) + 1
elif 'prevpage' in v:
curpage = int(v.getfirst('pageno')) - 1
pagebar = list_pagebar(curpage, numpages)
a = ''
q = '''
SELECT * FROM {}
ORDER BY byline
LIMIT ?
OFFSET ?
'''.format(g['table_name'])
for r in db.sql_query(q, [sql_limit, (curpage * sql_limit)]):
set_form_vars(**r)
a += getpage('recline')
set_form_vars()
var('CONTENT', pagebar + a + pagebar )
def list_pagebar(pageno, numpages):
''' return the html for the pager line '''
prevlink = '<span class="n"><<</span>'
nextlink = '<span class="n">>></span>'
linkback = g['linkback']
if pageno > 0:
prevlink = '<a href="{}?pageno={}&prevpage=1"><<</a>'.format(linkback, pageno)
if pageno < ( numpages - 1 ):
nextlink = '<a href="{}?pageno={}&nextpage=1">>></a>'.format(linkback, pageno)
pagebar = ''
for n in range(0, numpages):
        if n == pageno: pagebar += '<span class="n">{}</span>'.format(n + 1)
else: pagebar += '<a href="{}?jumppage={}">{}</a>'.format(linkback, n, n + 1)
var('prevlink', prevlink)
var('nextlink', nextlink)
var('pagebar', pagebar)
p = getpage('nextprev')
return p
def page(pagename, title = ''):
''' display a page from html template '''
tl = g['tl']
htmldir = g['config']['htmlDir']
file_ext = g['template_ext']
var('pageTitle', title)
var('VERSION', g['VERSION'])
set_stack_vars()
for p in ( 'header', pagename, 'footer' ):
try:
tl.file(os.path.join(htmldir, p + file_ext))
for line in tl.readlines(): print(line, end='') # lines are already terminated
except IOError as e:
errorexit('Cannot open file ({})'.format(e))
exit()
def getpage(p):
''' return a page as text from an html template '''
tl = g['tl']
htmldir = g['config']['htmlDir']
file_ext = g['template_ext']
a = ''
try:
tl.file(os.path.join(htmldir, p + file_ext))
for line in tl.readlines(): a += line # lines are already terminated
except IOError as e:
errorexit('Cannot open file ({})'.format(e))
return(a)
### actions
def add():
db = g['db']
v = g['vars']
cgi = g['cgi']
rec = dict(
testimonial = cgi.entity_encode(v.getfirst('testimonial')),
byline = cgi.entity_encode(v.getfirst('byline'))
)
db.insert(rec)
message('Record ({}) added'.format(rec['byline']))
main_page()
def edit():
id = g['vars'].getfirst('id')
rec = g['db'].getrec(id)
set_form_vars(**rec)
hidden('a', 'update')
hidden('id', id)
page('edit', 'Edit this testimonial')
def delete_confirm():
id = g['vars'].getfirst('id')
rec = g['db'].getrec(id)
set_form_vars(**rec)
hidden('a', 'delete_do')
hidden('id', id)
hidden('byline', rec['byline'])
page('delconfirm', 'Delete this testimonial?')
def delete_do():
db = g['db']
v = g['vars']
id = v.getfirst('id')
byline = v.getfirst('byline')
db.delete(id)
message('Record ({}) deleted'.format(byline))
main_page()
def update():
db = g['db']
v = g['vars']
cgi = g['cgi']
id = v.getfirst('id')
rec = dict(
id = id,
testimonial = cgi.entity_encode(v.getfirst('testimonial')),
byline = cgi.entity_encode(v.getfirst('byline'))
)
db.update(id, rec)
message('Record ({}) updated'.format(rec['byline']))
main_page()
### manage template variables
def var(n, v = None):
''' shortcut for setting a variable '''
return g['tl'].var(n, v)
def set_form_vars(**kwargs):
t = kwargs.get('testimonial', '')
b = kwargs.get('byline', '')
id = kwargs.get('id', '')
var('testimonial', t)
var('byline', b)
var('id', id)
var('SELF', g['linkback'])
def stackmessage(stack, *list, **kwargs):
sep = kwargs.get('sep', ' ')
m = sep.join(str(i) for i in list)
g['stacks'][stack].append(m)
def message(*list, **kwargs):
stackmessage('messages', *list, **kwargs)
def error(*list, **kwargs):
if 'cgi' in g:
stackmessage('errors', *list, **kwargs)
else:
errorexit(' '.join(list))
def hidden(n, v):
g['stacks']['hiddens'].append([n, v])
def set_stack_vars():
a = ''
for m in g['stacks']['messages']:
a += '<p class="message">{}</p>\n'.format(m)
var('MESSAGES', a)
a = ''
for m in g['stacks']['errors']:
a += '<p class="error">{}</p>\n'.format(m)
var('ERRORS', a)
a = ''
for m in g['stacks']['hiddens']:
a += '<input type="hidden" name="{}" value="{}" />\n'.format(*m)
var('hiddens', a)
### utilities
def errorexit(e):
me = os.path.basename(sys.argv[0])
print('<p style="color:red">')
print('{}: {}'.format(me, e))
print('</p>')
exit(0)
def message_page(*list):
message(*list)
main_page()
def debug(*args):
print(*args, file=sys.stderr)
if __name__ == "__main__": main()
|
|
#!/usr/bin/env python
#
# $Id: kfssetup.py 36 2007-11-12 02:43:36Z sriramsrao $
#
# Copyright 2007 Kosmix Corp.
#
# This file is part of Kosmos File System (KFS).
#
# Licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Script to setup KFS servers on a set of nodes
# This scripts reads a machines.cfg file that describes the meta/chunk
# servers configurations and installs the binaries/scripts and creates
# the necessary directory hierarchy.
#
import os,sys,os.path,getopt
import socket,threading,popen2
import md5
from ConfigParser import ConfigParser, NoSectionError
# Use the python config parser to parse out machines setup
# Input file format for machines.cfg
# [metaserver]
# type: metaserver
# clusterkey: <cluster name>
# node: <value>
# rundir: <dir>
# baseport: <port>
#
# [chunkserver1]
# node: <value>
# rundir: <dir>
# baseport: <port>
# space: <space exported by the server> (n m/g)
# {chunkdir: <dir>}
# [chunkserver2]
# ...
# [chunkserverN]
# ...
#
# where, space is expressed in units of MB/GB or bytes.
#
# Install on each machine with the following directory hierarchy:
# rundir/
# bin/ -- binaries, config file, kfscp/kfslog/kfschunk dirs
# logs/ -- log output from running the binary
# scripts/ -- all the helper scripts
# If a path for storing the chunks isn't specified, then it defaults to bin
#
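# A hypothetical machines.cfg following the format above (node names, paths and ports are placeholders):
#
#   [metaserver]
#   type: metaserver
#   clusterkey: kfs-test
#   node: meta-host
#   rundir: /home/kfs/meta
#   baseport: 20000
#
#   [chunkserver1]
#   node: chunk-host-1
#   rundir: /home/kfs/chunk
#   baseport: 30000
#   space: 100 G
#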
unitsScale = {'g' : 1 << 30, 'm' : 1 << 20, 'k' : 1 << 10, 'b' : 1}
maxConcurrent = 25
chunkserversOnly = 0
tarProg = 'gtar'
md5String = ""
def which(program):
import os
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def setupMeta(section, config, outputFn, packageFn):
""" Setup the metaserver binaries/config files on a node. """
global chunkserversOnly
if chunkserversOnly > 0:
print "Chunkservers only is set; not doing meta"
return
key = config.get(section, 'clusterkey')
baseport = config.getint(section, 'baseport')
rundir = config.get(section, 'rundir')
fh = open(outputFn, 'w')
print >> fh, "metaServer.clientPort = %d" % baseport
print >> fh, "metaServer.chunkServerPort = %d" % (baseport + 100)
print >> fh, "metaServer.clusterKey = %s" % (key)
print >> fh, "metaServer.cpDir = %s/bin/kfscp" % rundir
print >> fh, "metaServer.logDir = %s/bin/kfslog" % rundir
if config.has_option(section, 'loglevel'):
print >> fh, "metaServer.loglevel = %s" % config.get(section, 'loglevel')
if config.has_option(section, 'worm'):
print >> fh, "metaServer.wormMode = 1"
if config.has_option(section, 'numservers'):
print >> fh, "metaServer.minChunkservers = %s" % config.get(section, 'numservers')
if config.has_option(section, 'md5sumfilename'):
print >> fh, "metaServer.md5sumFilename = %s" % config.get(section, 'md5sumfilename')
fh.close()
if config.has_option(section, 'webuiConfFile'):
confFile = config.get(section, 'webuiConfFile')
fh = open(confFile, 'w')
print >> fh, "[webserver]"
print >> fh, "webServer.metaserverPort = %d" % baseport
print >> fh, "webServer.port = %d" % (baseport + 50)
print >> fh, "webServer.allMachinesFn = %s/webui/all-machines.txt" % rundir
print >> fh, "webServer.docRoot = %s/webui/files" % rundir
fh.close()
cmd = "%s -zcf %s bin/logcompactor bin/metaserver %s lib webui scripts/*" % (tarProg, packageFn, outputFn)
os.system(cmd)
installArgs = "-r %s -d %s -m" % (tarProg, rundir)
return installArgs
def setupChunkConfig(section, config, outputFn):
""" Setup the chunkserver binaries/config files on a node. """
metaNode = config.get('metaserver', 'node')
metaToChunkPort = config.getint('metaserver', 'baseport') + 100
hostname = config.get(section, 'node')
# for rack-aware replication, we assume that nodes on different racks are on different subnets
s = socket.gethostbyname(hostname)
ipoctets = s.split('.')
rackId = int(ipoctets[2])
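    # e.g. a chunkserver at 10.5.7.42 gets rackId 7 (the third octet)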
#
fh = open (outputFn, 'w')
print >> fh, "chunkServer.metaServer.hostname = %s" % metaNode
print >> fh, "chunkServer.metaServer.port = %d" % metaToChunkPort
print >> fh, "chunkServer.clientPort = %d" % config.getint(section, 'baseport')
print >> fh, "chunkServer.clusterKey = %s" % config.get('metaserver', 'clusterkey')
print >> fh, "chunkServer.rackId = %d" % (rackId)
print >> fh, "chunkServer.md5sum = %s" % (md5String)
space = config.get(section, 'space')
s = space.split()
if (len(s) >= 2):
units = s[1].lower()
else:
units = 'b'
value = int(s[0]) * unitsScale[ units[0] ]
print >> fh, "chunkServer.totalSpace = %d" % value
rundir = config.get(section, 'rundir')
if config.has_option(section, 'chunkdir'):
chunkDir = config.get(section, 'chunkdir')
else:
chunkDir = "%s/bin/kfschunk" % (rundir)
print >> fh, "chunkServer.chunkDir = %s" % (chunkDir)
print >> fh, "chunkServer.logDir = %s/bin/kfslog" % (rundir)
if config.has_option(section, 'loglevel'):
print >> fh, "chunkServer.loglevel = %s" % config.get(section, 'loglevel')
fh.close()
def setupChunk(section, config, outputFn, packageFn):
""" Setup the chunkserver binaries/config files on a node. """
setupChunkConfig(section, config, outputFn)
cmd = "%s -zcf %s bin/chunkscrubber bin/chunkserver %s lib scripts/*" % (tarProg, packageFn, outputFn)
os.system(cmd)
rundir = config.get(section, 'rundir')
if config.has_option(section, 'chunkdir'):
chunkDir = config.get(section, 'chunkdir')
else:
chunkDir = "%s/bin/kfschunk" % (rundir)
installArgs = "-r %s -d %s -c \"%s\" " % (tarProg, rundir, chunkDir)
return installArgs
def usage():
""" Print out the usage for this program. """
print "%s [-f, --file <server.cfg>] [-m , --machines <chunkservers.txt>] [-r, --tar <tar|gtar>] \
[-w, --webui <webui dir>] [ [-b, --bin <dir with binaries>] {-u, --upgrade} | [-U, --uninstall] ]\n" % sys.argv[0]
return
def copyDir(srcDir, dstDir):
""" Copy files from src to dest"""
cmd = "cp -r %s %s" % (srcDir, dstDir)
os.system(cmd)
def computeMD5(datadir, digest):
"""Update the MD5 digest using the MD5 of all the files in a directory"""
files = os.listdir(datadir)
for f in sorted(files):
path = os.path.join(datadir, f)
if os.path.isdir(path):
continue
fh = open(path, 'r')
while 1:
buf = fh.read(4096)
if buf == "":
break
digest.update(buf)
def getFiles(buildDir, webuidir):
""" Copy files from buildDir/bin, buildDir/lib and . to ./bin, ./lib, and ./scripts
respectively."""
global md5String
cmd = "mkdir -p ./scripts; cp ./* scripts; chmod u+w scripts/*"
os.system(cmd)
s = "%s/bin" % buildDir
if (os.path.exists(s + "/amd64")):
s += "/amd64"
copyDir(s, './bin')
digest = md5.new()
computeMD5('./bin', digest)
s = "%s/lib" % buildDir
if (os.path.exists(s + "/amd64")):
s += "/amd64"
copyDir(s, './lib')
computeMD5('./lib', digest)
md5String = digest.hexdigest()
copyDir(webuidir, './webui')
def cleanup(fn):
""" Cleanout the dirs we created. """
cmd = "rm -rf ./scripts ./bin ./lib ./webui %s " % fn
os.system(cmd)
class InstallWorker(threading.Thread):
"""InstallWorker thread that runs a command on remote node"""
def __init__(self, sec, conf, tmpdir, i, m):
threading.Thread.__init__(self)
self.section = sec
self.config = conf
self.tmpdir = tmpdir
self.id = i
self.mode = m
self.doBuildPkg = 1
def singlePackageForAll(self, packageFn, installArgs):
self.doBuildPkg = 0
self.packageFn = packageFn
self.installArgs = installArgs
def buildPackage(self):
if (self.section == 'metaserver'):
self.installArgs = setupMeta(self.section, self.config, self.configOutputFn, self.packageFn)
else:
self.installArgs = setupChunk(self.section, self.config, self.configOutputFn, self.packageFn)
def doInstall(self):
fn = os.path.basename(self.packageFn)
if (self.section == 'metaserver'):
if chunkserversOnly > 0:
return
c = "scp -pr -o StrictHostKeyChecking=no -q %s kfsinstall.sh %s:/tmp/; ssh -o StrictHostKeyChecking=no %s 'mv /tmp/%s /tmp/kfspkg.tgz; sh /tmp/kfsinstall.sh %s %s ' " % \
(self.packageFn, self.dest, self.dest, fn, self.mode, self.installArgs)
else:
# chunkserver
configFn = os.path.basename(self.configOutputFn)
c = "scp -pr -o StrictHostKeyChecking=no -q %s kfsinstall.sh %s %s:/tmp/; ssh -o StrictHostKeyChecking=no %s 'mv /tmp/%s /tmp/kfspkg.tgz; mv /tmp/%s /tmp/ChunkServer.prp; sh /tmp/kfsinstall.sh %s %s ' " % \
(self.packageFn, self.configOutputFn, self.dest, self.dest, fn, configFn, self.mode, self.installArgs)
p = popen2.Popen3(c, True)
for out in p.fromchild:
if len(out) > 1:
print '[%s]: %s' % (self.dest, out[:-1])
def cleanup(self):
if self.doBuildPkg > 0:
# if we built the package, nuke it
c = "rm -f %s %s" % (self.configOutputFn, self.packageFn)
else:
c = "rm -f %s" % (self.configOutputFn)
os.system(c)
c = "ssh -o StrictHostKeyChecking=no %s 'rm -f /tmp/install.sh /tmp/kfspkg.tgz' " % self.dest
popen2.Popen3(c, True)
def run(self):
self.configOutputFn = "%s/fn.%d" % (self.tmpdir, self.id)
if self.doBuildPkg > 0:
self.packageFn = "%s/kfspkg.%d.tgz" % (self.tmpdir, self.id)
self.buildPackage()
else:
setupChunkConfig(self.section, self.config, self.configOutputFn)
        self.dest = self.config.get(self.section, 'node')
self.doInstall()
self.cleanup()
def doInstall(config, builddir, tmpdir, webuidir, upgrade, serialMode):
if not config.has_section('metaserver'):
        raise NoSectionError('metaserver')
if not os.path.exists(builddir):
print "%s : directory doesn't exist\n" % builddir
sys.exit(-1)
getFiles(builddir, webuidir)
if os.path.exists('webui'):
webuiconfFile = os.path.join(webuidir, "server.conf")
config.set('metaserver', 'webuiConfFile', webuiconfFile)
workers = []
i = 0
sections = config.sections()
if upgrade == 1:
mode = "-u"
else:
mode = "-i"
chunkPkgFn = ""
cleanupFn = ""
for s in sections:
w = InstallWorker(s, config, tmpdir, i, mode)
workers.append(w)
if serialMode == 1:
w.start()
w.join()
else:
# same package for all chunkservers
if (s != 'metaserver'):
if chunkPkgFn == "":
configOutputFn = "%s/fn.common" % (tmpdir)
chunkPkgFn = "kfspkg-chunk.tgz"
cleanupFn = "%s %s" % (configOutputFn, chunkPkgFn)
installArgs = setupChunk(s, config, configOutputFn, chunkPkgFn)
w.singlePackageForAll(chunkPkgFn, installArgs)
i = i + 1
if serialMode == 0:
for i in xrange(0, len(workers), maxConcurrent):
#start a bunch
for j in xrange(maxConcurrent):
idx = i + j
if idx >= len(workers):
break
workers[idx].start()
#wait for each one to finish
for j in xrange(maxConcurrent):
idx = i + j
if idx >= len(workers):
break
workers[idx].join()
print "Done with %d workers" % idx
for i in xrange(len(workers)):
workers[i].join(120.0)
cleanup(cleanupFn)
class UnInstallWorker(threading.Thread):
"""UnInstallWorker thread that runs a command on remote node"""
def __init__(self, c, n):
threading.Thread.__init__(self)
self.cmd = c
self.node = n
def run(self):
# capture stderr and ignore the hostkey has changed message
p = popen2.Popen3(self.cmd, True)
for out in p.fromchild:
if len(out) > 1:
print '[%s]: %s' % (self.node, out[:-1])
def doUninstall(config):
sections = config.sections()
workers = []
for s in sections:
rundir = config.get(s, 'rundir')
node = config.get(s, 'node')
if (s == 'metaserver'):
otherArgs = '-m'
else:
# This is a chunkserver; so nuke out chunk dir as well
if config.has_option(s, 'chunkdir'):
chunkDir = config.get(s, 'chunkdir')
else:
chunkDir = "%s/bin/kfschunk" % (rundir)
otherArgs = "-c \"%s\"" % (chunkDir)
cmd = "ssh -o StrictHostKeyChecking=no %s 'cd %s; sh scripts/kfsinstall.sh -U -d %s %s' " % \
(node, rundir, rundir, otherArgs)
# print "Uninstall cmd: %s\n" % cmd
# os.system(cmd)
w = UnInstallWorker(cmd, node)
workers.append(w)
w.start()
print "Started all the workers..waiting for them to finish"
for i in xrange(len(workers)):
workers[i].join(120.0)
sys.exit(0)
def readChunkserversFile(machinesFn):
'''Given a list of chunkserver node names, one per line, construct a config
for each chunkserver and add that to the config based on the defaults'''
global config
defaultChunkOptions = config.options("chunkserver_defaults")
for l in open(machinesFn, 'r'):
line = l.strip()
if (line.startswith('#')):
# ignore commented out node names
continue
section_name = "chunkserver_" + line
config.add_section(section_name)
config.set(section_name, "node", line)
for o in defaultChunkOptions:
config.set(section_name, o, config.get("chunkserver_defaults", o))
config.remove_section("chunkserver_defaults")
if __name__ == '__main__':
(opts, args) = getopt.getopt(sys.argv[1:], "cb:f:m:r:t:w:hsUu",
["chunkserversOnly", "build=", "file=", "machines=", "tar=", "tmpdir=",
"webui=", "help", "serialMode", "uninstall", "upgrade"])
filename = ""
builddir = ""
uninstall = 0
upgrade = 0
serialMode = 0
machines = ""
webuidir = ""
chunkserversOnly = 0
# Script probably won't work right if you change tmpdir from /tmp location
tmpdir = "/tmp"
for (o, a) in opts:
if o in ("-h", "--help"):
usage()
sys.exit(2)
if o in ("-f", "--file"):
filename = a
elif o in ("-b", "--build"):
builddir = a
elif o in ("-c", "--chunkserversOnly"):
chunkserversOnly = 1
elif o in ("-m", "--machines"):
machines = a
elif o in ("-r", "--tar"):
tarProg = a
elif o in ("-w", "--webuidir"):
webuidir = a
elif o in ("-t", "--tmpdir"):
tmpdir = a
elif o in ("-U", "--uninstall"):
uninstall = 1
elif o in ("-u", "--upgrade"):
upgrade = 1
elif o in ("-s", "--serialMode"):
serialMode = 1
if not which(tarProg):
if (which('gtar')):
tarProg = 'gtar'
else:
tarProg = 'tar'
if not os.path.exists(filename):
print "%s : directory doesn't exist\n" % filename
sys.exit(-1)
config = ConfigParser()
config.readfp(open(filename, 'r'))
if machines != "":
readChunkserversFile(machines)
if uninstall == 1:
doUninstall(config)
else:
doInstall(config, builddir, tmpdir, webuidir, upgrade, serialMode)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI Routers for the Identity service."""
from keystone.common import router
from keystone.common import wsgi
from keystone import config
from keystone.identity import controllers
class Public(wsgi.ComposableRouter):
def add_routes(self, mapper):
tenant_controller = controllers.Tenant()
mapper.connect('/tenants',
controller=tenant_controller,
action='get_projects_for_token',
conditions=dict(method=['GET']))
class Admin(wsgi.ComposableRouter):
def add_routes(self, mapper):
# Tenant Operations
tenant_controller = controllers.Tenant()
mapper.connect('/tenants',
controller=tenant_controller,
action='get_all_projects',
conditions=dict(method=['GET']))
mapper.connect('/tenants/{tenant_id}',
controller=tenant_controller,
action='get_project',
conditions=dict(method=['GET']))
# User Operations
user_controller = controllers.User()
mapper.connect('/users/{user_id}',
controller=user_controller,
action='get_user',
conditions=dict(method=['GET']))
# Role Operations
roles_controller = controllers.Role()
mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles',
controller=roles_controller,
action='get_user_roles',
conditions=dict(method=['GET']))
mapper.connect('/users/{user_id}/roles',
controller=roles_controller,
action='get_user_roles',
conditions=dict(method=['GET']))
def append_v3_routers(mapper, routers):
routers.append(
router.Router(controllers.DomainV3(),
'domains', 'domain'))
project_controller = controllers.ProjectV3()
routers.append(
router.Router(project_controller,
'projects', 'project'))
mapper.connect('/users/{user_id}/projects',
controller=project_controller,
action='list_user_projects',
conditions=dict(method=['GET']))
user_controller = controllers.UserV3()
routers.append(
router.Router(user_controller,
'users', 'user'))
mapper.connect('/groups/{group_id}/users',
controller=user_controller,
action='list_users_in_group',
conditions=dict(method=['GET']))
mapper.connect('/groups/{group_id}/users/{user_id}',
controller=user_controller,
action='add_user_to_group',
conditions=dict(method=['PUT']))
mapper.connect('/groups/{group_id}/users/{user_id}',
controller=user_controller,
action='check_user_in_group',
conditions=dict(method=['HEAD']))
mapper.connect('/groups/{group_id}/users/{user_id}',
controller=user_controller,
action='remove_user_from_group',
conditions=dict(method=['DELETE']))
group_controller = controllers.GroupV3()
routers.append(
router.Router(group_controller,
'groups', 'group'))
mapper.connect('/users/{user_id}/groups',
controller=group_controller,
action='list_groups_for_user',
conditions=dict(method=['GET']))
role_controller = controllers.RoleV3()
routers.append(router.Router(role_controller, 'roles', 'role'))
mapper.connect('/projects/{project_id}/users/{user_id}/roles/{role_id}',
controller=role_controller,
action='create_grant',
conditions=dict(method=['PUT']))
mapper.connect('/projects/{project_id}/groups/{group_id}/roles/{role_id}',
controller=role_controller,
action='create_grant',
conditions=dict(method=['PUT']))
mapper.connect('/projects/{project_id}/users/{user_id}/roles/{role_id}',
controller=role_controller,
action='check_grant',
conditions=dict(method=['HEAD']))
mapper.connect('/projects/{project_id}/groups/{group_id}/roles/{role_id}',
controller=role_controller,
action='check_grant',
conditions=dict(method=['HEAD']))
mapper.connect('/projects/{project_id}/users/{user_id}/roles',
controller=role_controller,
action='list_grants',
conditions=dict(method=['GET']))
mapper.connect('/projects/{project_id}/groups/{group_id}/roles',
controller=role_controller,
action='list_grants',
conditions=dict(method=['GET']))
mapper.connect('/projects/{project_id}/users/{user_id}/roles/{role_id}',
controller=role_controller,
action='revoke_grant',
conditions=dict(method=['DELETE']))
mapper.connect('/projects/{project_id}/groups/{group_id}/roles/{role_id}',
controller=role_controller,
action='revoke_grant',
conditions=dict(method=['DELETE']))
mapper.connect('/domains/{domain_id}/users/{user_id}/roles/{role_id}',
controller=role_controller,
action='create_grant',
conditions=dict(method=['PUT']))
mapper.connect('/domains/{domain_id}/groups/{group_id}/roles/{role_id}',
controller=role_controller,
action='create_grant',
conditions=dict(method=['PUT']))
mapper.connect('/domains/{domain_id}/users/{user_id}/roles/{role_id}',
controller=role_controller,
action='check_grant',
conditions=dict(method=['HEAD']))
mapper.connect('/domains/{domain_id}/groups/{group_id}/roles/{role_id}',
controller=role_controller,
action='check_grant',
conditions=dict(method=['HEAD']))
mapper.connect('/domains/{domain_id}/users/{user_id}/roles',
controller=role_controller,
action='list_grants',
conditions=dict(method=['GET']))
mapper.connect('/domains/{domain_id}/groups/{group_id}/roles',
controller=role_controller,
action='list_grants',
conditions=dict(method=['GET']))
mapper.connect('/domains/{domain_id}/users/{user_id}/roles/{role_id}',
controller=role_controller,
action='revoke_grant',
conditions=dict(method=['DELETE']))
mapper.connect('/domains/{domain_id}/groups/{group_id}/roles/{role_id}',
controller=role_controller,
action='revoke_grant',
conditions=dict(method=['DELETE']))
if config.CONF.os_inherit.enabled:
mapper.connect(('/OS-INHERIT/domains/{domain_id}/users/{user_id}'
'/roles/{role_id}/inherited_to_projects'),
controller=role_controller,
action='create_grant',
conditions=dict(method=['PUT']))
mapper.connect(('/OS-INHERIT/domains/{domain_id}/groups/{group_id}'
'/roles/{role_id}/inherited_to_projects'),
controller=role_controller,
action='create_grant',
conditions=dict(method=['PUT']))
mapper.connect(('/OS-INHERIT/domains/{domain_id}/users/{user_id}'
'/roles/{role_id}/inherited_to_projects'),
controller=role_controller,
action='check_grant',
conditions=dict(method=['HEAD']))
mapper.connect(('/OS-INHERIT/domains/{domain_id}/groups/{group_id}'
'/roles/{role_id}/inherited_to_projects'),
controller=role_controller,
action='check_grant',
conditions=dict(method=['HEAD']))
mapper.connect(('/OS-INHERIT/domains/{domain_id}/users/{user_id}'
'/roles/inherited_to_projects'),
controller=role_controller,
action='list_grants',
conditions=dict(method=['GET']))
mapper.connect(('/OS-INHERIT/domains/{domain_id}/groups/{group_id}'
'/roles/inherited_to_projects'),
controller=role_controller,
action='list_grants',
conditions=dict(method=['GET']))
mapper.connect(('/OS-INHERIT/domains/{domain_id}/users/{user_id}'
'/roles/{role_id}/inherited_to_projects'),
controller=role_controller,
action='revoke_grant',
conditions=dict(method=['DELETE']))
mapper.connect(('/OS-INHERIT/domains/{domain_id}/groups/{group_id}'
'/roles/{role_id}/inherited_to_projects'),
controller=role_controller,
action='revoke_grant',
conditions=dict(method=['DELETE']))
routers.append(
router.Router(controllers.RoleAssignmentV3(),
'role_assignments', 'role_assignment'))
|
|
# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import snapshots_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestSnapshotsClient(base.BaseServiceTest):
FAKE_CREATE_SNAPSHOT = {
"snapshot": {
"display_name": "snap-001",
"display_description": "Daily backup",
"volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
"force": True
}
}
FAKE_UPDATE_SNAPSHOT_REQUEST = {
"metadata": {
"key": "v1"
}
}
FAKE_INFO_SNAPSHOT = {
"snapshot": {
"id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5",
"display_name": "snap-001",
"display_description": "Daily backup",
"volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
"status": "available",
"size": 30,
"created_at": "2012-02-29T03:50:07Z"
}
}
FAKE_LIST_SNAPSHOTS = {
"snapshots": [
{
"id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5",
"display_name": "snap-001",
"display_description": "Daily backup",
"volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
"status": "available",
"size": 30,
"created_at": "2012-02-29T03:50:07Z",
"metadata": {
"contents": "junk"
}
},
{
"id": "e479997c-650b-40a4-9dfe-77655818b0d2",
"display_name": "snap-002",
"display_description": "Weekly backup",
"volume_id": "76b8950a-8594-4e5b-8dce-0dfa9c696358",
"status": "available",
"size": 25,
"created_at": "2012-03-19T01:52:47Z",
"metadata": {}
}
]
}
FAKE_SNAPSHOT_METADATA_ITEM = {
"meta": {
"key1": "value1"
}
}
def setUp(self):
super(TestSnapshotsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = snapshots_client.SnapshotsClient(fake_auth,
'volume',
'regionOne')
def _test_create_snapshot(self, bytes_body=False):
self.check_service_client_function(
self.client.create_snapshot,
'tempest.lib.common.rest_client.RestClient.post',
self.FAKE_CREATE_SNAPSHOT,
bytes_body,
status=202)
def _test_show_snapshot(self, bytes_body=False):
self.check_service_client_function(
self.client.show_snapshot,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_INFO_SNAPSHOT,
bytes_body,
snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
def _test_list_snapshots(self, bytes_body=False):
self.check_service_client_function(
self.client.list_snapshots,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_SNAPSHOTS,
bytes_body,
detail=True)
def _test_create_snapshot_metadata(self, bytes_body=False):
self.check_service_client_function(
self.client.create_snapshot_metadata,
'tempest.lib.common.rest_client.RestClient.post',
self.FAKE_INFO_SNAPSHOT,
bytes_body,
snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
metadata={"key": "v1"})
def _test_update_snapshot(self, bytes_body=False):
self.check_service_client_function(
self.client.update_snapshot,
'tempest.lib.common.rest_client.RestClient.put',
self.FAKE_UPDATE_SNAPSHOT_REQUEST,
bytes_body,
snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
def _test_show_snapshot_metadata(self, bytes_body=False):
self.check_service_client_function(
self.client.show_snapshot_metadata,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_UPDATE_SNAPSHOT_REQUEST,
bytes_body,
snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
def _test_update_snapshot_metadata(self, bytes_body=False):
self.check_service_client_function(
self.client.update_snapshot_metadata,
'tempest.lib.common.rest_client.RestClient.put',
self.FAKE_UPDATE_SNAPSHOT_REQUEST,
bytes_body, snapshot_id="cbc36478b0bd8e67e89")
def _test_update_snapshot_metadata_item(self, bytes_body=False):
self.check_service_client_function(
self.client.update_snapshot_metadata_item,
'tempest.lib.common.rest_client.RestClient.put',
self.FAKE_INFO_SNAPSHOT,
bytes_body, volume_type_id="cbc36478b0bd8e67e89")
def _test_show_snapshot_metadata_item(self, bytes_body=False):
self.check_service_client_function(
self.client.show_snapshot_metadata_item,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_SNAPSHOT_METADATA_ITEM,
bytes_body,
snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
id="key1")
def test_create_snapshot_with_str_body(self):
self._test_create_snapshot()
def test_create_snapshot_with_bytes_body(self):
self._test_create_snapshot(bytes_body=True)
def test_show_snapshot_with_str_body(self):
self._test_show_snapshot()
def test_show_snapshot_with_bytes_body(self):
self._test_show_snapshot(bytes_body=True)
def test_list_snapshots_with_str_body(self):
self._test_list_snapshots()
def test_list_snapshots_with_bytes_body(self):
self._test_list_snapshots(bytes_body=True)
def test_create_snapshot_metadata_with_str_body(self):
self._test_create_snapshot_metadata()
def test_create_snapshot_metadata_with_bytes_body(self):
self._test_create_snapshot_metadata(bytes_body=True)
def test_update_snapshot_with_str_body(self):
self._test_update_snapshot()
def test_update_snapshot_with_bytes_body(self):
self._test_update_snapshot(bytes_body=True)
def test_show_snapshot_metadata_with_str_body(self):
self._test_show_snapshot_metadata()
def test_show_snapshot_metadata_with_bytes_body(self):
self._test_show_snapshot_metadata(bytes_body=True)
def test_update_snapshot_metadata_with_str_body(self):
self._test_update_snapshot_metadata()
def test_update_snapshot_metadata_with_bytes_body(self):
self._test_update_snapshot_metadata(bytes_body=True)
def test_show_snapshot_metadata_item_with_str_body(self):
self._test_show_snapshot_metadata_item()
def test_show_snapshot_metadata_item_with_bytes_body(self):
self._test_show_snapshot_metadata_item(bytes_body=True)
def test_force_delete_snapshot(self):
self.check_service_client_function(
self.client.force_delete_snapshot,
'tempest.lib.common.rest_client.RestClient.post',
{},
snapshot_id="521752a6-acf6-4b2d-bc7a-119f9148cd8c",
status=202)
def test_delete_snapshot(self):
self.check_service_client_function(
self.client.delete_snapshot,
'tempest.lib.common.rest_client.RestClient.delete',
{},
snapshot_id="521752a6-acf6-4b2d-bc7a-119f9148cd8c",
status=202)
|
|
"""
Plotting utility functions.
"""
import cartopy.crs as ccrs
from cartopy.util import add_cyclic_point
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import numpy as np
import xarray
from xarray.plot.utils import _determine_cmap_params
import warnings
# Some default plotting arguments; not really used in this script
# but taken from my plotting toolkit along with geo_plot and
# add_colorbar.
_PLOTTYPE_ARGS = {
'pcolormesh': dict(linewidth='0'),
'pcolor': dict(linewidth='0'),
'contourf': dict(),
}
def add_colorbar(mappable, fig=None, ax=None, thickness=0.025,
shrink=0.1, pad=0.05, orientation='horizontal'):
""" Add a colorbar into an existing axis or figure. Need to pass
either an Axis or Figure element to the appropriate keyword
argument. Should elegantly handle multi-axes figures.
Parameters
----------
mappable : mappable
The element set with data to tailor to the colorbar
fig : Figure
ax: Axis
thickness: float
The width/height of the colorbar in fractional figure area,
given either vertical/horizontal orientation.
shrink: float
Fraction of the width/height of the figure to leave blank
pad : float
Padding between bottom/right subplot edge and the colorbar
orientation : str
The orientation of the colorbar
"""
if (fig is None) and (ax is None):
raise ValueError("Must pass either 'fig' or 'ax'")
elif fig is None:
# Plot on Axis
cb = plt.colorbar(mappable, ax=ax, pad=pad, orientation=orientation)
else:
# Plot onto Figure's set of axes
axes = fig.get_axes()
# Get coordinates for making the colorbar
ul = axes[0]
lr = axes[-1]
top = ul.get_position().get_points()[1][1]
bot = lr.get_position().get_points()[0][1]
right = lr.get_position().get_points()[1][0]
left = ul.get_position().get_points()[0][0]
# Calculate colorbar positioning and geometry
if orientation == 'vertical':
cb_left = right + pad
cb_width = thickness
cb_bottom = bot + shrink
cb_height = (top - shrink) - cb_bottom
elif orientation == 'horizontal':
cb_left = left + shrink
cb_width = (right - shrink) - cb_left
cb_height = thickness
cb_bottom = (bot - pad) - cb_height
else:
raise ValueError("Uknown orientation '%s'" % orientation)
cax = fig.add_axes([cb_left, cb_bottom,
cb_width, cb_height])
cb = fig.colorbar(mappable, cax=cax, orientation=orientation)
return cb
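# A minimal usage sketch for add_colorbar (the data and figure below are placeholders):
#
#     fig, ax = plt.subplots()
#     mappable = ax.pcolormesh(np.random.rand(10, 10))
#     add_colorbar(mappable, ax=ax)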
def check_cyclic(data, coord='lon'):
""" Checks if a DataArray already includes a cyclic point along the
specified coordinate axis. If not, adds the cyclic point and returns
the modified DataArray.
"""
return np.all(data.isel(**{coord: 0}) == data.isel(**{coord: -1}))
def cyclic_dataarray(da, coord='lon'):
""" Add a cyclic coordinate point to a DataArray along a specified
named coordinate dimension.
    >>> from xarray import DataArray
>>> data = DataArray([[1, 2, 3], [4, 5, 6]],
... coords={'x': [1, 2], 'y': range(3)},
... dims=['x', 'y'])
>>> cd = cyclic_dataarray(data, 'y')
    >>> print(cd.data)
array([[1, 2, 3, 1],
[4, 5, 6, 4]])
"""
    assert isinstance(da, xarray.DataArray)
lon_idx = da.dims.index(coord)
cyclic_data, cyclic_coord = add_cyclic_point(da.values,
coord=da.coords[coord],
axis=lon_idx)
# Copy and add the cyclic coordinate and data
new_coords = dict(da.coords)
new_coords[coord] = cyclic_coord
new_values = cyclic_data
    new_da = xarray.DataArray(new_values, dims=da.dims, coords=new_coords)
# Copy the attributes for the re-constructed data and coords
for att, val in da.attrs.items():
new_da.attrs[att] = val
for c in da.coords:
for att in da.coords[c].attrs:
new_da.coords[c].attrs[att] = da.coords[c].attrs[att]
return new_da
def geo_plot(darray, ax=None, method='contourf',
projection='PlateCarree', grid=False, **kwargs):
""" Create a global plot of a given variable.
Parameters:
-----------
    darray : xarray.DataArray
The darray to be plotted.
ax : axis
An existing axis instance, else one will be created.
method : str
        Name of the matplotlib plotting function to use ('pcolormesh',
        'pcolor', or 'contourf'); looked up in the pyplot namespace
projection : str or tuple
Name of the cartopy projection to use and any args
necessary for initializing it passed as a dictionary;
see func:`make_geoaxes` for more information
grid : bool
Include lat-lon grid overlay
**kwargs : dict
Any additional keyword arguments to pass to the plotter,
including colormap params. If 'vmin' is not in this
set of optional keyword arguments, the plot colormap will be
automatically inferred.
"""
# Set up plotting function
if method in _PLOTTYPE_ARGS:
extra_args = _PLOTTYPE_ARGS[method].copy()
else:
raise ValueError("Don't know how to deal with '%s' method" % method)
extra_args.update(**kwargs)
# Alias a plot function based on the requested method and the
# datatype being plotted
plot_func = plt.__dict__[method]
# `transform` should be the ORIGINAL coordinate system -
# which is always a simple lat-lon coordinate system in CESM
# output
extra_args['transform'] = ccrs.PlateCarree()
# Was an axis passed to plot on?
new_axis = ax is None
if new_axis: # Create a new cartopy axis object for plotting
if isinstance(projection, (list, tuple)):
if len(projection) != 2:
raise ValueError("Expected 'projection' to only have 2 values")
projection, proj_kwargs = projection[0], projection[1]
else:
proj_kwargs = {}
# hack to look up the name of the projection in the cartopy
# reference system namespace; makes life a bit easier, so you
# can just pass a string with the name of the projection wanted.
proj = ccrs.__dict__[projection](**proj_kwargs)
ax = plt.axes(projection=proj)
else: # Set current axis to one passed as argument
if not hasattr(ax, 'projection'):
raise ValueError("Expected `ax` to be a GeoAxes instance")
plt.sca(ax)
# Setup map
ax.set_global()
ax.coastlines()
try:
gl = ax.gridlines(crs=extra_args['transform'], draw_labels=True,
linewidth=0.5, color='grey', alpha=0.8)
LON_TICKS = [ -180, -90, 0, 90, 180 ]
LAT_TICKS = [ -90, -60, -30, 0, 30, 60, 90 ]
gl.xlabels_top = False
gl.ylabels_right = False
gl.xlines = grid
gl.ylines = grid
gl.xlocator = mticker.FixedLocator(LON_TICKS)
gl.ylocator = mticker.FixedLocator(LAT_TICKS)
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
except TypeError:
warnings.warn("Could not label the given map projection.")
# Infer colormap settings if not provided
if not ('vmin' in kwargs):
warnings.warn("Re-inferring color parameters...")
cmap_kws = _determine_cmap_params(darray.data)
extra_args.update(cmap_kws)
gp = plot_func(darray.lon.values, darray.lat.values, darray.data,
**extra_args)
return ax, gp
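# A minimal usage sketch for geo_plot (the dataset path and variable name are placeholders):
#
#     import xarray
#     ds = xarray.open_dataset('output.nc')
#     da = ds['TS'].isel(time=0)
#     if not check_cyclic(da, coord='lon'):
#         da = cyclic_dataarray(da, coord='lon')
#     ax, gp = geo_plot(da, method='pcolormesh', projection='Robinson')
#     add_colorbar(gp, ax=ax)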
|
|
def find_seeing(SUPA,FLAT_TYPE):
import os, re, utilities, sys
from copy import copy
dict = get_files(SUPA,FLAT_TYPE)
print dict['file']
search_params = initialize(dict['filter'],dict['cluster'])
search_params.update(dict)
print dict['files']
#params PIXSCALE GAIN
''' quick run through for seeing '''
children = []
for image in search_params['files']:
child = os.fork()
if child:
children.append(child)
else:
params = copy(search_params)
ROOT = re.split('\.',re.split('\/',image)[-1])[0]
params['ROOT'] = ROOT
NUM = re.split('O',re.split('\_',ROOT)[1])[0]
params['NUM'] = NUM
print ROOT
weightim = "/%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits" % params
#flagim = "/%(path)s/%(fil_directory)s/WEIGHTS/globalflag_%(NUM)s.fits" % params
#finalflagim = TEMPDIR + "flag_%(ROOT)s.fits" % params
params['finalflagim'] = weightim
#os.system('rm ' + finalflagim)
#command = "ic -p 16 '1 %2 %1 0 == ?' " + weightim + " " + flagim + " > " + finalflagim
#utilities.run(command)
command = "nice sex %(file)s -c %(PHOTCONF)s/singleastrom.conf.sex \
-FLAG_IMAGE ''\
-FLAG_TYPE MAX\
-CATALOG_NAME %(TEMPDIR)s/seeing_%(ROOT)s.cat \
-FILTER_NAME %(PHOTCONF)s/default.conv\
-CATALOG_TYPE 'ASCII' \
-DETECT_MINAREA 8 -DETECT_THRESH 8.\
-ANALYSIS_THRESH 8 \
-WEIGHT_IMAGE /%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits\
-WEIGHT_TYPE MAP_WEIGHT\
-PARAMETERS_NAME %(PHOTCONF)s/singleastrom.ascii.flag.sex" % params
print command
os.system(command)
sys.exit(0)
for child in children:
os.waitpid(child,0)
command = 'cat ' + search_params['TEMPDIR'] + 'seeing_' + SUPA + '*cat > ' + search_params['TEMPDIR'] + 'paste_seeing_' + SUPA + '.cat'
utilities.run(command)
file_seeing = search_params['TEMPDIR'] + '/paste_seeing_' + SUPA + '.cat'
PIXSCALE = float(search_params['PIXSCALE'])
reload(utilities)
fwhm = utilities.calc_seeing(file_seeing,10,PIXSCALE)
save_exposure({'fwhm':fwhm},SUPA,FLAT_TYPE)
print file_seeing, SUPA, PIXSCALE
def find_files(directories):
exposures = {}
import glob
files = glob.glob(directories + '/SCIENCE/*fits')
print files
def sextract(SUPA,FLAT_TYPE):
import os, re, utilities, bashreader, sys, string
from copy import copy
from glob import glob
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['cluster'])
search_params.update(dict)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(cluster)s/' % {'cluster':search_params['cluster']}
subpath='/nfs/slac/g/ki/ki05/anja/SUBARU/'
children = []
print search_params
kws = utilities.get_header_kw(search_params['files'][0],['PPRUN'])
print kws['PPRUN']
pprun = kws['PPRUN']
#fs = glob.glob(subpath+pprun+'/SCIENCE_DOMEFLAT*.tarz')
#if len(fs) > 0:
# os.system('tar xzvf ' + fs[0])
#fs = glob.glob(subpath+pprun+'/SCIENCE_SKYFLAT*.tarz')
#if len(fs) > 0:
# os.system('tar xzvf ' + fs[0])
search_params['files'].sort()
if 1:
print search_params['files']
for image in search_params['files']:
print image
child = os.fork()
if child:
children.append(child)
else:
try:
params = copy(search_params)
ROOT = re.split('\.',re.split('\/',image)[-1])[0]
params['ROOT'] = ROOT
BASE = re.split('O',ROOT)[0]
params['BASE'] = BASE
NUM = re.split('O',re.split('\_',ROOT)[1])[0]
params['NUM'] = NUM
print NUM, BASE, ROOT
params['GAIN'] = 2.50 ## WARNING!!!!!!
print ROOT
finalflagim = "%(TEMPDIR)sflag_%(ROOT)s.fits" % params
weightim = "/%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits" % params
#flagim = "/%(path)s/%(fil_directory)s/WEIGHTS/globalflag_%(NUM)s.fits" % params
#finalflagim = TEMPDIR + "flag_%(ROOT)s.fits" % params
params['finalflagim'] = weightim
im = "/%(path)s/%(fil_directory)s/SCIENCE/%(ROOT)s.fits" % params
crpix = utilities.get_header_kw(im,['CRPIX1','CRPIX2'])
SDSS1 = "/%(path)s/%(fil_directory)s/SCIENCE/headers_scamp_SDSS-R6/%(BASE)s.head" % params
SDSS2 = "/%(path)s/%(fil_directory)s/SCIENCE/headers_scamp_SDSS-R6/%(BASE)sO*.head" % params
from glob import glob
print glob(SDSS1), glob(SDSS2)
head = None
if len(glob(SDSS1)) > 0:
head = glob(SDSS1)[0]
elif len(glob(SDSS2)) > 0:
head = glob(SDSS2)[0]
if head is None:
command = "sex /%(path)s/%(fil_directory)s/SCIENCE/%(ROOT)s.fits -c %(PHOTCONF)s/phot.conf.sex \
-PARAMETERS_NAME %(PHOTCONF)s/phot.param.sex \
-CATALOG_NAME %(TEMPDIR)s/%(ROOT)s.cat \
-FILTER_NAME %(DATACONF)s/default.conv\
-FILTER Y \
-FLAG_TYPE MAX\
-FLAG_IMAGE ''\
-SEEING_FWHM %(fwhm).3f \
-DETECT_MINAREA 3 -DETECT_THRESH 3 -ANALYSIS_THRESH 3 \
-MAG_ZEROPOINT 27.0 \
-GAIN %(GAIN).3f \
-WEIGHT_IMAGE /%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits\
-WEIGHT_TYPE MAP_WEIGHT" % params
#-CHECKIMAGE_TYPE BACKGROUND,APERTURES,SEGMENTATION\
#-CHECKIMAGE_NAME /%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.background.fits,/%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.apertures.fits,/%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.segmentation.fits\
catname = "%(TEMPDIR)s/%(ROOT)s.cat" % params
filtcatname = "%(TEMPDIR)s/%(ROOT)s.filt.cat" % params
print command
utilities.run(command,[catname])
utilities.run('ldacfilter -i ' + catname + ' -o ' + filtcatname + ' -t LDAC_OBJECTS\
-c "(CLASS_STAR > 0.0);"',[filtcatname])
if len(glob(filtcatname)) > 0:
import commands
lines = commands.getoutput('ldactoasc -s -b -i ' + filtcatname + ' -t LDAC_OBJECTS | wc -l')
import re
res = re.split('\n',lines)
print lines
if int(res[-1]) == 0: sys.exit(0)
command = 'scamp ' + filtcatname + " -SOLVE_PHOTOM N -ASTREF_CATALOG SDSS-R6 -CHECKPLOT_TYPE NONE -WRITE_XML N "
print command
utilities.run(command)
head = "%(TEMPDIR)s/%(ROOT)s.filt.head" % params
#headfile = "%(TEMPDIR)s/%(ROOT)s.head" % params
print head
if head is not None:
hf = open(head,'r').readlines()
hdict = {}
for line in hf:
import re
if string.find(line,'=') != -1:
res = re.split('=',line)
name = res[0].replace(' ','')
res = re.split('/',res[1])
value = res[0].replace(' ','')
print name, value
hdict[name] = value
imfix = "%(TEMPDIR)s/%(ROOT)s.fixwcs.fits" % params
print imfix
os.system('mkdir ' + search_params['TEMPDIR'])
command = "cp " + im + " " + imfix
print command
utilities.run(command)
import commands
out = commands.getoutput('gethead ' + imfix + ' CRPIX1 CRPIX2')
import re
res = re.split('\s+',out)
os.system('sethead ' + imfix + ' CRPIX1OLD=' + res[0])
os.system('sethead ' + imfix + ' CRPIX2OLD=' + res[1])
for name in ['CRVAL1','CRVAL2','CD1_1','CD1_2','CD2_1','CD2_2','CRPIX1','CRPIX2']:
command = 'sethead ' + imfix + ' ' + name + '=' + hdict[name]
print command
os.system(command)
main_file = '%(TEMPDIR)s/%(ROOT)s.fixwcs.fits' % params
doubles_raw = [{'file_pattern':main_file,'im_type':''},
{'file_pattern':subpath+pprun+'/SCIENCE_DOMEFLAT*/'+BASE+'OC*.fits','im_type':'D'},
{'file_pattern':subpath+pprun+'/SCIENCE_SKYFLAT*/'+BASE+'OC*.fits','im_type':'S'}]
#{'file_pattern':subpath+pprun+'/SCIENCE/OC_IMAGES/'+BASE+'OC*.fits','im_type':'OC'}
# ]
print doubles_raw
doubles_output = []
print doubles_raw
for double in doubles_raw:
file = glob(double['file_pattern'])
if len(file) > 0:
params.update(double)
params['double_cat'] = '%(TEMPDIR)s/%(ROOT)s.%(im_type)s.fixwcs.cat' % params
params['file_double'] = file[0]
command = "nice sex %(TEMPDIR)s%(ROOT)s.fixwcs.fits,%(file_double)s -c %(PHOTCONF)s/phot.conf.sex \
-PARAMETERS_NAME %(PHOTCONF)s/phot.param.sex \
-CATALOG_NAME %(double_cat)s \
-FILTER_NAME %(DATACONF)s/default.conv\
-FILTER Y \
-FLAG_TYPE MAX\
-FLAG_IMAGE ''\
-SEEING_FWHM %(fwhm).3f \
-DETECT_MINAREA 3 -DETECT_THRESH 3 -ANALYSIS_THRESH 3 \
-MAG_ZEROPOINT 27.0 \
-GAIN %(GAIN).3f \
-WEIGHT_IMAGE /%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits\
-WEIGHT_TYPE MAP_WEIGHT" % params
#-CHECKIMAGE_TYPE BACKGROUND,APERTURES,SEGMENTATION\
#-CHECKIMAGE_NAME /%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.background.fits,/%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.apertures.fits,/%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.segmentation.fits\
catname = "%(TEMPDIR)s/%(ROOT)s.cat" % params
print command
utilities.run(command,[catname])
command = 'ldacconv -b 1 -c R -i ' + params['double_cat'] + ' -o ' + params['double_cat'].replace('cat','rawconv')
print command
utilities.run(command)
#command = 'ldactoasc -b -q -i ' + params['double_cat'].replace('cat','rawconv') + ' -t OBJECTS\
# -k ALPHA_J2000 DELTA_J2000 > ' + params['double_cat'].replace('cat','pos')
#print command
#utilities.run(command)
#print 'mkreg.pl -c -rad 8 -xcol 0 -ycol 1 -wcs -colour green ' + params['double_cat'].replace('cat','pos')
#utilities.run(command)
#print params['double_cat'].replace('cat','pos')
# Xpos_ABS is difference of CRPIX and zero CRPIX
doubles_output.append({'cat':params['double_cat'].replace('cat','rawconv'),'im_type':double['im_type']})
print doubles_output
print '***********************************'
outfile = params['TEMPDIR'] + params['ROOT'] + '.conv'
combine_cats(doubles_output,outfile,search_params)
#outfile_field = params['TEMPDIR'] + params['ROOT'] + '.field'
#command = 'ldacdeltab -i ' + outfile + ' -t FIELDS -o ' + outfile_field
#utilities.run(command)
command = 'ldactoasc -b -q -i ' + outfile + ' -t OBJECTS\
-k ALPHA_J2000 DELTA_J2000 > ' + outfile.replace('conv','pos')
print command
utilities.run(command)
command = 'mkreg.pl -c -rad 8 -xcol 0 -ycol 1 -wcs -colour green ' + outfile.replace('conv','pos')
print command
utilities.run(command)
print outfile
command = 'ldaccalc -i ' + outfile + ' -o ' + params['TEMPDIR'] + params['ROOT'] + '.newpos -t OBJECTS -c "(Xpos + ' + str(float(search_params['CRPIX1ZERO']) - float(crpix['CRPIX1'])) + ');" -k FLOAT -n Xpos_ABS "" -c "(Ypos + ' + str(float(search_params['CRPIX2ZERO']) - float(crpix['CRPIX2'])) + ');" -k FLOAT -n Ypos_ABS "" -c "(Ypos*0 + ' + str(params['NUM']) + ');" -k FLOAT -n CHIP "" '
print command
utilities.run(command)
except:
print sys.exc_info()
print 'finishing'
sys.exit(0)
sys.exit(0)
print children
for child in children:
print 'waiting for', child
os.waitpid(child,0)
print 'finished waiting'
pasted_cat = path + 'PHOTOMETRY/ILLUMINATION/' + 'pasted_' + SUPA + '_' + search_params['filter'] + '_' + str(search_params['ROTATION']) + '.cat'
from glob import glob
outcat = search_params['TEMPDIR'] + 'tmppaste_' + SUPA + '.cat'
newposlist = glob(search_params['TEMPDIR'] + SUPA + '*newpos')
print search_params['TEMPDIR'] + SUPA + '*newpos'
if len(newposlist) > 1:
#command = 'ldacpaste -i ' + search_params['TEMPDIR'] + SUPA + '*newpos -o ' + pasted_cat
#print command
files = glob(search_params['TEMPDIR'] + SUPA + '*newpos')
print files
paste_cats(files,pasted_cat)
else:
command = 'cp ' + newposlist[0] + ' ' + pasted_cat
utilities.run(command)
save_exposure({'pasted_cat':pasted_cat},SUPA,FLAT_TYPE)
#fs = glob.glob(subpath+pprun+'/SCIENCE_DOMEFLAT*.tarz'.replace('.tarz',''))
#if len(fs) > 0:
# os.system('tar xzvf ' + fs[0])
#fs = glob.glob(subpath+pprun+'/SCIENCE_SKYFLAT*.tarz'.replace('.tarz',''))
#fs = glob.glob(subpath+pprun+'/SCIENCE_SKYFLAT*.tarz')
#if len(fs) > 0:
# os.system('tar xzvf ' + fs[0])
#return exposures, LENGTH1, LENGTH2
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google ML Engine Hook.
"""
import logging
import random
import time
from typing import Callable, Dict, List, Optional
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from airflow.providers.google.cloud.hooks.base import CloudBaseHook
from airflow.version import version as airflow_version
log = logging.getLogger(__name__)
_AIRFLOW_VERSION = 'v' + airflow_version.replace('.', '-').replace('+', '-')
def _poll_with_exponential_delay(request, max_n, is_done_func, is_error_func):
for i in range(0, max_n):
try:
response = request.execute()
if is_error_func(response):
raise ValueError(
'The response contained an error: {}'.format(response)
)
if is_done_func(response):
log.info('Operation is done: %s', response)
return response
time.sleep((2**i) + (random.randint(0, 1000) / 1000))
except HttpError as e:
if e.resp.status != 429:
log.info('Something went wrong. Not retrying: %s', format(e))
raise
else:
time.sleep((2**i) + (random.randint(0, 1000) / 1000))
raise ValueError('Connection could not be established after {} retries.'.format(max_n))
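# A minimal sketch of the polling contract above, using an in-memory stub in
# place of a googleapiclient request object. The stub and this helper are
# illustrative only; real callers pass e.g. hook.projects().operations().get().
def _poll_demo():
    class _StubRequest:
        """Pretends to be an operation that completes on the third poll."""
        _calls = 0

        def execute(self):
            _StubRequest._calls += 1
            return {'done': _StubRequest._calls >= 3}

    return _poll_with_exponential_delay(
        request=_StubRequest(),
        max_n=5,
        is_done_func=lambda resp: resp.get('done', False),
        is_error_func=lambda resp: resp.get('error', None) is not None)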
class MLEngineHook(CloudBaseHook):
"""
Hook for Google ML Engine APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def get_conn(self):
"""
Retrieves the connection to MLEngine.
:return: Google MLEngine services object.
"""
authed_http = self._authorize()
return build('ml', 'v1', http=authed_http, cache_discovery=False)
@CloudBaseHook.fallback_to_default_project_id
def create_job(
self,
job: Dict,
project_id: Optional[str] = None,
use_existing_job_fn: Optional[Callable] = None
) -> Dict:
"""
Launches an MLEngine job and waits for it to reach a terminal state.
:param project_id: The Google Cloud project id within which MLEngine
job will be launched. If set to None or missing, the default project_id from the GCP
connection is used.
:type project_id: str
:param job: MLEngine Job object that should be provided to the MLEngine
API, such as: ::
{
'jobId': 'my_job_id',
'trainingInput': {
'scaleTier': 'STANDARD_1',
...
}
}
:type job: dict
:param use_existing_job_fn: In case an MLEngine job with the same
job_id already exists, this callable (if provided) decides whether to
reuse the existing job, i.e. keep waiting for it to finish and return
the job object. It should accept an MLEngine job object and return a
boolean indicating whether it is OK to reuse the existing job. If
'use_existing_job_fn' is not provided, the existing MLEngine job is
reused by default.
:type use_existing_job_fn: function
:return: The MLEngine job object if the job successfully reaches a
terminal state (which might be a FAILED or CANCELLED state).
:rtype: dict
"""
if not project_id:
raise ValueError("The project_id should be set")
hook = self.get_conn()
self._append_label(job)
request = hook.projects().jobs().create( # pylint: disable=no-member
parent='projects/{}'.format(project_id),
body=job)
job_id = job['jobId']
try:
request.execute()
except HttpError as e:
# 409 means there is an existing job with the same job ID.
if e.resp.status == 409:
if use_existing_job_fn is not None:
existing_job = self._get_job(project_id, job_id)
if not use_existing_job_fn(existing_job):
self.log.error(
'Job with job_id %s already exists, but it does '
'not match our expectation: %s',
job_id, existing_job
)
raise
self.log.info(
'Job with job_id %s already exists. Will wait for it to finish',
job_id
)
else:
self.log.error('Failed to create MLEngine job: %s', e)
raise
return self._wait_for_job_done(project_id, job_id)
def _get_job(self, project_id: str, job_id: str) -> Dict:
"""
Gets a MLEngine job based on the job id.
:param project_id: The project in which the Job is located.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:param job_id: A unique id for the Google MLEngine job. (templated)
:type job_id: str
:return: MLEngine job object if succeed.
:rtype: dict
:raises: googleapiclient.errors.HttpError
"""
hook = self.get_conn()
job_name = 'projects/{}/jobs/{}'.format(project_id, job_id)
request = hook.projects().jobs().get(name=job_name) # pylint: disable=no-member
while True:
try:
return request.execute()
except HttpError as e:
if e.resp.status == 429:
# polling after 30 seconds when quota failure occurs
time.sleep(30)
else:
self.log.error('Failed to get MLEngine job: %s', e)
raise
def _wait_for_job_done(self, project_id: str, job_id: str, interval: int = 30):
"""
Waits for the Job to reach a terminal state.
This method will periodically check the job state until the job reaches
a terminal state.
:param project_id: The project in which the Job is located.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:param job_id: A unique id for the Google MLEngine job. (templated)
:type job_id: str
:param interval: Time expressed in seconds after which the job status is checked again. (templated)
:type interval: int
:raises: googleapiclient.errors.HttpError
"""
if interval <= 0:
raise ValueError("Interval must be > 0")
while True:
job = self._get_job(project_id, job_id)
if job['state'] in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
return job
time.sleep(interval)
@CloudBaseHook.fallback_to_default_project_id
def create_version(
self,
model_name: str,
version_spec: Dict,
project_id: Optional[str] = None,
) -> Dict:
"""
Creates the Version on Google Cloud ML Engine.
:param version_spec: A dictionary containing the information about the version. (templated)
:type version_spec: dict
:param model_name: The name of the Google Cloud ML Engine model that the version belongs to.
(templated)
:type model_name: str
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the GCP connection is used.
(templated)
:type project_id: str
:return: If the version was created successfully, returns the operation.
Otherwise raises an error.
:rtype: dict
"""
hook = self.get_conn()
parent_name = 'projects/{}/models/{}'.format(project_id, model_name)
self._append_label(version_spec)
create_request = hook.projects().models().versions().create( # pylint: disable=no-member
parent=parent_name, body=version_spec)
response = create_request.execute()
get_request = hook.projects().operations().get( # pylint: disable=no-member
name=response['name'])
return _poll_with_exponential_delay(
request=get_request,
max_n=9,
is_done_func=lambda resp: resp.get('done', False),
is_error_func=lambda resp: resp.get('error', None) is not None)
@CloudBaseHook.fallback_to_default_project_id
def set_default_version(
self,
model_name: str,
version_name: str,
project_id: Optional[str] = None,
) -> Dict:
"""
Sets a version to be the default. Blocks until finished.
:param model_name: The name of the Google Cloud ML Engine model that the version belongs to.
(templated)
:type model_name: str
:param version_name: A name to use for the version being operated upon. (templated)
:type version_name: str
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:return: If successful, return an instance of Version.
Otherwise raises an error.
:rtype: dict
:raises: googleapiclient.errors.HttpError
"""
hook = self.get_conn()
full_version_name = 'projects/{}/models/{}/versions/{}'.format(
project_id, model_name, version_name)
request = hook.projects().models().versions().setDefault( # pylint: disable=no-member
name=full_version_name, body={})
try:
response = request.execute()
self.log.info('Successfully set version: %s to default', response)
return response
except HttpError as e:
self.log.error('Something went wrong: %s', e)
raise
@CloudBaseHook.fallback_to_default_project_id
def list_versions(
self,
model_name: str,
project_id: Optional[str] = None,
) -> List[Dict]:
"""
Lists all available versions of a model. Blocks until finished.
:param model_name: The name of the Google Cloud ML Engine model that the version
belongs to. (templated)
:type model_name: str
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:return: A list of Version instances.
:rtype: List[Dict]
:raises: googleapiclient.errors.HttpError
"""
hook = self.get_conn()
result = [] # type: List[Dict]
full_parent_name = 'projects/{}/models/{}'.format(
project_id, model_name)
request = hook.projects().models().versions().list( # pylint: disable=no-member
parent=full_parent_name, pageSize=100)
while request is not None:
response = request.execute()
result.extend(response.get('versions', []))
request = hook.projects().models().versions().list_next( # pylint: disable=no-member
previous_request=request,
previous_response=response)
time.sleep(5)
return result
@CloudBaseHook.fallback_to_default_project_id
def delete_version(
self,
model_name: str,
version_name: str,
project_id: Optional[str] = None,
) -> Dict:
"""
Deletes the given version of a model. Blocks until finished.
:param model_name: The name of the Google Cloud ML Engine model that the version
belongs to. (templated)
:type model_name: str
:param project_id: The Google Cloud project name to which MLEngine
model belongs.
:type project_id: str
:return: If the version was deleted successfully, returns the operation.
Otherwise raises an error.
:rtype: Dict
"""
if not project_id:
raise ValueError("The project_id should be set")
hook = self.get_conn()
full_name = 'projects/{}/models/{}/versions/{}'.format(
project_id, model_name, version_name)
delete_request = hook.projects().models().versions().delete( # pylint: disable=no-member
name=full_name)
response = delete_request.execute()
get_request = hook.projects().operations().get( # pylint: disable=no-member
name=response['name'])
return _poll_with_exponential_delay(
request=get_request,
max_n=9,
is_done_func=lambda resp: resp.get('done', False),
is_error_func=lambda resp: resp.get('error', None) is not None)
@CloudBaseHook.fallback_to_default_project_id
def create_model(
self,
model: Dict,
project_id: Optional[str] = None
) -> Dict:
"""
Create a Model. Blocks until finished.
:param model: A dictionary containing the information about the model.
:type model: dict
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:return: If the model was created successfully, returns the instance of Model.
Otherwise raises an error.
:rtype: Dict
:raises: googleapiclient.errors.HttpError
"""
hook = self.get_conn()
if not model['name']:
raise ValueError("Model name must be provided and "
"could not be an empty string")
project = 'projects/{}'.format(project_id)
self._append_label(model)
request = hook.projects().models().create( # pylint: disable=no-member
parent=project, body=model)
return request.execute()
@CloudBaseHook.fallback_to_default_project_id
def get_model(
self,
model_name: str,
project_id: Optional[str] = None,
) -> Optional[Dict]:
"""
Gets a Model. Blocks until finished.
:param model_name: The name of the model.
:type model_name: str
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:return: If the model exists, returns the instance of Model.
Otherwise return None.
:rtype: Dict
:raises: googleapiclient.errors.HttpError
"""
hook = self.get_conn()
if not model_name:
raise ValueError("Model name must be provided and "
"it could not be an empty string")
full_model_name = 'projects/{}/models/{}'.format(
project_id, model_name)
request = hook.projects().models().get(name=full_model_name) # pylint: disable=no-member
try:
return request.execute()
except HttpError as e:
if e.resp.status == 404:
self.log.error('Model was not found: %s', e)
return None
raise
@CloudBaseHook.fallback_to_default_project_id
def delete_model(
self,
model_name: str,
delete_contents: bool = False,
project_id: Optional[str] = None,
) -> None:
"""
Delete a Model. Blocks until finished.
:param model_name: The name of the model.
:type model_name: str
:param delete_contents: Whether to force the deletion even if the model is not empty.
Will delete all versions (if any) of the model if set to True.
The default value is False.
:type delete_contents: bool
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:raises: googleapiclient.errors.HttpError
"""
if not project_id:
raise ValueError("The project_id should be set")
hook = self.get_conn()
if not model_name:
raise ValueError("Model name must be provided and it could not be an empty string")
model_path = 'projects/{}/models/{}'.format(project_id, model_name)
if delete_contents:
self._delete_all_versions(model_name, project_id)
request = hook.projects().models().delete(name=model_path) # pylint: disable=no-member
try:
request.execute()
except HttpError as e:
if e.resp.status == 404:
self.log.error('Model was not found: %s', e)
return
raise
def _delete_all_versions(self, model_name: str, project_id: str):
versions = self.list_versions(project_id=project_id, model_name=model_name)
# The default version can only be deleted when it is the last one in the model
non_default_versions = (version for version in versions if not version.get('isDefault', False))
for version in non_default_versions:
_, _, version_name = version['name'].rpartition('/')
self.delete_version(project_id=project_id, model_name=model_name, version_name=version_name)
default_versions = (version for version in versions if version.get('isDefault', False))
for version in default_versions:
_, _, version_name = version['name'].rpartition('/')
self.delete_version(project_id=project_id, model_name=model_name, version_name=version_name)
def _append_label(self, model: Dict) -> None:
model['labels'] = model.get('labels', {})
model['labels']['airflow-version'] = _AIRFLOW_VERSION
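# Illustrative usage sketch only: it assumes a configured default Google Cloud
# connection and an existing GCP project, so it is kept behind a __main__ guard
# and is not executed on import. The project and model names are placeholders.
if __name__ == '__main__':
    hook = MLEngineHook()
    created = hook.create_model(
        project_id='example-project',
        model={'name': 'example_model', 'description': 'illustration only'})
    log.info('Created model: %s', created)
    log.info('Known versions: %s',
             hook.list_versions(project_id='example-project',
                                model_name='example_model'))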
|
|
# -*- coding: utf-8 -*-
"""
Buildings Assessments module
@author Fran Boon <[email protected]>
Data model from:
http://www.atcouncil.org/products/downloadable-products/placards
Postearthquake Safety Evaluation of Buildings: ATC-20
http://www.atcouncil.org/pdfs/rapid.pdf
This is actually based on the New Zealand variant:
http://eden.sahanafoundation.org/wiki/BluePrintBuildingAssessments
@ToDo: Hide fields for triage form server side
- once print comes from controller then it will also skip these fields
- less to download to browser (more scalable)
@ToDo: add other forms
"""
module = request.controller
resourcename = request.function
if module not in deployment_settings.modules:
session.error = T("Module disabled!")
redirect(URL(r=request, c="default", f="index"))
# Options Menu (available in all Functions' Views)
def shn_menu():
menu = [
[T("NZSEE Level 1"), False, aURL(r=request, f="nzseel1"), [
[T("Submit New (triage)"), False, aURL(p="create", r=request, f="nzseel1", args="create", vars={"triage":1})],
[T("Submit New (full form)"), False, aURL(p="create", r=request, f="nzseel1", args="create")],
[T("Search"), False, aURL(r=request, f="nzseel1", args="search")],
[T("List"), False, aURL(r=request, f="nzseel1")],
]],
[T("NZSEE Level 2"), False, aURL(r=request, f="nzseel2"), [
[T("Submit New"), False, aURL(p="create", r=request, f="nzseel2", args="create")],
[T("Search"), False, aURL(r=request, f="nzseel2", args="search")],
[T("List"), False, aURL(r=request, f="nzseel2")],
]],
[T("Report"), False, aURL(r=request, f="index"),
[
[T("Snapshot"), False, aURL(r=request, f="report")],
[T("Assessment timeline"), False, aURL(r=request, f="timeline")],
[T("Assessment admin level"), False, aURL(r=request, f="adminLevel")],
]
]
]
response.menu_options = menu
shn_menu()
# S3 framework functions
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
module_name = deployment_settings.modules[module].name_nice
response.title = module_name
return dict(module_name=module_name)
# NZSEE Level 1 (~ATC-20 Rapid Evaluation) Safety Assessment Form -------------
def nzseel1():
"""
RESTful CRUD controller
@ToDo: Action Button to create a new L2 Assessment from an L1
"""
tablename = "%s_%s" % (module, resourcename)
table = db[tablename]
# Pre-populate Inspector ID
table.person_id.default = s3_logged_in_person()
# Subheadings in forms:
s3xrc.model.configure(table,
deletable=False,
create_next = URL(r=request, c=module, f=resourcename, args="[id]"),
subheadings = {
".": "name", # Description in ATC-20
"%s / %s" % (T("Overall Hazards"), T("Damage")): "collapse",
".": "posting",
"%s:" % T("Further Action Recommended"): "barricades",
".": "estimated_damage",
})
rheader = lambda r: nzseel1_rheader(r)
output = s3_rest_controller(module, resourcename,
rheader=rheader)
return output
# -----------------------------------------------------------------------------
def nzseel1_rheader(r, tabs=[]):
""" Resource Headers """
if r.representation == "html":
if r.name == "nzseel1":
assess = r.record
if assess:
rheader_tabs = s3_rheader_tabs(r, tabs)
location = assess.location_id
if location:
location = shn_gis_location_represent(location)
person = assess.person_id
if person:
pe_id = db(db.pr_person.id == person).select(db.pr_person.pe_id, limitby=(0, 1)).first().pe_id
query = (db.pr_contact.pe_id == pe_id) & (db.pr_contact.contact_method == 2)
mobile = db(query).select(db.pr_contact.value, limitby=(0, 1)).first()
if mobile:
mobile = mobile.value
person = vita.fullname(person)
rheader = DIV(TABLE(
TR(
TH("%s: " % T("Person")), person,
TH("%s: " % T("Mobile")), mobile,
),
TR(
TH("%s: " % T("Location")), location,
TH("%s: " % T("Date")), assess.date
),
TR(
TH(""), "",
TH("%s: " % T("Ticket ID")),
r.table.ticket_id.represent(assess.ticket_id),
),
),
rheader_tabs)
return rheader
return None
# -----------------------------------------------------------------------------
# NZSEE Level 2 (~ATC-20 Rapid Evaluation) Safety Assessment Form
def nzseel2():
"""
RESTful CRUD controller
"""
tablename = "%s_%s" % (module, resourcename)
table = db[tablename]
# Pre-populate Inspector ID
table.person_id.default = s3_logged_in_person()
# Subheadings in forms:
s3xrc.model.configure(table,
deletable=False,
create_next = URL(r=request, c=module, f=resourcename, args="[id]"),
subheadings = {
".": "name", # Description in ATC-20
"%s / %s" % (T("Overall Hazards"), T("Damage")): "collapse",
".": "posting_existing",
"%s:" % T("Further Action Recommended"): "barricades",
".": "estimated_damage",
"%s / %s" % (T("Structural Hazards"), T("Damage")): "structural_foundations",
"%s / %s" % (T("Non-structural Hazards"), T("Damage")): "non_parapets",
"%s / %s" % (T("Geotechnical Hazards"), T("Damage")): "geotechnical_slope",
})
rheader = lambda r: nzseel2_rheader(r)
output = s3_rest_controller(module, resourcename,
rheader=rheader)
return output
# -----------------------------------------------------------------------------
def nzseel2_rheader(r, tabs=[]):
""" Resource Headers """
if r.representation == "html":
if r.name == "nzseel2":
assess = r.record
if assess:
rheader_tabs = s3_rheader_tabs(r, tabs)
location = assess.location_id
if location:
location = shn_gis_location_represent(location)
person = assess.person_id
if person:
pe_id = db(db.pr_person.id == person).select(db.pr_person.pe_id, limitby=(0, 1)).first().pe_id
query = (db.pr_contact.pe_id == pe_id) & (db.pr_contact.contact_method == 2)
mobile = db(query).select(db.pr_contact.value, limitby=(0, 1)).first()
if mobile:
mobile = mobile.value
person = vita.fullname(person)
rheader = DIV(TABLE(
TR(
TH("%s: " % T("Person")), person,
TH("%s: " % T("Mobile")), mobile,
),
TR(
TH("%s: " % T("Location")), location,
TH("%s: " % T("Date")), assess.date
),
TR(
TH(""), "",
TH("%s: " % T("Ticket ID")),
r.table.ticket_id.represent(assess.ticket_id),
),
),
rheader_tabs)
return rheader
return None
# -----------------------------------------------------------------------------
def report():
"""
A report providing assessment totals, and breakdown by assessment type and status.
e.g. Level 1 (red, yellow, green) Level 2 (R1-R3, Y1-Y2, G1-G2)
@ToDo: Make into a Custom Method to be able to support Table ACLs
(currently protected by Controller ACL)
"""
level1 = Storage()
table = db.building_nzseel1
# Which is more efficient?
# A) 4 separate .count() in DB
# B) Pulling all records into Python & doing counts in Python
query = (table.deleted == False)
level1.total = db(query).count()
filter = (table.posting == 1)
level1.green = db(query & filter).count()
filter = (table.posting == 2)
level1.yellow = db(query & filter).count()
filter = (table.posting == 3)
level1.red = db(query & filter).count()
level2 = Storage()
table = db.building_nzseel2
query = (table.deleted == False)
level2.total = db(query).count()
filter = (table.posting.belongs((1, 2)))
level2.green = db(query & filter).count()
filter = (table.posting.belongs((3, 4)))
level2.yellow = db(query & filter).count()
filter = (table.posting.belongs((5, 6, 7)))
level2.red = db(query & filter).count()
return dict(level1=level1,
level2=level2)
# -----------------------------------------------------------------------------
#def getformatedData(dbresult):
# result = []
# cnt = -1;
# # Format the results
# for row in dbresult:
# damage = row.estimated_damage
# try:
# trueDate = row.date #datetime.datetime.strptime(row.date, "%Y-%m-%d %H:%M:%S")
# except:
# trueDate = row.created_on
# date = trueDate.strftime('%d %b %Y')
# hour = trueDate.strftime("%H")
# key = (date, hour)
# if (cnt == -1) or (result[cnt][0] != key):
# result.append([key , 0, 0, 0, 0, 0, 0, 0, 1])
# cnt += 1
# else:
# result[cnt][8] += 1
# result[cnt][damage] += 1
#
# return result
def getformatedData(dbresult):
result = []
cntT = cntH = -1
for row in dbresult:
damage = row.estimated_damage
try:
trueDate = row.date
except:
trueDate = row.created_on
date = trueDate.strftime('%d %b %Y')
hour = trueDate.strftime("%H")
keyT = (date, "Total")
keyH = (date, hour)
#print date, hour, keyT, keyH, cntT, cntH
if (cntT == -1) or (result[cntT][0] != keyT):
result.append([keyT, 0, 0, 0, 0, 0, 0, 0, 0])
cntT = cntH + 1
cntH = cntT
if (result[cntH][0] != keyH):
result.append([keyH, 0, 0, 0, 0, 0, 0, 0, 0])
cntH += 1
result[cntT][8] += 1
result[cntH][8] += 1
result[cntT][damage] += 1
result[cntH][damage] += 1
return result
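# Illustrative sketch only (this helper is hypothetical and not used by the
# controller): it feeds getformatedData a couple of stub rows to show the
# (date, "Total") / (date, hour) accumulator layout. It takes an argument so
# that web2py does not expose it as an action.
def _getformatedData_demo(sample_damage):
    from collections import namedtuple
    import datetime
    Row = namedtuple("Row", ["date", "estimated_damage"])
    rows = [Row(datetime.datetime(2011, 2, 22, 9, 30), sample_damage),
            Row(datetime.datetime(2011, 2, 22, 14, 10), sample_damage)]
    # Each entry is [(date, "Total" or hour), counts for damage classes 1-7, row total]
    return getformatedData(rows)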
def timeline():
"""
A report providing assessments received broken down by time
"""
result = Storage()
inspection = []
creation = []
# raw SQL command
# select `date`, estimated_damage FROM building_nzseel1 WHERE deleted = "F" ORDER BY `date` DESC
table = db.building_nzseel1
dbresult = db(table.deleted == False).select(table.date,
table.estimated_damage,
orderby=~table.date,
)
inspection = getformatedData(dbresult)
# Here is the raw SQL command
# select created_on, estimated_damage FROM building_nzseel1 WHERE deleted = "F" ORDER BY created_on DESC
dbresult = db(table.deleted == False).select(table.created_on,
table.estimated_damage,
orderby=~table.created_on,
)
creation = getformatedData(dbresult)
totals = [0, 0, 0, 0, 0, 0, 0, 0]
for line in inspection:
if line[0][1] == "Total":
for i in range(8):
totals[i] += line[i + 1]
return dict(inspection=inspection,
creation=creation,
totals= totals
)
# -----------------------------------------------------------------------------
def adminLevel():
"""
A report providing assessments received broken down by administration level
"""
# raw SQL command
# select parent, `path`, estimated_damage FROM building_nzseel1, gis_location WHERE building_nzseel1.deleted = "F" and (gis_location.id = building_nzseel1.location_id)
tableNZ1 = db.building_nzseel1
tableGIS = db.gis_location
query = (tableNZ1.location_id == tableGIS.id) & (tableNZ1.deleted == False)
dbresult = db(query).select(tableGIS.path,
tableGIS.parent,
tableNZ1.estimated_damage
)
result = []
temp = {}
# Format the results
for row in dbresult:
parent = row.gis_location.parent ##report[0]
path = row.gis_location.path #report[1]
damage = row.building_nzseel1.estimated_damage #report[2]
if temp.has_key(parent):
temp[parent][7] += 1
else:
temp[parent]=[0, 0, 0, 0, 0, 0, 0, 1]
temp[parent][damage - 1] += 1
gis = {}
for (key) in temp.keys():
# raw SQL command
# "select name, parent FROM gis_location WHERE gis_location.id = '%s'" % key
row = tableGIS(key)
if row == None:
gis[key] = T("Unknown")
else:
gis[key] = row.name
for (key,item) in temp.items():
if gis.has_key(key):
name = gis[key]
else:
name = T("Unknown")
result.append((name,item))
return dict(report=result,
)
# -----------------------------------------------------------------------------
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ast
import numpy as np
import mxnet as mx
class BinaryRBM(mx.operator.CustomOp):
def __init__(self, k):
self.k = k # Persistent contrastive divergence k
def forward(self, is_train, req, in_data, out_data, aux):
visible_layer_data = in_data[0] # (num_batch, num_visible)
visible_layer_bias = in_data[1] # (num_visible,)
hidden_layer_bias = in_data[2] # (num_hidden,)
interaction_weight= in_data[3] # (num_visible, num_hidden)
if is_train:
_, hidden_layer_prob_1 = self.sample_hidden_layer(visible_layer_data, hidden_layer_bias, interaction_weight)
hidden_layer_sample = aux[1] # The initial state of the Gibbs sampling for persistent CD
else:
hidden_layer_sample, hidden_layer_prob_1 = self.sample_hidden_layer(visible_layer_data, hidden_layer_bias, interaction_weight)
# k-step Gibbs sampling
for _ in range(self.k):
visible_layer_sample, visible_layer_prob_1 = self.sample_visible_layer(hidden_layer_sample, visible_layer_bias, interaction_weight)
hidden_layer_sample, _ = self.sample_hidden_layer(visible_layer_sample, hidden_layer_bias, interaction_weight)
if is_train:
# Used in backward and next forward
aux[0][:] = visible_layer_sample
aux[1][:] = hidden_layer_sample
self.assign(out_data[0], req[0], visible_layer_prob_1)
self.assign(out_data[1], req[1], hidden_layer_prob_1)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
visible_layer_data = in_data[0] # (num_batch, num_visible)
visible_layer_sample = aux[0] # (num_batch, num_visible)
hidden_layer_prob_1 = out_data[1] # (num_batch, num_hidden)
hidden_layer_sample = aux[1] # (num_batch, num_hidden)
grad_visible_layer_bias = (visible_layer_sample - visible_layer_data).mean(axis=0)
grad_hidden_layer_bias = (hidden_layer_sample - hidden_layer_prob_1).mean(axis=0)
grad_interaction_weight= (mx.nd.linalg.gemm2(visible_layer_sample.expand_dims(2), hidden_layer_sample.expand_dims(1)) -
mx.nd.linalg.gemm2(visible_layer_data.expand_dims(2), hidden_layer_prob_1.expand_dims(1))
).mean(axis=0)
# We don't need the gradient on the visible layer input
self.assign(in_grad[1], req[1], grad_visible_layer_bias)
self.assign(in_grad[2], req[2], grad_hidden_layer_bias)
self.assign(in_grad[3], req[3], grad_interaction_weight)
def sample_hidden_layer(self, visible_layer_batch, hidden_layer_bias, interaction_weight):
return self.sample_layer(visible_layer_batch, hidden_layer_bias, interaction_weight, False)
def sample_visible_layer(self, hidden_layer_batch, visible_layer_bias, interaction_weight):
return self.sample_layer(hidden_layer_batch, visible_layer_bias, interaction_weight, True)
def sample_layer(self, other_layer_sample, layer_bias, interaction_weight, interaction_transpose):
prob_1 = mx.nd.linalg.gemm(
other_layer_sample,
interaction_weight,
layer_bias.tile(reps=(other_layer_sample.shape[0], 1)),
transpose_b=interaction_transpose) # (num_batch, num_units_in_layer)
prob_1.sigmoid(out=prob_1)
return mx.nd.random.uniform(shape=prob_1.shape) < prob_1, prob_1
@mx.operator.register('BinaryRBM')
class BinaryRBMProp(mx.operator.CustomOpProp):
# Auxiliary states are requested only if `for_training` is true.
def __init__(self, num_hidden, k, for_training):
super(BinaryRBMProp, self).__init__(False)
self.num_hidden = int(num_hidden)
self.k = int(k)
self.for_training = ast.literal_eval(for_training)
def list_arguments(self):
# 0: (batch size, the number of visible units)
# 1: (the number of visible units,)
# 2: (the number of hidden units,)
# 3: (the number of visible units, the number of hidden units)
return ['data', 'visible_layer_bias', 'hidden_layer_bias', 'interaction_weight']
def list_outputs(self):
# 0: The probabilities that each visible unit is 1 after `k` steps of Gibbs sampling starting from the given `data`.
# (batch size, the number of visible units)
# 1: The probabilities that each hidden unit is 1 conditional on the given `data`.
# (batch size, the number of hidden units)
return ['visible_layer_prob_1', 'hidden_layer_prob_1']
def list_auxiliary_states(self):
# Used only if `self.for_training` is True.
# 0: Store the visible layer samples obtained in the forward pass, used in the backward pass.
# (batch size, the number of visible units)
# 1: Store the hidden layer samples obtained in the forward pass, used in the backward and next forward pass.
# (batch size, the number of hidden units)
return ['aux_visible_layer_sample', 'aux_hidden_layer_sample'] if self.for_training else []
def infer_shape(self, in_shapes):
visible_layer_data_shape = in_shapes[0] # The input data
visible_layer_bias_shape = (visible_layer_data_shape[1],)
hidden_layer_bias_shape = (self.num_hidden,)
interaction_shape = (visible_layer_data_shape[1], self.num_hidden)
visible_layer_sample_shape = visible_layer_data_shape
visible_layer_prob_1_shape = visible_layer_sample_shape
hidden_layer_sample_shape = (visible_layer_data_shape[0], self.num_hidden)
hidden_layer_prob_1_shape = hidden_layer_sample_shape
return [visible_layer_data_shape, visible_layer_bias_shape, hidden_layer_bias_shape, interaction_shape], \
[visible_layer_prob_1_shape, hidden_layer_prob_1_shape], \
[visible_layer_sample_shape, hidden_layer_sample_shape] if self.for_training else []
def infer_type(self, in_type):
return [in_type[0], in_type[0], in_type[0], in_type[0]], \
[in_type[0], in_type[0]], \
[in_type[0], in_type[0]] if self.for_training else []
def create_operator(self, ctx, in_shapes, in_dtypes):
return BinaryRBM(self.k)
# For gluon API
class BinaryRBMBlock(mx.gluon.HybridBlock):
def __init__(self, num_hidden, k, for_training, **kwargs):
super(BinaryRBMBlock, self).__init__(**kwargs)
with self.name_scope():
self.num_hidden = num_hidden
self.k = k
self.for_training = for_training
self.visible_layer_bias = self.params.get('visible_layer_bias', shape=(0,), allow_deferred_init=True)
self.hidden_layer_bias = self.params.get('hidden_layer_bias', shape=(0,), allow_deferred_init=True)
self.interaction_weight= self.params.get('interaction_weight', shape=(0, 0), allow_deferred_init=True)
if for_training:
self.aux_visible_layer_sample = self.params.get('aux_visible_layer_sample', shape=(0, 0), allow_deferred_init=True)
self.aux_hidden_layer_sample = self.params.get('aux_hidden_layer_sample', shape=(0, 0), allow_deferred_init=True)
def hybrid_forward(self, F, data, visible_layer_bias, hidden_layer_bias, interaction_weight, aux_visible_layer_sample=None, aux_hidden_layer_sample=None):
# As long as `for_training` is kept constant, this conditional statement does not prevent hybridization.
if self.for_training:
return F.Custom(
data,
visible_layer_bias,
hidden_layer_bias,
interaction_weight,
aux_visible_layer_sample,
aux_hidden_layer_sample,
num_hidden=self.num_hidden,
k=self.k,
for_training=self.for_training,
op_type='BinaryRBM')
else:
return F.Custom(
data,
visible_layer_bias,
hidden_layer_bias,
interaction_weight,
num_hidden=self.num_hidden,
k=self.k,
for_training=self.for_training,
op_type='BinaryRBM')
def estimate_log_likelihood(visible_layer_bias, hidden_layer_bias, interaction_weight, ais_batch_size, ais_num_batch, ais_intermediate_steps, ais_burn_in_steps, data, ctx):
# The base-rate RBM with no hidden layer. Its visible layer bias is set to the same values as in the given RBM.
# This is not the only possible choice, but it is simple and works well.
base_rate_visible_layer_bias = visible_layer_bias
base_rate_visible_prob_1 = base_rate_visible_layer_bias.sigmoid()
log_base_rate_z = base_rate_visible_layer_bias.exp().log1p().sum()
def log_intermediate_unnormalized_prob(visible_layer_sample, beta):
p = mx.nd.dot(
visible_layer_sample,
(1 - beta) * base_rate_visible_layer_bias + beta * visible_layer_bias)
if beta != 0:
p += mx.nd.linalg.gemm(
visible_layer_sample,
interaction_weight,
hidden_layer_bias.tile(reps=(visible_layer_sample.shape[0], 1)),
transpose_b=False,
alpha=beta,
beta=beta).exp().log1p().sum(axis=1)
return p
def sample_base_rbm():
rands = mx.nd.random.uniform(shape=(ais_batch_size, base_rate_visible_prob_1.shape[0]), ctx=ctx)
return rands < base_rate_visible_prob_1.tile(reps=(ais_batch_size, 1))
def sample_intermediate_visible_layer(visible_layer_sample, beta):
for _ in range(ais_burn_in_steps):
hidden_prob_1 = mx.nd.linalg.gemm(
visible_layer_sample,
interaction_weight,
hidden_layer_bias.tile(reps=(visible_layer_sample.shape[0], 1)),
transpose_b=False,
alpha=beta,
beta=beta)
hidden_prob_1.sigmoid(out=hidden_prob_1)
hidden_layer_sample = mx.nd.random.uniform(shape=hidden_prob_1.shape, ctx=ctx) < hidden_prob_1
visible_prob_1 = mx.nd.linalg.gemm(
hidden_layer_sample,
interaction_weight,
visible_layer_bias.tile(reps=(hidden_layer_sample.shape[0], 1)),
transpose_b=True,
alpha=beta,
beta=beta) + (1 - beta) * base_rate_visible_layer_bias
visible_prob_1.sigmoid(out=visible_prob_1)
visible_layer_sample = mx.nd.random.uniform(shape=visible_prob_1.shape, ctx=ctx) < visible_prob_1
return visible_layer_sample
def array_from_batch(batch):
if isinstance(batch, mx.io.DataBatch):
return batch.data[0].as_in_context(ctx).flatten()
else: # batch is an instance of list in the case of gluon DataLoader
return batch[0].as_in_context(ctx).flatten()
importance_weight_sum = 0
num_ais_samples = ais_num_batch * ais_batch_size
for _ in range(ais_num_batch):
log_importance_weight = 0
visible_layer_sample = sample_base_rbm()
for n in range(1, ais_intermediate_steps + 1):
beta = 1. * n / ais_intermediate_steps
log_importance_weight += \
log_intermediate_unnormalized_prob(visible_layer_sample, beta) - \
log_intermediate_unnormalized_prob(visible_layer_sample, (n - 1.) / ais_intermediate_steps)
visible_layer_sample = sample_intermediate_visible_layer(visible_layer_sample, beta)
importance_weight_sum += log_importance_weight.exp().sum()
log_z = (importance_weight_sum / num_ais_samples).log() + log_base_rate_z
log_likelihood = 0
num_data = 0
for batch in data:
batch_array = array_from_batch(batch)
log_likelihood += log_intermediate_unnormalized_prob(batch_array, 1) - log_z
num_data += batch_array.shape[0]
log_likelihood = log_likelihood.sum() / num_data
return log_likelihood.asscalar(), log_z.asscalar()
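# Minimal usage sketch of the custom op above (illustrative sizes, CPU context,
# random parameters). It runs in inference mode (for_training='False'), so no
# auxiliary states are needed; the outputs are the visible/hidden unit
# probabilities after k Gibbs steps.
if __name__ == '__main__':
    num_visible, num_hidden, batch_size = 6, 4, 3
    data = (mx.nd.random.uniform(shape=(batch_size, num_visible)) > 0.5).astype('float32')
    visible_bias = mx.nd.zeros((num_visible,))
    hidden_bias = mx.nd.zeros((num_hidden,))
    weight = mx.nd.random.normal(scale=0.01, shape=(num_visible, num_hidden))
    visible_prob_1, hidden_prob_1 = mx.nd.Custom(
        data, visible_bias, hidden_bias, weight,
        num_hidden=num_hidden, k=5, for_training='False',
        op_type='BinaryRBM')
    # shapes: (batch, num_visible) and (batch, num_hidden)
    print(visible_prob_1.shape, hidden_prob_1.shape)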
|
|
"""
Feature scoring functionality
"""
import math
from operator import itemgetter
import numpy as np
import pandas as pd
from sklearn.ensemble import (ExtraTreesClassifier, ExtraTreesRegressor,
RandomForestClassifier, RandomForestRegressor)
from sklearn.feature_selection import (f_regression, f_classif, chi2)
from .scenario_discovery_util import RuleInductionType
from ..util import get_module_logger
# Created on Jul 9, 2014
#
# .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
#
# TODO:: look at
# http://scikit-learn.org/stable/auto_examples/linear_model/plot_sparse_recovery.html#example-linear-model-plot-sparse-recovery-py
__all__ = ['F_REGRESSION', 'F_CLASSIFICATION', 'CHI2',
'get_univariate_feature_scores', 'get_rf_feature_scores',
'get_ex_feature_scores', 'get_feature_scores_all']
_logger = get_module_logger(__name__)
F_REGRESSION = f_regression
F_CLASSIFICATION = f_classif
CHI2 = chi2
def _prepare_experiments(experiments):
"""
transform the experiments DataFrame into a numpy array.
Parameters
----------
experiments : DataFrame
Returns
-------
ndarray, list
"""
try:
experiments = experiments.drop('scenario', axis=1)
except KeyError:
pass
x = experiments.copy()
x_nominal = x.select_dtypes(exclude=np.number)
x_nominal_columns = x_nominal.columns.values
for column in x_nominal_columns:
if np.unique(x[column]).shape == (1,):
x = x.drop(column, axis=1)
_logger.info(("{} dropped from analysis "
"because only a single category").format(column))
else:
x[column] = x[column].astype('category').cat.codes
return x.values, x.columns.tolist()
def _prepare_outcomes(outcomes, classify):
"""
transform the outcomes dict into a vector with either the class allocation
or the value.
Parameters
----------
outcomes : dict
the outcomes dict
classify : callable or str
a classify function or variable analogous to PRIM
Returns
-------
1d ndarray
the return from classify
bool
data is categorical (True) or continuous (False)
Raises
--------
TypeError
if classify is neither a StringType nor a callable
KeyError
if classify is a string which is not a key in the outcomes dict.
"""
if isinstance(classify, str):
try:
y = outcomes[classify]
except KeyError as e:
raise e
categorical = False
elif callable(classify):
y = classify(outcomes)
categorical = True
else:
raise TypeError("unknown type for classify")
return y, categorical
def get_univariate_feature_scores(x, y, score_func=F_CLASSIFICATION):
"""
calculate feature scores using univariate statistical tests. In case of
categorical data, chi square or the Anova F value is used. In case of
continuous data the Anova F value is used.
Parameters
----------
x : structured array
y : 1D nd.array
score_func : {F_CLASSIFICATION, F_REGRESSION, CHI2}
the score function to use, one of f_regression (regression), or
f_classification or chi2 (classification).
Returns
-------
pandas DataFrame
indexed by uncertainty and sorted in ascending order of the feature
scores (i.e. p-values in this case, so most significant first).
"""
x, uncs = _prepare_experiments(x)
pvalues = score_func(x, y)[1]
pvalues = np.asarray(pvalues)
pvalues = zip(uncs, pvalues)
pvalues = list(pvalues)
pvalues.sort(key=itemgetter(1))
pvalues = pd.DataFrame(pvalues)
pvalues = pvalues.set_index(0)
return pvalues
def get_rf_feature_scores(x, y, mode=RuleInductionType.CLASSIFICATION,
nr_trees=250,
max_features='auto', max_depth=None,
min_samples_split=2, min_samples_leaf=1,
bootstrap=True, oob_score=True, random_state=None):
"""
Get feature scores using a random forest
Parameters
----------
x : structured array
y : 1D nd.array
mode : {RuleInductionType.CLASSIFICATION, RuleInductionType.REGRESSION}
nr_trees : int, optional
nr. of trees in forest (default=250)
max_features : int, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
max_depth : int, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
min_samples_split : int, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
min_samples_leaf : int, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
bootstrap : bool, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
oob_score : bool, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
random_state : int, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
Returns
-------
pandas DataFrame
indexed by uncertainty and sorted in descending order of the feature
scores
object
either RandomForestClassifier or RandomForestRegressor
"""
x, uncs = _prepare_experiments(x)
if mode == RuleInductionType.CLASSIFICATION:
rfc = RandomForestClassifier
criterion = 'gini'
elif mode == RuleInductionType.REGRESSION:
rfc = RandomForestRegressor
criterion = 'mse'
else:
raise ValueError('{} not valid for mode'.format(mode))
forest = rfc(n_estimators=nr_trees,
criterion=criterion,
max_features=max_features,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
bootstrap=bootstrap,
oob_score=oob_score,
random_state=random_state)
forest.fit(x, y)
importances = forest.feature_importances_
importances = zip(uncs, importances)
importances = list(importances)
importances.sort(key=itemgetter(1), reverse=True)
importances = pd.DataFrame(importances)
importances = importances.set_index(0)
return importances, forest
def get_ex_feature_scores(x, y, mode=RuleInductionType.CLASSIFICATION,
nr_trees=100, max_features=None, max_depth=None,
min_samples_split=2, min_samples_leaf=None,
min_weight_fraction_leaf=0, max_leaf_nodes=None,
bootstrap=True, oob_score=True, random_state=None):
"""
Get feature scores using extra trees
Parameters
----------
x : structured array
y : 1D nd.array
mode : {RuleInductionType.CLASSIFICATION, RuleInductionType.REGRESSION}
nr_trees : int, optional
nr. of trees in forest (default=100)
max_features : int, float, string or None, optional
by default, it will use the number of features / 3, following
Jaxa-Rozen & Kwakkel (2018) doi: 10.1016/j.envsoft.2018.06.011
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
max_depth : int, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
min_samples_split : int, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
min_samples_leaf : int, optional
defaults to 1 for N=1000 or lower, from there on
proportional to sqrt of N
(see discussion in Jaxa-Rozen & Kwakkel (2018) doi: 10.1016/j.envsoft.2018.06.011)
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
min_weight_fraction_leaf : float, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
max_leaf_nodes: int or None, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
bootstrap : bool, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
oob_score : bool, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
random_state : int, optional
see http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
Returns
-------
pandas DataFrame
indexed by uncertainty and sorted in descending order of the feature
scores
object
either ExtraTreesClassifier or ExtraTreesRegressor
"""
x, uncs = _prepare_experiments(x)
# TODO
# max_features = number of variables / 3
#
# min_samples_leaf
# 1 for N = 1000 or lower,
# then proportional based on sqrt of N,
# i.e. sqrt(N) / sqrt(1000), with 1 as the minimum
if max_features is None:
max_features = int(round(x.shape[1] / 3))
if min_samples_leaf is None:
min_samples_leaf = max(1,
int(round(
math.sqrt(x.shape[0]) / math.sqrt(1000))))
if mode == RuleInductionType.CLASSIFICATION:
etc = ExtraTreesClassifier
criterion = 'gini'
elif mode == RuleInductionType.REGRESSION:
etc = ExtraTreesRegressor
criterion = 'mse'
else:
raise ValueError('{} not valid for mode'.format(mode))
extra_trees = etc(n_estimators=nr_trees,
criterion=criterion,
max_features=max_features,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_leaf_nodes=max_leaf_nodes,
bootstrap=bootstrap,
oob_score=oob_score,
random_state=random_state)
extra_trees.fit(x, y)
importances = extra_trees.feature_importances_
importances = zip(uncs, importances)
importances = list(importances)
importances.sort(key=itemgetter(1), reverse=True)
importances = pd.DataFrame(importances)
importances = importances.set_index(0)
return importances, extra_trees
algorithms = {'extra trees': get_ex_feature_scores,
'random forest': get_rf_feature_scores,
'univariate': get_univariate_feature_scores}
def get_feature_scores_all(x, y, alg='extra trees',
mode=RuleInductionType.REGRESSION,
**kwargs):
"""perform feature scoring for all outcomes using the specified feature
scoring algorithm
Parameters
----------
x : numpy structured array
y : dict of 1d numpy arrays
the outcomes, with a string as key, and a 1D array for each outcome
alg : {'extra trees', 'random forest', 'univariate'}, optional
mode : {RuleInductionType.REGRESSION, RuleInductionType.CLASSIFICATION}, optional
kwargs : dict, optional
any remaining keyword arguments will be passed to the specific
feature scoring algorithm
Returns
-------
DataFrame instance
"""
complete = None
for key, value in y.items():
fs, _ = algorithms[alg](x, value, mode=mode, **kwargs)
fs = fs.rename(columns={1: key})
if complete is None:
complete = fs.T
else:
complete = complete.append(fs.T, sort=True)
return complete.T
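# Self-contained sketch (synthetic data; names are illustrative): score three
# made-up uncertainties against a binary outcome with the extra-trees scorer
# defined above. Guarded so it does not run on import.
if __name__ == '__main__':
    rng = np.random.default_rng(42)
    experiments = pd.DataFrame({'a': rng.normal(size=200),
                                'b': rng.normal(size=200),
                                'c': rng.choice(['x', 'y'], size=200)})
    outcome = (experiments['a'] + rng.normal(scale=0.1, size=200) > 0).values
    scores, _ = get_ex_feature_scores(experiments, outcome,
                                      mode=RuleInductionType.CLASSIFICATION)
    print(scores)  # 'a' is expected to come out with the highest score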
|
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
# -*- test-case-name: flocker.provision.test.test_install -*-
"""
Install flocker on a remote node.
"""
import posixpath
from textwrap import dedent
from urlparse import urljoin, urlparse
from effect import Func, Effect
import yaml
from zope.interface import implementer
from characteristic import attributes
from pyrsistent import PRecord, field
from flocker.acceptance.testtools import DatasetBackend
from ._libcloud import INode
from ._common import PackageSource, Variants
from ._ssh import (
run, run_from_args,
sudo, sudo_from_args,
put,
run_remotely
)
from ._effect import sequence
from flocker import __version__ as version
from flocker.cli import configure_ssh
from flocker.common.version import (
get_installable_version, get_package_key_suffix, is_release,
)
# A systemctl sub-command to start or restart a service. We use restart here
# so that if it is already running it gets restarted (possibly necessary to
# respect updated configuration) and because restart will also start it if it
# is not running.
START = "restart"
ZFS_REPO = {
'centos-7': "https://s3.amazonaws.com/archive.zfsonlinux.org/"
"epel/zfs-release.el7.noarch.rpm",
}
ARCHIVE_BUCKET = 'clusterhq-archive'
def get_repository_url(distribution, flocker_version):
"""
Return the URL for the repository of a given distribution.
For ``yum``-using distributions this gives the URL to a package that adds
entries to ``/etc/yum.repos.d``. For ``apt``-using distributions, this
gives the URL for a repo containing a Packages(.gz) file.
:param bytes distribution: The Linux distribution to get a repository for.
:param bytes flocker_version: The version of Flocker to get a repository
for.
:return bytes: The URL pointing to a repository of packages.
:raises: ``UnsupportedDistribution`` if the distribution is unsupported.
"""
distribution_to_url = {
# TODO instead of hardcoding keys, use the _to_Distribution map
# and then choose the name
'centos-7': "https://{archive_bucket}.s3.amazonaws.com/"
"{key}/clusterhq-release$(rpm -E %dist).noarch.rpm".format(
archive_bucket=ARCHIVE_BUCKET,
key='centos',
),
# This could hardcode the version number instead of using
# ``lsb_release`` but that allows instructions to be shared between
# versions, and for earlier error reporting if you try to install on a
# separate version. The $(ARCH) part must be left unevaluated, hence
# the backslash escapes (one to make shell ignore the $ as a
# substitution marker, and then doubled to make Python ignore the \ as
# an escape marker). The output of this value then goes into
# /etc/apt/sources.list which does its own substitution on $(ARCH)
# during a subsequent apt-get update
'ubuntu-14.04': 'https://{archive_bucket}.s3.amazonaws.com/{key}/'
'$(lsb_release --release --short)/\\$(ARCH)'.format(
archive_bucket=ARCHIVE_BUCKET,
key='ubuntu' + get_package_key_suffix(
flocker_version),
),
'ubuntu-15.04': 'https://{archive_bucket}.s3.amazonaws.com/{key}/'
'$(lsb_release --release --short)/\\$(ARCH)'.format(
archive_bucket=ARCHIVE_BUCKET,
key='ubuntu' + get_package_key_suffix(
flocker_version),
),
}
try:
return distribution_to_url[distribution]
except KeyError:
raise UnsupportedDistribution()
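# For illustration (derived from the mapping above, not part of the original
# source): get_repository_url('centos-7', flocker_version) resolves to
# "https://clusterhq-archive.s3.amazonaws.com/centos/clusterhq-release$(rpm -E %dist).noarch.rpm"
# where the $(rpm -E %dist) part is left for the shell on the target node to expand.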
def get_repo_options(flocker_version):
"""
Get a list of options for enabling necessary yum repositories.
:param bytes flocker_version: The version of Flocker to get options for.
:return: List of bytes for enabling (or not) a testing repository.
"""
is_dev = not is_release(flocker_version)
if is_dev:
return ['--enablerepo=clusterhq-testing']
else:
return []
class UnsupportedDistribution(Exception):
"""
Raised when an unsupported distribution is requested.
"""
@attributes(['distribution'])
class DistributionNotSupported(NotImplementedError):
"""
Raised when the provisioning step is not supported on the given
distribution.
:ivar bytes distribution: The distribution that isn't supported.
"""
def __str__(self):
return "Distribution not supported: %s" % (self.distribution,)
@implementer(INode)
class ManagedNode(PRecord):
"""
A node managed by some other system (eg by hand or by another piece of
orchestration software).
"""
address = field(type=bytes, mandatory=True)
private_address = field(type=(bytes, type(None)),
initial=None, mandatory=True)
distribution = field(type=bytes, mandatory=True)
def task_client_installation_test():
"""
Check that the CLI is working.
"""
return run_from_args(['flocker-deploy', '--version'])
def install_cli_commands_yum(distribution, package_source):
"""
Install Flocker CLI on CentOS.
The ClusterHQ repo is added for downloading latest releases. If
``package_source`` contains a branch, then a BuildBot repo will also
be added to the package search path, to use in-development packages.
Note, the ClusterHQ repo is always enabled, to provide dependencies.
:param bytes distribution: The distribution the node is running.
:param PackageSource package_source: The source from which to install the
package.
:return: a sequence of commands to run on the distribution
"""
if package_source.branch:
# A development branch has been selected - add its Buildbot repo
use_development_branch = True
result_path = posixpath.join(
'/results/omnibus/', package_source.branch, distribution)
base_url = urljoin(package_source.build_server, result_path)
else:
use_development_branch = False
commands = [
sudo(command="yum install -y " + get_repository_url(
distribution=distribution,
flocker_version=get_installable_version(version))),
]
if use_development_branch:
repo = dedent(b"""\
[clusterhq-build]
name=clusterhq-build
baseurl=%s
gpgcheck=0
enabled=0
""") % (base_url,)
commands.append(put(content=repo,
path='/tmp/clusterhq-build.repo'))
commands.append(sudo_from_args([
'cp', '/tmp/clusterhq-build.repo',
'/etc/yum.repos.d/clusterhq-build.repo']))
repo_options = ['--enablerepo=clusterhq-build']
else:
repo_options = get_repo_options(
flocker_version=get_installable_version(version))
if package_source.os_version:
package = 'clusterhq-flocker-cli-%s' % (package_source.os_version,)
else:
package = 'clusterhq-flocker-cli'
# Install Flocker CLI and all dependencies
commands.append(sudo_from_args(
["yum", "install"] + repo_options + ["-y", package]))
return sequence(commands)
def install_cli_commands_ubuntu(distribution, package_source):
"""
Install flocker CLI on Ubuntu.
The ClusterHQ repo is added for downloading latest releases. If
``package_source`` contains a branch, then a BuildBot repo will also
be added to the package search path, to use in-development packages.
Note, the ClusterHQ repo is always enabled, to provide dependencies.
:param bytes distribution: The distribution the node is running.
:param PackageSource package_source: The source from which to install the
package.
:return: a sequence of commands to run on the distribution
"""
if package_source.branch:
# A development branch has been selected - add its Buildbot repo
use_development_branch = True
result_path = posixpath.join(
'/results/omnibus/', package_source.branch, distribution)
base_url = urljoin(package_source.build_server, result_path)
else:
use_development_branch = False
commands = [
# Minimal images often have cleared apt caches and are missing
# packages that are common in a typical release. These commands
# ensure that we start from a good base system with the required
# capabilities, particularly that the add-apt-repository command
# and HTTPS URLs are supported.
# FLOC-1880 will ensure these are necessary and sufficient.
sudo_from_args(["apt-get", "update"]),
sudo_from_args([
"apt-get", "-y", "install", "apt-transport-https",
"software-properties-common"]),
# Add ClusterHQ repo for installation of Flocker packages.
sudo(command='add-apt-repository -y "deb {} /"'.format(
get_repository_url(
distribution=distribution,
flocker_version=get_installable_version(version))))
]
if use_development_branch:
# Add BuildBot repo for running tests
commands.append(sudo_from_args([
"add-apt-repository", "-y", "deb {} /".format(base_url)]))
# During a release, the ClusterHQ repo may contain packages with
# a higher version number than the Buildbot repo for a branch.
# Use a pin file to ensure that any Buildbot repo has higher
# priority than the ClusterHQ repo.
buildbot_host = urlparse(package_source.build_server).hostname
commands.append(put(dedent('''\
Package: *
Pin: origin {}
Pin-Priority: 900
'''.format(buildbot_host)), '/tmp/apt-pref'))
commands.append(sudo_from_args([
'mv', '/tmp/apt-pref', '/etc/apt/preferences.d/buildbot-900']))
# Update to read package info from new repos
commands.append(sudo_from_args(["apt-get", "update"]))
if package_source.os_version:
package = 'clusterhq-flocker-cli=%s' % (package_source.os_version,)
else:
package = 'clusterhq-flocker-cli'
# Install Flocker CLI and all dependencies
commands.append(sudo_from_args([
'apt-get', '-y', '--force-yes', 'install', package]))
return sequence(commands)
_task_install_commands = {
'centos-7': install_cli_commands_yum,
'ubuntu-14.04': install_cli_commands_ubuntu,
'ubuntu-15.04': install_cli_commands_ubuntu,
}
def task_install_cli(distribution, package_source=PackageSource()):
"""
Install flocker CLI on a distribution.
The ClusterHQ repo is added for downloading latest releases. If
``package_source`` contains a branch, then a BuildBot repo will also
be added to the package search path, to use in-development packages.
Note, the ClusterHQ repo is always enabled, to provide dependencies.
:param bytes distribution: The distribution the node is running.
:param PackageSource package_source: The source from which to install the
package.
:return: a sequence of commands to run on the distribution
"""
return _task_install_commands[distribution](distribution, package_source)
def install_cli(package_source, node):
"""
Return an effect to run the CLI installation tasks on a remote node.
:param package_source: Package source description
:param node: Remote node description
"""
return run_remotely(
node.get_default_username(), node.address,
task_install_cli(node.distribution, package_source))
def task_configure_brew_path():
"""
Configure non-interactive shell to use all paths.
By default, OSX provides a minimal $PATH, for programs run via SSH. In
particular /usr/local/bin (which contains `brew`) isn't in the path. This
configures the path to have it there.
"""
return put(
path='.bashrc',
content=dedent("""\
if [ -x /usr/libexec/path_helper ]; then
eval `/usr/libexec/path_helper -s`
fi
"""))
def task_test_homebrew(recipe):
"""
The commands used to install a Homebrew recipe for Flocker and test it.
This taps the ClusterHQ/tap tap, which means that Homebrew looks in the
ClusterHQ/homebrew-tap GitHub repository for any recipe name given.
:param bytes recipe: The name of a recipe in either the official Homebrew
tap or ClusterHQ/tap, or a URL pointing to a recipe.
:return Effect: Commands used to install a Homebrew recipe for Flocker and
test it.
"""
return sequence([
run_from_args(['brew', 'tap', 'ClusterHQ/tap']),
run("brew update"),
run("brew install {recipe}".format(recipe=recipe)),
run("brew test {recipe}".format(recipe=recipe)),
])
def task_install_ssh_key():
"""
Install the authorized ssh keys of the current user for root as well.
"""
return sequence([
sudo_from_args(['cp', '.ssh/authorized_keys',
'/root/.ssh/authorized_keys']),
])
def task_upgrade_kernel(distribution):
"""
Upgrade kernel.
"""
if distribution == 'centos-7':
return sequence([
run_from_args([
"yum", "install", "-y", "kernel-devel", "kernel"]),
run_from_args(['sync']),
])
elif distribution == 'ubuntu-14.04':
# Not required.
return sequence([])
else:
raise DistributionNotSupported(distribution=distribution)
def task_disable_selinux(distribution):
"""
Disable SELinux for this session and permanently.
XXX: Remove this when we work out suitable SELinux settings.
See https://clusterhq.atlassian.net/browse/FLOC-619.
"""
if distribution in ('centos-7',):
return sequence([
run("if selinuxenabled; then setenforce 0; fi"),
run("test -e /etc/selinux/config && "
"sed --in-place='.preflocker' "
"'s/^SELINUX=.*$/SELINUX=disabled/g' "
"/etc/selinux/config"),
])
elif distribution in ('ubuntu-14.04',):
# Ubuntu does not have SELinux enabled
return sequence([])
else:
raise DistributionNotSupported(distribution=distribution)
def task_install_control_certificates(ca_cert, control_cert, control_key):
"""
Install certificates and private key required by the control service.
:param FilePath ca_cert: Path to CA certificate on local machine.
:param FilePath control_cert: Path to control service certificate on
local machine.
:param FilePath control_key: Path to control service private key on
local machine.
"""
# It would be better if permissions were correct from the start.
# https://clusterhq.atlassian.net/browse/FLOC-1922
return sequence([
run('mkdir -p /etc/flocker'),
run('chmod u=rwX,g=,o= /etc/flocker'),
put(path="/etc/flocker/cluster.crt", content=ca_cert.getContent()),
put(path="/etc/flocker/control-service.crt",
content=control_cert.getContent()),
put(path="/etc/flocker/control-service.key",
content=control_key.getContent()),
])
def task_install_node_certificates(ca_cert, node_cert, node_key):
"""
Install certificates and private key required by a node.
:param FilePath ca_cert: Path to CA certificate on local machine.
:param FilePath node_cert: Path to node certificate on
local machine.
:param FilePath node_key: Path to node private key on
local machine.
"""
# It would be better if permissions were correct from the start.
# https://clusterhq.atlassian.net/browse/FLOC-1922
return sequence([
run('mkdir -p /etc/flocker'),
run('chmod u=rwX,g=,o= /etc/flocker'),
put(path="/etc/flocker/cluster.crt", content=ca_cert.getContent()),
put(path="/etc/flocker/node.crt",
content=node_cert.getContent()),
put(path="/etc/flocker/node.key",
content=node_key.getContent()),
])
def task_enable_docker(distribution):
"""
Start docker and configure it to start automatically.
"""
if distribution in ('centos-7',):
return sequence([
run_from_args(["systemctl", "enable", "docker.service"]),
run_from_args(["systemctl", "start", "docker.service"]),
])
elif distribution == 'ubuntu-14.04':
# Ubuntu enables docker service during installation
return sequence([])
else:
raise DistributionNotSupported(distribution=distribution)
def open_firewalld(service):
"""
Open firewalld port for a service.
:param str service: Name of service.
"""
return sequence([
run_from_args(command + [service])
for command in [['firewall-cmd', '--permanent', '--add-service'],
['firewall-cmd', '--add-service']]])
def open_ufw(service):
"""
Open ufw port for a service.
:param str service: Name of service.
"""
return sequence([
run_from_args(['ufw', 'allow', service])
])
def task_enable_flocker_control(distribution):
"""
Enable flocker-control service.
"""
if distribution in ('centos-7',):
return sequence([
run_from_args(['systemctl', 'enable', 'flocker-control']),
run_from_args(['systemctl', START, 'flocker-control']),
])
elif distribution == 'ubuntu-14.04':
# Since the flocker-control service is currently installed
# alongside the flocker-dataset-agent service, the default control
# service configuration does not automatically start the
# service. Here, we provide an override file to start it.
return sequence([
put(
path='/etc/init/flocker-control.override',
content=dedent('''\
start on runlevel [2345]
stop on runlevel [016]
'''),
),
run("echo 'flocker-control-api\t4523/tcp\t\t\t# Flocker Control API port' >> /etc/services"), # noqa
run("echo 'flocker-control-agent\t4524/tcp\t\t\t# Flocker Control Agent port' >> /etc/services"), # noqa
run_from_args(['service', 'flocker-control', 'start']),
])
else:
raise DistributionNotSupported(distribution=distribution)
def task_open_control_firewall(distribution):
"""
Open the firewall for flocker-control.
"""
if distribution in ('centos-7',):
open_firewall = open_firewalld
elif distribution == 'ubuntu-14.04':
open_firewall = open_ufw
else:
raise DistributionNotSupported(distribution=distribution)
return sequence([
open_firewall(service)
for service in ['flocker-control-api', 'flocker-control-agent']
])
def task_enable_flocker_agent(distribution, control_node,
dataset_backend=DatasetBackend.zfs,
dataset_backend_configuration=dict(
pool=u'flocker'
)):
"""
Configure and enable the flocker agents.
:param bytes control_node: The address of the control agent.
:param DatasetBackend dataset_backend: The volume backend the nodes are
configured with. (This has a default for use in the documentation).
:param dict dataset_backend_configuration: The backend specific
configuration options.
"""
dataset_backend_configuration = dataset_backend_configuration.copy()
dataset_backend_configuration.update({
u"backend": dataset_backend.name,
})
put_config_file = put(
path='/etc/flocker/agent.yml',
content=yaml.safe_dump(
{
"version": 1,
"control-service": {
"hostname": control_node,
"port": 4524,
},
"dataset": dataset_backend_configuration,
},
),
)
if distribution in ('centos-7',):
return sequence([
put_config_file,
run_from_args(['systemctl', 'enable', 'flocker-dataset-agent']),
run_from_args(['systemctl', START, 'flocker-dataset-agent']),
run_from_args(['systemctl', 'enable', 'flocker-container-agent']),
run_from_args(['systemctl', START, 'flocker-container-agent']),
])
elif distribution == 'ubuntu-14.04':
return sequence([
put_config_file,
run_from_args(['service', 'flocker-dataset-agent', 'start']),
run_from_args(['service', 'flocker-container-agent', 'start']),
])
else:
raise DistributionNotSupported(distribution=distribution)
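# For illustration (a sketch, not part of the original source): with the default
# arguments above and a hypothetical control node address of "203.0.113.10", the
# generated /etc/flocker/agent.yml contains roughly:
#
#     control-service: {hostname: 203.0.113.10, port: 4524}
#     dataset: {backend: zfs, pool: flocker}
#     version: 1
#
# (the exact layout depends on yaml.safe_dump defaults).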
def task_create_flocker_pool_file():
"""
Create a file-backed ZFS pool for flocker.
"""
return sequence([
run('mkdir -p /var/opt/flocker'),
run('truncate --size 10G /var/opt/flocker/pool-vdev'),
run('zpool create flocker /var/opt/flocker/pool-vdev'),
])
def task_install_zfs(distribution, variants=set()):
"""
Install ZFS on a node.
:param bytes distribution: The distribution the node is running.
:param set variants: The set of variant configurations to use when provisioning.
"""
commands = []
if distribution == 'ubuntu-14.04':
commands += [
# ZFS not available in base Ubuntu - add ZFS repo
run_from_args([
"add-apt-repository", "-y", "ppa:zfs-native/stable"]),
]
commands += [
# Update to read package info from new repos
run_from_args([
"apt-get", "update"]),
# Package spl-dkms sometimes does not have libc6-dev as a
# dependency, add it before ZFS installation requires it.
# See https://github.com/zfsonlinux/zfs/issues/3298
run_from_args(["apt-get", "-y", "install", "libc6-dev"]),
run_from_args(['apt-get', '-y', 'install', 'zfsutils']),
]
elif distribution in ('centos-7',):
commands += [
run_from_args(["yum", "install", "-y", ZFS_REPO[distribution]]),
]
if distribution == 'centos-7':
commands.append(
run_from_args(["yum", "install", "-y", "epel-release"]))
if Variants.ZFS_TESTING in variants:
commands += [
run_from_args(['yum', 'install', '-y', 'yum-utils']),
run_from_args([
'yum-config-manager', '--enable', 'zfs-testing'])
]
commands += [
run_from_args(['yum', 'install', '-y', 'zfs']),
]
else:
raise DistributionNotSupported(distribution)
return sequence(commands)
def configure_zfs(node, variants):
"""
Configure ZFS for use as a Flocker backend.
:param INode node: The node to configure ZFS on.
:param set variants: The set of variant configurations to use when provisioning.
:return Effect:
"""
return sequence([
run_remotely(
username='root',
address=node.address,
commands=task_upgrade_kernel(
distribution=node.distribution),
),
node.reboot(),
run_remotely(
username='root',
address=node.address,
commands=sequence([
task_install_zfs(
distribution=node.distribution,
variants=variants),
task_create_flocker_pool_file(),
]),
),
Effect(
Func(lambda: configure_ssh(node.address, 22))),
])
def _uninstall_flocker_ubuntu1404():
"""
Return an ``Effect`` for uninstalling the Flocker package from an Ubuntu
14.04 machine.
"""
return run_from_args([
b"apt-get", b"remove", b"-y", b"--purge", b"clusterhq-python-flocker",
])
def _uninstall_flocker_centos7():
"""
Return an ``Effect`` for uninstalling the Flocker package from a CentOS 7
machine.
"""
return sequence([
run_from_args([
b"yum", b"erase", b"-y", b"clusterhq-python-flocker",
]),
run_from_args([
b"yum", b"erase", b"-y", b"clusterhq-release",
]),
])
_flocker_uninstallers = {
"ubuntu-14.04": _uninstall_flocker_ubuntu1404,
"centos-7": _uninstall_flocker_centos7,
}
def task_uninstall_flocker(distribution):
"""
Return an ``Effect`` for uninstalling the Flocker package from the given
distribution.
"""
return _flocker_uninstallers[distribution]()
def uninstall_flocker(nodes):
"""
Return an ``Effect`` for uninstalling the Flocker package from all of the
given nodes.
"""
return _run_on_all_nodes(
nodes,
task=lambda node: task_uninstall_flocker(node.distribution)
)
def task_install_flocker(
distribution=None,
package_source=PackageSource(),
):
"""
Install flocker cluster on a distribution.
The ClusterHQ repo is added for downloading latest releases. If
``package_source`` contains a branch, then a BuildBot repo will also
be added to the package search path, to use in-development packages.
Note, the ClusterHQ repo is always enabled, to provide dependencies.
:param bytes distribution: The distribution the node is running.
:param PackageSource package_source: The source from which to install the
package.
:raises: ``UnsupportedDistribution`` if the distribution is unsupported.
"""
if package_source.branch:
# A development branch has been selected - add its Buildbot repo
use_development_branch = True
result_path = posixpath.join(
'/results/omnibus/', package_source.branch, distribution)
base_url = urljoin(package_source.build_server, result_path)
else:
use_development_branch = False
if distribution in ('ubuntu-14.04', 'ubuntu-15.04'):
commands = [
# Ensure add-apt-repository command and HTTPS URLs are supported
# FLOC-1880 will ensure these are necessary and sufficient
run_from_args([
"apt-get", "-y", "install", "apt-transport-https",
"software-properties-common"]),
# Add Docker repo for recent Docker versions
run_from_args([
"add-apt-repository", "-y", "ppa:james-page/docker"]),
# Add ClusterHQ repo for installation of Flocker packages.
run(command='add-apt-repository -y "deb {} /"'.format(
get_repository_url(
distribution=distribution,
flocker_version=get_installable_version(version)))),
]
if use_development_branch:
# Add BuildBot repo for testing
commands.append(run_from_args([
"add-apt-repository", "-y", "deb {} /".format(base_url)]))
# During a release, the ClusterHQ repo may contain packages with
# a higher version number than the Buildbot repo for a branch.
# Use a pin file to ensure that any Buildbot repo has higher
# priority than the ClusterHQ repo.
buildbot_host = urlparse(package_source.build_server).hostname
commands.append(put(
dedent('''\
Package: *
Pin: origin {}
Pin-Priority: 900
'''.format(buildbot_host)),
'/etc/apt/preferences.d/buildbot-900'))
commands += [
# Update to read package info from new repos
run_from_args([
"apt-get", "update"]),
]
if package_source.os_version:
package = 'clusterhq-flocker-node=%s' % (
package_source.os_version,)
else:
package = 'clusterhq-flocker-node'
# Install Flocker node and all dependencies
commands.append(run_from_args([
'apt-get', '-y', '--force-yes', 'install', package]))
return sequence(commands)
elif distribution in ('centos-7',):
commands = [
run(command="yum clean all"),
run(command="yum install -y " + get_repository_url(
distribution=distribution,
flocker_version=get_installable_version(version)))
]
if use_development_branch:
repo = dedent(b"""\
[clusterhq-build]
name=clusterhq-build
baseurl=%s
gpgcheck=0
enabled=0
""") % (base_url,)
commands.append(put(content=repo,
path='/etc/yum.repos.d/clusterhq-build.repo'))
repo_options = ['--enablerepo=clusterhq-build']
else:
repo_options = get_repo_options(
flocker_version=get_installable_version(version))
if package_source.os_version:
package = 'clusterhq-flocker-node-%s' % (
package_source.os_version,)
else:
package = 'clusterhq-flocker-node'
commands.append(run_from_args(
["yum", "install"] + repo_options + ["-y", package]))
return sequence(commands)
else:
raise UnsupportedDistribution()
ACCEPTANCE_IMAGES = [
"postgres:latest",
"clusterhq/mongodb:latest",
"clusterhq/flask",
"clusterhq/flaskenv",
"busybox",
]
def task_pull_docker_images(images=ACCEPTANCE_IMAGES):
"""
Pull docker images.
:param list images: List of images to pull. Defaults to images used in
acceptance tests.
"""
return sequence([
run_from_args(['docker', 'pull', image]) for image in images
])
def task_enable_updates_testing(distribution):
"""
Enable the distribution's proposed updates repository.
:param bytes distribution: See func:`task_install_flocker`
"""
raise DistributionNotSupported(distribution=distribution)
def task_enable_docker_head_repository(distribution):
"""
Enable the distribution's repository containing in-development docker
builds.
:param bytes distribution: See func:`task_install_flocker`
"""
if distribution == "centos-7":
return sequence([
put(content=dedent("""\
[virt7-testing]
name=virt7-testing
baseurl=http://cbs.centos.org/repos/virt7-testing/x86_64/os/
enabled=1
gpgcheck=0
"""),
path="/etc/yum.repos.d/virt7-testing.repo")
])
else:
raise DistributionNotSupported(distribution=distribution)
def provision(distribution, package_source, variants):
"""
Provision the node for running flocker.
This drives all the common node installation steps in:
* http://doc-dev.clusterhq.com/gettingstarted/installation.html
:param bytes address: Address of the node to provision.
:param bytes username: Username to connect as.
:param bytes distribution: See func:`task_install_flocker`
:param PackageSource package_source: See func:`task_install_flocker`
:param set variants: The set of variant configurations to use when
provisioning
"""
commands = []
if Variants.DISTRO_TESTING in variants:
commands.append(task_enable_updates_testing(distribution))
if Variants.DOCKER_HEAD in variants:
commands.append(task_enable_docker_head_repository(distribution))
commands.append(
task_install_flocker(
package_source=package_source, distribution=distribution))
if distribution in ('centos-7',):
commands.append(task_disable_selinux(distribution))
commands.append(task_enable_docker(distribution))
return sequence(commands)
def _run_on_all_nodes(nodes, task):
"""
Run some commands on some nodes.
:param nodes: An iterable of ``Node`` instances where the commands should
be run.
:param task: A one-argument callable which is called with each ``Node`` and
should return the ``Effect`` to run on that node.
:return: An ``Effect`` that runs the commands on a group of nodes.
"""
return sequence(list(
run_remotely(
username='root',
address=node.address,
commands=task(node),
)
for node in nodes
))
def install_flocker(nodes, package_source):
"""
Return an ``Effect`` that installs a certain version of Flocker on the
given nodes.
:param nodes: An iterable of ``Node`` instances on which to install
Flocker.
:param PackageSource package_source: The version of Flocker to install.
:return: An ``Effect`` which installs Flocker on the nodes.
"""
return _run_on_all_nodes(
nodes,
task=lambda node: task_install_flocker(
distribution=node.distribution,
package_source=package_source,
)
)
def configure_cluster(cluster, dataset_backend_configuration):
"""
Configure flocker-control, flocker-dataset-agent and
flocker-container-agent on a collection of nodes.
:param Cluster cluster: Description of the cluster to configure.
:param dict dataset_backend_configuration: Configuration parameters to
supply to the dataset backend.
"""
return sequence([
run_remotely(
username='root',
address=cluster.control_node.address,
commands=sequence([
task_install_control_certificates(
cluster.certificates.cluster.certificate,
cluster.certificates.control.certificate,
cluster.certificates.control.key),
task_enable_flocker_control(cluster.control_node.distribution),
]),
),
sequence([
sequence([
run_remotely(
username='root',
address=node.address,
commands=sequence([
task_install_node_certificates(
cluster.certificates.cluster.certificate,
certnkey.certificate,
certnkey.key),
task_enable_flocker_agent(
distribution=node.distribution,
control_node=cluster.control_node.address,
dataset_backend=cluster.dataset_backend,
dataset_backend_configuration=(
dataset_backend_configuration
),
)]),
),
]) for certnkey, node
in zip(cluster.certificates.nodes, cluster.agent_nodes)
])
])
|
|
#!/usr/bin/env python
"""
Unit tests for :mod:`clans.fmt`.
"""
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import clans.fmt
from io import StringIO
from datetime import datetime
TEST_DATA = {
'test_format_date': datetime(2015, 1, 28, 23, 46),
'test_format_date_dst': datetime(2012, 4, 12, 20, 6),
'test_format_plan': {
'lastupdated': datetime(2013, 8, 5, 13, 22),
'lastlogin': datetime(2013, 8, 7, 3, 42),
'username': 'username',
'planname': 'clever catchphrase',
'plan': 'this is my plan\n'
},
'test_br_stripping': "one<br>two<br/>three<br />four",
'test_crlf_stripping': "one\ntwo\rthree\r\nfour",
'test_html_escapes': ("I develop in a "quick & dirty" "
"<style>"),
'test_tag_stripping': "This is <b>bold</b> and <i>italic</i>",
'test_underline': ('This is <span '
'class="underline">underlined</span><!--u-->'),
'test_link_formatting': ('<a href="http://www.facebook.com/" '
'class="onplan">my favorite website</a>'),
'test_love_formatting': ('[<a href="read.php?searchname=gorp" '
'class="planlove">GORP</a>]'),
'test_psub_formatting': '<p class="sub">we all live in a yellow</p>',
'test_hr_formatting': """I need a clean
<hr>break""",
'test_print_list': ['one', 'two', 'three', 'four'],
'test_print_autoread': {
'Level 1': ['bff', 'interesting', 'funny', 'gorp'],
'Level 2': ['roommate', 'rando'],
'Level 3': ['meh', ],
},
'test_print_search_results': [
('plan1', 1, ['snip one <b>term</b> context', ]),
('plan2', 2, ['snip one <b>term</b> context',
'snip two <b>term</b> context']),
('plan3', 2, ['snip <b>term</b> twice <b>term</b> twice', ])
],
}
class FakeStdout(unittest.TestCase):
def setUp(self):
self.real_stdout = sys.stdout
sys.stdout = StringIO()
def tearDown(self):
sys.stdout = self.real_stdout
class TestRaw(FakeStdout):
def setUp(self):
FakeStdout.setUp(self)
self.fmt = clans.fmt.RawFormatter()
def test_format_date(self):
date = TEST_DATA['test_format_date']
text = self.fmt.format_date(date)
expect = '2015-01-28 23:46:00' # no assumed timezone
self.assertEqual(expect, text)
# plan format test: a header, two newlines, then the plan.
def test_format_plan(self):
data = TEST_DATA['test_format_plan']
text = self.fmt.format_plan(**data)
expect = """\
Username: username
Last Updated: 2013-08-05 13:22:00
Last Login: 2013-08-07 03:42:00
Name: clever catchphrase
this is my plan
"""
self.assertEqual(expect, text)
# other tests
def test_print_list(self):
lst = TEST_DATA['test_print_list']
self.fmt.print_list(lst)
output = sys.stdout.getvalue()
expect = u"""\
one
two
three
four
"""
self.assertEqual(expect, output)
def test_print_bulleted_list(self):
lst = TEST_DATA['test_print_list']
self.fmt.print_list(lst, bullets=True)
output = sys.stdout.getvalue()
expect = u"""\
- one
- two
- three
- four
"""
self.assertEqual(expect, output)
def test_print_search_results(self):
results = TEST_DATA['test_print_search_results']
self.fmt.print_search_results(results)
output = sys.stdout.getvalue()
expect = u"""\
[plan1]: 1
- snip one <b>term</b> context
[plan2]: 2
- snip one <b>term</b> context
- snip two <b>term</b> context
[plan3]: 2
- snip <b>term</b> twice <b>term</b> twice
"""
self.assertEqual(expect, output)
def test_print_autoread(self):
autoread = TEST_DATA['test_print_autoread']
self.fmt.print_autoread(autoread)
output = sys.stdout.getvalue()
expect = u"""\
Level 1:
bff
interesting
funny
gorp
Level 2:
roommate
rando
Level 3:
meh
"""
self.assertEqual(expect, output)
class TestJSON(FakeStdout):
def setUp(self):
FakeStdout.setUp(self)
self.fmt = clans.fmt.JSONFormatter()
def test_format_date(self):
date = TEST_DATA['test_format_date']
text = self.fmt.format_date(date)
expect = '2015-01-28T23:46:00Z' # ISO 8601, assume UTC
self.assertEqual(expect, text)
def test_format_plan(self):
data = TEST_DATA['test_format_plan']
text = self.fmt.format_plan(**data)
expect = """\
{
"username": "username",
"lastupdated": "2013-08-05T13:22:00Z",
"lastlogin": "2013-08-07T03:42:00Z",
"planname": "clever catchphrase",
"plan": "this is my plan\\n"
}"""
self.assertEqual(expect, text)
# other tests
def test_print_list(self):
lst = TEST_DATA['test_print_list']
self.fmt.print_list(lst)
output = sys.stdout.getvalue()
expect = u"""\
[
"one",
"two",
"three",
"four"
]
"""
self.assertEqual(expect, output)
def test_print_search_results(self):
results = TEST_DATA['test_print_search_results']
self.fmt.print_search_results(results)
output = sys.stdout.getvalue()
expect = u"""\
[
[
"plan1",
1,
[
"snip one <b>term</b> context"
]
],
[
"plan2",
2,
[
"snip one <b>term</b> context",
"snip two <b>term</b> context"
]
],
[
"plan3",
2,
[
"snip <b>term</b> twice <b>term</b> twice"
]
]
]
"""
self.assertEqual(expect, output)
def test_print_autoread(self):
autoread = TEST_DATA['test_print_autoread']
self.fmt.print_autoread(autoread)
output = sys.stdout.getvalue()
expect = u"""\
{
"Level 1": [
"bff",
"interesting",
"funny",
"gorp"
],
"Level 2": [
"roommate",
"rando"
],
"Level 3": [
"meh"
]
}
"""
self.assertEqual(expect, output)
class TestText(TestRaw):
def setUp(self):
FakeStdout.setUp(self)
kwargs = {'timezone': 'US/Central'}
self.fmt = clans.fmt.TextFormatter(**kwargs)
def test_format_date(self):
date = TEST_DATA['test_format_date']
text = self.fmt.format_date(date)
expect = 'Wed January 28 2015, 5:46 PM'
# timezone should have been converted to central.
# same format as plans web (but w/o ordinal suffix)
self.assertEqual(expect, text)
def test_format_date_dst(self):
date = TEST_DATA['test_format_date_dst']
text = self.fmt.format_date(date)
expect = 'Thu April 12 2012, 3:06 PM'
self.assertEqual(expect, text)
def test_alt_tz(self):
date = TEST_DATA['test_format_date']
kwargs = {'timezone': 'US/Pacific'}
fmt = clans.fmt.TextFormatter(**kwargs)
text = fmt.format_date(date)
expect = 'Wed January 28 2015, 3:46 PM'
self.assertEqual(expect, text)
def test_no_tz(self):
date = TEST_DATA['test_format_date']
fmt = clans.fmt.TextFormatter()
text = fmt.format_date(date)
self.assertIn('January', text)
def test_format_plan(self):
data = TEST_DATA['test_format_plan']
text = self.fmt.format_plan(**data)
expect = """\
Username: username
Last Updated: Mon August 5 2013, 8:22 AM
Last Login: Tue August 6 2013, 10:42 PM
Name: clever catchphrase
this is my plan
"""
self.assertEqual(expect, text)
# filter_html tests
def test_br_stripping(self):
html = TEST_DATA['test_br_stripping']
text = self.fmt.filter_html(html)
expect = "one\ntwo\nthree\nfour"
self.assertEqual(expect, text)
def test_crlf_stripping(self):
html = TEST_DATA['test_crlf_stripping']
text = self.fmt.filter_html(html)
expect = "onetwothreefour"
self.assertEqual(expect, text)
def test_html_escapes(self):
html = TEST_DATA['test_html_escapes']
text = self.fmt.filter_html(html)
expect = 'I develop in a "quick & dirty" <style>'
self.assertEqual(expect, text)
def test_tag_stripping(self):
html = TEST_DATA['test_tag_stripping']
expect = "This is bold and italic"
text = self.fmt.filter_html(html)
self.assertEqual(expect, text)
def test_underline(self):
html = TEST_DATA['test_underline']
expect = 'This is underlined'
text = self.fmt.filter_html(html)
self.assertEqual(expect, text)
def test_link_formatting(self):
html = TEST_DATA['test_link_formatting']
expect = '[http://www.facebook.com/|my favorite website]'
text = self.fmt.filter_html(html)
self.assertEqual(expect, text)
def test_love_formatting(self):
html = TEST_DATA['test_love_formatting']
expect = '[GORP]'
text = self.fmt.filter_html(html)
self.assertEqual(expect, text)
def test_psub_formatting(self):
html = TEST_DATA['test_psub_formatting']
expect = 'we all live in a yellow'
text = self.fmt.filter_html(html)
self.assertEqual(expect, text)
def test_hr_formatting(self):
html = TEST_DATA['test_hr_formatting']
expect = """I need a clean
======================================================================
break"""
text = self.fmt.filter_html(html)
self.assertEqual(expect, text)
def test_print_list_in_columns(self):
lst = TEST_DATA['test_print_list']
self.fmt.print_list(lst, columns=True)
output = sys.stdout.getvalue()
expect = u"""\
one two three four
"""
self.assertEqual(expect, output)
def test_print_search_results(self):
results = TEST_DATA['test_print_search_results']
self.fmt.print_search_results(results)
output = sys.stdout.getvalue()
expect = u"""\
[plan1]: 1
- snip one term context
[plan2]: 2
- snip one term context
- snip two term context
[plan3]: 2
- snip term twice term twice
"""
self.assertEqual(expect, output)
class TestColor(TestText):
def setUp(self):
FakeStdout.setUp(self)
kwargs = {'timezone': 'US/Central'}
self.fmt = clans.fmt.ColorFormatter(**kwargs)
# plan format test
def test_format_plan(self):
data = TEST_DATA['test_format_plan']
text = self.fmt.format_plan(**data)
expect = """\
%sUsername%s: username
%sLast Updated%s: Mon August 5 2013, 8:22 AM
%sLast Login%s: Tue August 6 2013, 10:42 PM
%sName%s: clever catchphrase
this is my plan
""" % (('\x1b[1m', '\x1b[22m') * 4)
self.assertEqual(expect, text)
# filter_html tests
def test_tag_stripping(self):
html = TEST_DATA['test_tag_stripping']
expect = "This is \x1b[1mbold\x1b[22m and \x1b[2mitalic\x1b[22m"
# italic is actually 'dim', bold 'bright'
text = self.fmt.filter_html(html)
self.assertEqual(expect, text)
def test_underline(self):
html = TEST_DATA['test_underline']
expect = 'This is \x1b[4munderlined\x1b[0m'
text = self.fmt.filter_html(html)
self.assertEqual(expect, text)
def test_link_formatting(self):
html = TEST_DATA['test_link_formatting']
expect = ('[\x1b[32mhttp://www.facebook.com/\x1b[39m|\x1b[35m'
'my favorite website\x1b[39m]')
# green for the link, magenta for the link text
text = self.fmt.filter_html(html)
self.assertEqual(expect, text)
def test_love_formatting(self):
html = TEST_DATA['test_love_formatting']
expect = '[\x1b[1m\x1b[34mGORP\x1b[22m\x1b[39m]' # blue and bold
text = self.fmt.filter_html(html)
self.assertEqual(expect, text)
def test_hr_formatting(self):
html = TEST_DATA['test_hr_formatting']
expect = """I need a clean
\x1b[31m======================================================================\x1b[39m
break"""
# red
text = self.fmt.filter_html(html)
self.assertEqual(expect, text)
# other tests
def test_print_search_results(self):
results = TEST_DATA['test_print_search_results']
self.fmt.print_search_results(results)
output = sys.stdout.getvalue()
expect = u"""\
[{link}plan1{unlink}]: 1
- snip one {bold}term{unbold} context
[{link}plan2{unlink}]: 2
- snip one {bold}term{unbold} context
- snip two {bold}term{unbold} context
[{link}plan3{unlink}]: 2
- snip {bold}term{unbold} twice {bold}term{unbold} twice
""".format(bold='\x1b[1m', unbold='\x1b[22m',
link='\x1b[1m\x1b[34m', unlink='\x1b[22m\x1b[39m')
self.assertEqual(expect, output)
if __name__ == "__main__":
unittest.main(buffer=True)
|
|
import collections
import sys
import os
from enum import Enum
from functools import reduce
from inspect import signature
from time import gmtime, strftime
import numpy as np
import tensorflow as tf
# noinspection PyProtectedMember
from tensorflow.python.ops.gradients_impl import _hessian_vector_product as _hvp
from tensorflow.python.client.session import register_session_run_conversion_functions
from tensorflow.python.ops import control_flow_ops
utils_settings = {
'WSA': True, # gives a warning when new nodes are being created during session runtime
}
def get_available_devices(gpu_only=False):
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
if gpu_only: return [x.name for x in local_device_protos if x.device_type == 'GPU']
else: return [x.name for x in local_device_protos]
def wsr(node): # warning on session running
if utils_settings['WSA'] and tf.get_default_session():
print('Warning: creating nodes at tf.Session runtime: node %s' % node,
file=sys.stderr)
return node
def call_method_optional_param(method, optional_param):
"""
Convenience method for a function that may or may not have one parameter (like feed dictionary suppliers)
:param method:
:param optional_param:
:return: the result of calling the method.
"""
return method(optional_param) if len(signature(method).parameters) > 0 else method()
CONFIG_GPU_GROWTH = tf.ConfigProto(allow_soft_placement=True)
CONFIG_GPU_GROWTH.gpu_options.allow_growth = True
def simple_name(tensor_or_name):
if isinstance(tensor_or_name, str): return tensor_or_name.split(':')[0]
return tensor_or_name.name.split(':')[0]
class SummaryUtil:
def __init__(self, ops=None, condition=None, writer=None, fd_supplier=None):
"""
Utility class used by SummaryUtils to collect summary ops
:param ops: summary operations
:param condition: (default: always True)
:param writer: either a tf.summary.FileWriter or a string for standard log dirs
:param fd_supplier: (default None supplier)
"""
assert ops is not None
self.ops = ops
self.condition = condition if condition else lambda step: True
self.writer = (tf.summary.FileWriter(writer + strftime("%a_%d_%b_%Y_%H:%M:%S", gmtime()))
if isinstance(writer, str) else writer)
self.fd_supplier = fd_supplier if fd_supplier else lambda: None
class SummaryUtils:
def __init__(self, *summary_utils_list):
self.summary_list = summary_utils_list
def run(self, session, step):
[su.writer.add_summary(
session.run(su.ops, feed_dict=su.fd_supplier(step)), step
) for su in self.summary_list if su.condition(step)]
PrintUtil = collections.namedtuple('PrintSummaryUtil', ['print_supplier', 'condition'])
def stepwise_pu(print_supplier, every_n_steps):
return PrintUtil(print_supplier=print_supplier,
condition=lambda step: step == every_n_steps)
def unconditional_pu(print_supplier):
return PrintUtil(print_supplier=print_supplier, condition=lambda step: True)
class PrintUtils: # TODO fix this class... Looks horrible now
def __init__(self, *print_list, add_print=False): # here can be added also the standard output
self.print_utils = print_list
self.add_print = add_print
def run(self, session=None, step=None):
if self.add_print:
[print(pu.print_supplier(session, step)) for pu in self.print_utils if pu.condition(step)]
else:
[pu.print_supplier(session, step) for pu in self.print_utils if pu.condition(step)]
class MergedUtils:
def __init__(self, *utils):
self.merged = utils
def run(self, session, step):
[u.run(session, step) for u in self.merged]
# ------------------------------------------------
# DATA PROCESSING
# ------------------------------------------------
def np_normalize_data(data, m=None, sd=None, return_mean_and_sd=False):
if m is None:
m = np.mean(data, 0)
sd = np.std(data, 0)
normalized_data = (data - m) / sd
if return_mean_and_sd:
return normalized_data, m, sd
return normalized_data
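# A minimal usage sketch of np_normalize_data (illustrative only; the data below is
# synthetic and the function name `_example_np_normalize_data` is not part of the API).
def _example_np_normalize_data():
    data = np.random.rand(100, 3) * 10. + 5.
    normalized, m, sd = np_normalize_data(data, return_mean_and_sd=True)
    # columns now have (approximately) zero mean and unit standard deviation
    print(np.mean(normalized, 0), np.std(normalized, 0))
    # the same statistics can be reused to normalize new data, e.g. a test set
    new_data = np.random.rand(10, 3) * 10. + 5.
    return np_normalize_data(new_data, m=m, sd=sd)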
def norm(v, name='norm'):
"""
The norm of a Tensor: if v is a vector then the norm is the Euclidean L2 norm, otherwise it computes the
Frobenius norm.
:param name: (optional, default norm) name of the name_scope
:param v: tf.Tensor or Variable
:return: a tensor that computes the norm
"""
with tf.name_scope(name):
return wsr(tf.sqrt(tf.reduce_sum(tf.square(v))))
def cross_entropy_loss(labels, logits, linear_input=True, eps=1.e-5, name='cross_entropy_loss'):
"""
Clipped standard-version cross entropy loss. Implemented because the standard function
tf.nn.softmax_cross_entropy_with_logits has a wrong (?) Hessian.
Clipped because it easily leads to nan otherwise, especially when calculating the Hessian.
Maybe the code could be optimized, since ln(softmax(z_j)) = z_j - ln(sum_i exp(z_i)). Should benchmark it.
:param labels:
:param logits: softmax or linear output of the model
:param linear_input: True (default) if logits are linear, in which case tf.nn.softmax will be applied to them
:param eps: (optional, default 1.e-5) clipping value for log.
:param name: (optional, default cross_entropy_loss) name scope for the defined operations.
:return: tensor for the cross_entropy_loss (WITHOUT MEAN ON THE EXAMPLES)
"""
with tf.name_scope(name):
softmax_out = tf.nn.softmax(logits) if linear_input else logits
return -tf.reduce_sum(
labels * tf.log(tf.clip_by_value(softmax_out, eps, 1. - eps)), reduction_indices=[1]
)
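# A small illustrative sketch of cross_entropy_loss on a two-example batch; the values
# are arbitrary and a TF1-style session is assumed, as in the rest of this module.
def _example_cross_entropy_loss():
    labels = tf.constant([[0., 1.], [1., 0.]])
    logits = tf.constant([[2., 1.], [0.5, 0.3]])
    per_example = cross_entropy_loss(labels, logits)  # shape (2,), no mean taken
    mean_loss = tf.reduce_mean(per_example)
    with tf.Session() as ss:
        return ss.run(mean_loss)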
def binary_cross_entropy(labels, logits, linear_input=True, eps=1.e-5, name='binary_cross_entropy_loss'):
"""
Same as cross_entropy_loss for the binary classification problem. The model should have a one-dimensional output;
the targets should be given in the form of a matrix of dimensions batch_size x 1 with values in [0,1].
:param labels:
:param logits: sigmoid or linear output of the model
:param linear_input: (default: True) if logits are linear, in which case tf.nn.sigmoid will be applied to them
:param eps: (optional, default 1.e-5) clipping value for log.
:param name: (optional, default binary_cross_entropy_loss) name scope for the defined operations.
:return: tensor for the cross_entropy_loss (WITHOUT MEAN ON THE EXAMPLES)
"""
with tf.name_scope(name):
sigmoid_out = tf.nn.sigmoid(logits)[:, 0] if linear_input else logits
# tgs = targets if len(targets.)
return - (labels * tf.log(tf.clip_by_value(sigmoid_out, eps, 1. - eps)) +
(1. - labels) * tf.log(tf.clip_by_value(1. - sigmoid_out, eps, 1. - eps)))
def l_diag_mul(d, m, name='diag_matrix_product'):
"""
Performs diag(d) * m product
:param d: n-dimensional vector
:param m: n x k matrix
:param name: optional name
:return: n x k matrix
"""
with tf.name_scope(name):
return tf.transpose(d * tf.transpose(m))
def matmul(a, b, benchmark=True, name='mul'): # TODO maybe put inside dot
"""
Interface function for matmul that works also with sparse tensors
:param a:
:param b:
:param benchmark:
:param name:
:return:
"""
a_is_sparse = isinstance(a, tf.SparseTensor)
with tf.name_scope(name):
if a_is_sparse:
mul = wsr(tf.matmul(tf.sparse_tensor_to_dense(a, default_value=0.), b))
if benchmark:
mul_ops = [wsr(tf.sparse_tensor_dense_matmul(a, b)),
mul, # others ?
# wsr(tf.nn.embedding_lookup_sparse()) # I couldn't figure out how this works......
]
def _avg_exe_times(op, repetitions):
from time import time
ex_times = []
for _ in range(repetitions):
st = time()
op.eval()
ex_times.append(time() - st)
return np.mean(ex_times[1:]), np.max(ex_times), np.min(ex_times)
with tf.Session(config=CONFIG_GPU_GROWTH).as_default():
tf.global_variables_initializer().run()  # TODO here should only initialize the necessary variables
# (downstream in the computation graph)
statistics = {op: _avg_exe_times(op, repetitions=4) for op in mul_ops}
[print(k, v) for k, v in statistics.items()]
mul = sorted(statistics.items(), key=lambda v: v[1][0])[0][0] # returns best one w.r.t. avg exe time
print(mul, 'selected')
else:
mul = wsr(tf.matmul(a, b))
return mul
# Define a context manager to suppress stdout and stderr.
class suppress_stdout_stderr(object):
"""
A context manager for doing a "deep suppression" of stdout and stderr in
Python, i.e. will suppress all print, even if the print originates in a
compiled C/Fortran sub-function.
This will not suppress raised exceptions, since exceptions are printed
to stderr just before a script exits, and after the context manager has
exited (at least, I think that is why it lets exceptions through).
"""
def __init__(self):
# Open a pair of null files
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
# Save the actual stdout (1) and stderr (2) file descriptors.
self.save_fds = (os.dup(1), os.dup(2))
def __enter__(self):
# Assign the null pointers to stdout and stderr.
os.dup2(self.null_fds[0], 1)
os.dup2(self.null_fds[1], 2)
def __exit__(self, *_):
# Re-assign the real stdout/stderr back to (1) and (2)
os.dup2(self.save_fds[0], 1)
os.dup2(self.save_fds[1], 2)
# Close the null files
os.close(self.null_fds[0])
os.close(self.null_fds[1])
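# A small illustrative sketch of the context manager above: output inside the
# `with` block (Python-level and C-level) is silenced for its duration.
def _example_suppress_stdout_stderr():
    print('you will see this')
    with suppress_stdout_stderr():
        print('but not this')
    print('and this again')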
def dot(v1, v2, name='dot'):
"""
Dot product (no idea why there isn't one already in tensorflow...) and some partial extensions for matrix-vector
multiplication. Should ideally mimic the `np.dot` method.
:param v1: first vector
:param v2: second vector
:return:
"""
v1_shape = v1.get_shape().ndims
v2_shape = v2.get_shape().ndims
# print(v1_shape, v2_shape)
if v1_shape > 1 and v2_shape > 1:
return tf.matmul(v1, v2, name=name)
elif v1_shape == 2 and v2_shape == 1:
if v1.get_shape().as_list()[1] != 1: # it is a true matrix
return tf.reduce_sum(v1 * v2, reduction_indices=[1], name=name)
else:
raise NotImplementedError('This would be a multiplication column vector times row vector.. TODO')
elif v1_shape == 1 and v2_shape == 2: # fine for mat
# this is a thing that is useful in DirectDoh
res = tf.reduce_sum(v1 * tf.transpose(v2), reduction_indices=[1])
if v2.get_shape().as_list()[1] == 1:
return tf.identity(res[0], name=name)
else:
return tf.identity(res[0], name=name)
elif v1_shape == 1 and v2_shape == 1:
return tf.reduce_sum(v1 * v2, name=name)
else:
raise NotImplementedError() # TODO finish implement this also with scalars and maybe with others
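# A quick illustrative sketch of `dot` on the vector-vector and matrix-vector cases;
# values are arbitrary and a TF1-style session is assumed, as elsewhere in this module.
def _example_dot():
    v = tf.constant([1., 2., 3.])
    m = tf.constant([[1., 0., 0.], [0., 1., 0.]])
    vv = dot(v, v)   # scalar: 14
    mv = dot(m, v)   # vector of length 2: [1., 2.]
    with tf.Session() as ss:
        return ss.run([vv, mv])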
def vectorize_all(var_list):
"""Given a list of tensors returns their concatenated vectorization.
Note that for matrices the vectorization is row-wise instead of column-wise as
it should be in Magnus. Could it be a problem?
:param var_list: a list of tensors or variables to vectorize
:return: vectorization of `var_list`"""
return wsr(tf.concat([tf.reshape(_w, [-1]) for _w in var_list], 0))
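# A small illustrative sketch: vectorize_all concatenates the row-wise flattening of
# each tensor in the list. A TF1-style session is assumed.
def _example_vectorize_all():
    w = tf.ones([2, 3])
    b = tf.zeros([3])
    flat = vectorize_all([w, b])  # shape (9,): six ones followed by three zeros
    with tf.Session() as ss:
        return ss.run(flat)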
def hv_1(_dyn, _lv, _v): # NOTE this is not an efficient implementation
"""Computes hessian-vector product (without storing the Hessian)
in a naive way. If _lv is a list of tensor, then vectorizes them with vectorize_all"""
res = []
for i in range(_v.get_shape()[0].value):
_hvi = tf.gradients(_dyn[i], _lv)
if isinstance(_lv, list):
_hvi = vectorize_all(_hvi)
res.append(
tf.reduce_sum(_hvi * _v)
)
return tf.stack(res) # takes forever....
def hvp(loss, w, v, name='hessian_vector_product'):
"""
Convenience function for hessian vector product.
:param name:
:param loss:
:param w:
:param v:
:return:
"""
# some parameter checking
if not isinstance(w, list) and not isinstance(v, list): # single inputs
if len(v.get_shape().as_list()) == 2 and len(w.get_shape().as_list()) == 1:
return tf.stack([
hvp(loss, w, v[:, k]) for k in range(v.get_shape().as_list()[1])
], axis=1)
return wsr(tf.identity(_hvp(loss, [w], [v]), name=name)[0])
return wsr(tf.identity(_hvp(loss, w, v), name=name))
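# A small illustrative sketch of hvp: for loss = sum(w^2) the Hessian is 2*I, so the
# Hessian-vector product should equal 2*v. A TF1-style session is assumed.
def _example_hvp():
    w = tf.Variable([1., 2., 3.])
    v = tf.constant([1., 0., -1.])
    loss = tf.reduce_sum(tf.square(w))
    hv = hvp(loss, w, v)  # expected value: [2., 0., -2.]
    with tf.Session() as ss:
        ss.run(tf.variables_initializer([w]))
        return ss.run(hv)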
def canonical_base(n):
identity = np.eye(n)
return [tf.constant(identity[:, j], dtype=tf.float32) for j in range(n)]
# ChunksInfo = collections.namedtuple('ChunksInfo', ['start', 'end', 'reshape'])
def as_list(obj):
"""
Makes sure `obj` is a list or otherwise converts it to a list with a single element.
:param obj:
:return: A `list`
"""
return obj if isinstance(obj, list) else [obj]
def as_tuple_or_list(obj):
"""
Make sure that `obj` is a tuple or a list, otherwise converts it into a list with a single element
:param obj:
:return: A `tuple` or a `list`
"""
return obj if isinstance(obj, (list, tuple)) else [obj]
def merge_dicts(*dicts):
return reduce(lambda a, nd: {**a, **nd}, dicts, {})
def reshape_generator(original_var, start, end):
return lambda merged: wsr(tf.reshape(merged[start:end], original_var.get_shape()))
def cleaner_accuracy(x, correct_labels, epsilon):
n_examples = x.shape[0]
correct_guesses = np.abs(x - correct_labels) < epsilon
return np.count_nonzero(correct_guesses) / n_examples
def one_hot_confidence(vec, epsilon):
n_examples = vec.shape[0]
near_one_hot = np.amax(vec, 1) > 1 - epsilon
# print("near_one_hot ({}/{}): {}".format(near_one_hot.size, n_examples, near_one_hot)) #DEBUG
return np.count_nonzero(near_one_hot) / n_examples
def one_or_zero_similarity(x):
n_examples = x.shape[0]
one_or_zero_similarity_vector = np.abs(x - 0.5) * 2
return np.sum(one_or_zero_similarity_vector) / n_examples
class VlMode(Enum):
"""
Possible arguments for `MergedVariable.var_list` getter function
"""
RAW = 0
BASE = 1
TENSOR = 2
class MergedVariable:
"""
This class (that should ideally be a subclass of tf.Variable) implements the vectorization and management
of a list of tf.Variables in a hopefully convenient way.
"""
def __init__(self, var_list, model=None, name='merged_variable_tensor'):
"""
:param var_list: List of variables to merge and vectorize
:param name: (optional) name for the tensor
"""
self._var_list = var_list
self.tensor = tf.identity(vectorize_all([
# MergedVariable.get_tensor(v)
v
for v in var_list]), name=name)
self.name = name
self.model = model
self.chunks_info_dict = {}
start = 0
for v in self.var_list(VlMode.BASE): # CHANGED (in var_list)
dim_var = reduce(lambda v1, v2: v1 * v2, v.get_shape().as_list(), 1)
end = start + dim_var
self.chunks_info_dict[v] = reshape_generator(v, start, end)
start += dim_var
def var_list(self, mode=VlMode.RAW):
"""
Get the chunks that define this variable.
:param mode: (optional, default VlMode.RAW)
VlMode.RAW: simply returns var_list, which may contain tf.Variables
or MergedVariables
VlMode.BASE: returns the list of tf.Variables that form the base of this
MergedVariable
VlMode.TENSOR: returns a list of tf.Variables or tf.Tensors from the MergedVariables
:return: A list that may contain tf.Tensors, tf.Variables and/or MergedVariables
"""
if mode == VlMode.RAW:
return self._var_list
elif mode == VlMode.BASE:
return self._get_base_variable_list()
elif mode == VlMode.TENSOR:
return self._var_list_as_tensors()  # returns one tensor plus the augmented copies
else:
raise NotImplementedError('mode %s does not exist' % mode)
def initialize(self, session=None):
"""
Initialize this merged variable or call `model.initialize` if a model is associated to this
variable (see `Network.initialize`)
:param session:
:return:
"""
ss = session or tf.get_default_session()
assert ss, 'No default session'
ss.run(tf.variables_initializer(self.var_list(VlMode.BASE)))
if self.model:
self.model.initialize(session=session)
def _get_base_variable_list(self):
"""
This method checks that all the elements of var_list are legitimate (tf.Variables or MergedVariables)
and returns the underlying tf.Variables.
:return:
"""
res = []
for v in self._var_list:
if isinstance(v, MergedVariable):
res.extend(v._get_base_variable_list())
elif isinstance(v, tf.Variable):
res.append(v)
else:
raise ValueError('something wrong here')
return res
def _var_list_as_tensors(self):
if any([isinstance(v, MergedVariable) for v in self._var_list]):
return [
#v
self.get_tensor(v)
for v in self._var_list]
else:
return [self.tensor]
def generate_swap_dict(self):
"""
Utility function to use in graph_editor.graph_replace in rfho.utils.vectorize_models
:return:
"""
return {v.op.outputs[0]: reshape(self.tensor) for v, reshape in self.chunks_info_dict.items()}
def assign(self, value, use_locking=False):
"""
Behaves as tf.Variable.assign, building assign ops for the underlying (original) Variables
:param value: rank-1 tensor. Assumes it has the same structure as the tensor contained in the object.
:param use_locking: (optional) see use_locking in `tf.Variables.assign`
:return: A single grouped op wrapping the underlying `tf.Variable.assign` ops.
"""
assign_ops = [
wsr(v.assign(reshape(value), use_locking=use_locking)) for v, reshape in self.chunks_info_dict.items()
]
return tf.group(*assign_ops)
def eval(self, feed_dict=None):
"""
Calls `eval` on `self.tensor`
:param feed_dict:
:return:
"""
return self.tensor.eval(feed_dict=feed_dict)
def get_shape(self):
"""
Calls `get_shape` on `self.tensor`
:return:
"""
return self.tensor.get_shape()
@property
def graph(self):
return self.tensor.graph
@staticmethod
def get_tensor(v): #
return v.tensor if isinstance(v, MergedVariable) else v
@property
def dtype(self):
return self.tensor.dtype
def __pow__(self, power, modulo=None):
return self.tensor.__pow__(power)
def __add__(self, other):
return self.tensor.__add__(other)
def __sub__(self, other):
return self.tensor.__sub__(other)
def __mul__(self, other):
return self.tensor.__mul__(other)
@staticmethod
def tensor_conversion(value, dtype=None, name=None, as_ref=False):
"""
tensorflow tensor conversion function. Simply gives to tensorflow the underlying tensor
:param value:
:param dtype:
:param name:
:param as_ref:
:return:
"""
# if as_ref:
# raise NotImplemented()
return tf.convert_to_tensor(value.tensor, dtype=dtype, name=name)
tf.register_tensor_conversion_function(MergedVariable, MergedVariable.tensor_conversion)
# import tensorflow.client.session as tf_pcs
register_session_run_conversion_functions(MergedVariable,
lambda merged_var: ([merged_var.tensor],
lambda val: val[0])) #
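# A minimal illustrative sketch of MergedVariable. Thanks to the conversion functions
# registered above, the merged object can be passed directly to tf.convert_to_tensor
# and to session.run. A TF1-style session is assumed; the variables are arbitrary.
def _example_merged_variable():
    w = tf.Variable(tf.zeros([2, 3]))
    b = tf.Variable(tf.zeros([3]))
    merged = MergedVariable([w, b])
    with tf.Session() as ss:
        merged.initialize(session=ss)
        return ss.run(merged)  # a single flat vector with 9 entries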
def flatten_list(lst):
from itertools import chain
return list(chain(*lst))
def simple_size_of_with_pickle(obj):
import pickle
import os
name = str(np.random.rand())
with open(name, mode='bw') as f:
pickle.dump(obj, f)
size = os.stat(name).st_size
os.remove(name)
return size
class GlobalStep:
"""
Helper for global step (probably would be present also in tensorflow)
"""
def __init__(self, start_from=0, name='global_step'):
self._var = tf.Variable(start_from, trainable=False, name=name)
self.increase = self.var.assign_add(1)
self.decrease = self.var.assign_sub(1)
self.gs_placeholder = tf.placeholder(tf.int32)
self.assign_op = self.var.assign(self.gs_placeholder)
def eval(self, auto_initialize=True):
if not auto_initialize:
return self.var.eval()
else:
try:
return self.var.eval()
except tf.errors.FailedPreconditionError:
tf.variables_initializer([self.var]).run()
return self.var.eval()
@property
def var(self):
return self._var
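# Illustrative usage sketch (hypothetical session and loop, not part of the original module):
def _example_global_step_usage(session, n_iterations):
    gs = GlobalStep()
    for _ in range(n_iterations):
        session.run(gs.increase)  # bump the counter once per iteration
    return gs.eval()  # current value; initializes the variable on first use if needed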
class ZMergedMatrix:
"""
Class for dealing with the Z quantities in the forward mode (which are the total derivative of the state
w.r.t. hyperparameters).
"""
def __init__(self, matrix_list, name='Z'):
self.components = as_list(matrix_list)
# Assumes that matrices rather than vectors are wanted: any rank-1 component is cast
# to a matrix of shape (n, 1).
for i, c in enumerate(self.components):
if c.get_shape().ndims == 1:
self.components[i] = tf.transpose(tf.stack([c]))
self.tensor = tf.concat(self.components, 0, name=name)
# def create_copy(self):
# new_components = []
# for c in self.components:
# with tf.name_scope('copy_z'):
# if isinstance(c, tf.Variable):
# print('copying variable')
# new_components.append(tf.Variable(c, name=simple_name(c)))
# else:
# print('copying tensor')
# new_components.append(c)
# return ZMergedMatrix(new_components)
def initializer(self):
"""
:return: an initializer op for the underlying variables (all components must be tf.Variable).
"""
assert all([isinstance(c, tf.Variable) for c in self.components]), 'this merged matrix is not composed of tf.Variables'
return tf.variables_initializer(self.components)
def assign(self, value_list):
if isinstance(value_list, ZMergedMatrix):
value_list = value_list.components
assert len(value_list) == len(self.components), 'value_list must have the same number of components as this ZMergedMatrix'
value_list = tf.tuple(value_list)  # tf.tuple makes every new value available before any assignment below runs
ao1 = [tf.assign(c, v) for c, v in zip(self.components, value_list)]
return tf.group(*ao1)
# noinspection PyUnusedLocal
def var_list(self, mode=VlMode.RAW):
return self.components
def __add__(self, other):
assert isinstance(other, ZMergedMatrix) # TODO make it a little bit more flexible (e.g. case of GD)
assert len(other.components) == len(self.components)
return ZMergedMatrix([c + v for c, v in zip(self.components, other.components)])
def get_shape(self):
"""
Calls `get_shape` on `self.tensor`
:return:
"""
return self.tensor.get_shape()
def eval(self, feed_dict=None):
return self.tensor.eval(feed_dict=feed_dict)
@property
def name(self):
"""
:return: name of the tensor
"""
return self.tensor.name
@staticmethod
def tensor_conversion(value, dtype=None, name=None, as_ref=False):
"""
TensorFlow tensor conversion function: simply hands TensorFlow the underlying tensor
:param value:
:param dtype:
:param name:
:param as_ref:
:return:
"""
if as_ref:
raise NotImplementedError()
return tf.convert_to_tensor(value.tensor, dtype=dtype, name=name)
tf.register_tensor_conversion_function(ZMergedMatrix, ZMergedMatrix.tensor_conversion)
register_session_run_conversion_functions(ZMergedMatrix,
lambda zmm: ([zmm.tensor],
lambda val: val[0])) #
|
|
"""
Methods for reading vcf files and importing them into our database representation.
We leverage pyvcf as much as possible.
"""
from django.db import reset_queries
import vcf
from main.model_utils import get_dataset_with_type
from main.models import Chromosome
from main.models import ExperimentSample
from main.models import ReferenceGenome
from main.models import Variant
from main.models import VariantCallerCommonData
from main.models import VariantAlternate
from main.models import VariantEvidence
from variants.common import update_parent_child_variant_fields
from variants.dynamic_snp_filter_key_map import update_filter_key_map
SV_TYPES = {
'DEL': 'DELETION',
'DUP': 'DUPLICATION',
'DUP:TANDEM': 'DUPLICATION',
'INV': 'INVERSION'
}
UNKNOWN_VARIANT_TYPE = 'unknown'
IGNORE_VCF_RECORD_KEYS = [
# Bug in pyvcf where some alleles are some sort of object rather than str.
'alleles',
# Fake/Pseudo VCFs parsed as CSVs have ID fields, which we are skipping
'ID'
]
class QueryCache(object):
"""Manual caching for queries to avoid excessive db calls.
NOTE: Only trying this with one model so far. Haven't determined how
useful this really is.
"""
def __init__(self):
self.uid_to_experiment_sample_map = {}
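# Illustrative sketch (hypothetical uid): the map is filled once per VCF from the
# samples of the first record, after which lookups replace per-record DB queries:
#     sample_obj = query_cache.uid_to_experiment_sample_map['sample_uid']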
def parse_alignment_group_vcf(alignment_group, vcf_dataset_type):
"""Parses the VCF associated with the AlignmentGroup and saves data there.
"""
vcf_dataset = get_dataset_with_type(alignment_group, vcf_dataset_type)
parse_vcf(vcf_dataset, alignment_group)
def parse_vcf(vcf_dataset, alignment_group):
"""
Parses the VCF and creates Variant models relative to ReferenceGenome.
alignment_group.options may contain:
* skip_het_only: throw out variants that are het only - if organism is
not diploid, these variants are likely to be just poorly mapped
reads, so discard the variants created by them. In the future, this
option will be moved to an alignment_group options dictionary.
"""
reference_genome = alignment_group.reference_genome
# This helper object will help prevent repeated calls to the database.
# We'll use it at least for ExperimentSamples.
query_cache = QueryCache()
# First count the number of records to give helpful status debug output.
record_count = 0
with open(vcf_dataset.get_absolute_location()) as fh:
vcf_reader = vcf.Reader(fh)
for record in vcf_reader:
record_count += 1
# Now iterate through the vcf file again and parse the data.
# NOTE: Do not save handles to the Variants, else suffer the wrath of a
# memory leak when parsing a large vcf file.
with open(vcf_dataset.get_absolute_location()) as fh:
vcf_reader = vcf.Reader(fh)
# First, update the reference_genome's key list with any new
# keys from this VCF.
reference_genome = ReferenceGenome.objects.get(id=reference_genome.id)
# Update the reference genome and grab it from the db again.
update_filter_key_map(reference_genome, vcf_reader)
reference_genome = ReferenceGenome.objects.get(id=reference_genome.id)
for record_idx, record in enumerate(vcf_reader):
print 'vcf_parser: Parsing %d out of %d' % (
record_idx + 1, record_count)
# Make sure the QueryCache object has experiment samples populated.
# Assumes every row has same samples. (Pretty sure this is true
# for well-formatted vcf file.)
if (len(query_cache.uid_to_experiment_sample_map) == 0 and
len(record.samples) > 0):
for sample in record.samples:
sample_uid = sample.sample
query_cache.uid_to_experiment_sample_map[sample_uid] = (
ExperimentSample.objects.get(uid=sample_uid))
# If the record has no GT_TYPE = 2 samples, then skip by default
if alignment_group.alignment_options['skip_het_only']:
if sum([s.gt_type == 2 for s in record.samples]) == 0:
print 'HET only, skipping record %d' % (record_idx + 1)
continue
# Get or create the Variant for this record. This step
# also generates the alternate objects and assigns their
# data fields as well.
get_or_create_variant(reference_genome,
record, vcf_dataset, alignment_group, query_cache)
# For large VCFs, the cached SQL object references can exhaust memory
# so we explicitly clear them here. Our efficiency doesn't really suffer.
reset_queries()
# Finally, update the parent/child relationships for these newly
# created variants.
update_parent_child_variant_fields(alignment_group)
def extract_raw_data_dict(vcf_record):
"""Extract a dictionary of raw data from the Record.
Returns:
Dictionary representation of the record.
"""
# Keys that we need to do extra work with in order to copy.
MANUAL_KEYS = ['INFO', 'samples']
data_dict = {}
# Extract keys that do not need to be handled manually.
for key, value in vcf_record.__dict__.iteritems():
if key in IGNORE_VCF_RECORD_KEYS or key in MANUAL_KEYS:
continue
data_dict[str(key)] = value
# The TYPE is just a property of the record object.
# TODO: Do we care about the var_subtype()? (ts/tv/complex/sv type/etc?)
# Just store UNKNOWN into TYPE, because the field is not used
# (if SV, the type is stored in SVTYPE)
data_dict['TYPE'] = UNKNOWN_VARIANT_TYPE
# Populate 'INFO'
if hasattr(vcf_record, 'INFO'):
populate_common_data_info(data_dict, vcf_record)
return data_dict
def populate_common_data_info(data_dict, vcf_record):
"""Parses the vcf record INFO field and updates the data dict.
"""
for key, value in vcf_record.INFO.iteritems():
effective_key = 'INFO_' + key
data_dict[effective_key] = value
def get_or_create_variant(reference_genome, vcf_record, vcf_dataset,
alignment_group=None, query_cache=None):
"""Create a variant and its relations.
A new Variant is only created if it doesn't exist already. The following
relations are created:
* VariantCallerCommonData
* VariantEvidence
* VariantAlternate
Also go through all per-alt keys and add them as a json field
to the VariantAlternate object.
Right now this assumes we are always using Freebayes as the variant caller.
Args:
reference_genome: The ReferenceGenome.
vcf_record: pyvcf Record object.
vcf_dataset: Source Dataset for this data.
query_cache: QueryCache helper object for making queries.
Returns:
Tuple (Variant, List<VariantAlt>)
"""
# Build a dictionary of data for this record.
raw_data_dict = extract_raw_data_dict(vcf_record)
# Extract the REQUIRED fields from the common data object.
type = str(raw_data_dict.pop('TYPE'))
chromosome_label = raw_data_dict.pop('CHROM')
position = int(raw_data_dict.pop('POS'))
ref_value = raw_data_dict.pop('REF')
alt_values = raw_data_dict.pop('ALT')
# Make sure the chromosome cited in the VCF exists for
# the reference genome the variant is being added to.
if not chromosome_label in [chrom.label for chrom in
Chromosome.objects.filter(reference_genome=reference_genome)]:
variant_string = ('TYPE: ' + str(type) + ' CHROM: ' + str(chromosome_label) +
' POS: ' + str(position) + ' REF: ' + str(ref_value) +
' ALT: ' + str(alt_values if len(alt_values)-1 else alt_values[0]))
raise Exception(('The CHROM field of the following variant does not match any of '
'the chromosomes belonging to its reference genome:' + variant_string + '\n'
'Chromosomes belonging to reference genome ' + str(reference_genome.label) +
' are: ' + str([str(chrom.label) for chrom in
Chromosome.objects.filter(reference_genome=reference_genome)]).strip('[]')))
# Try to find an existing Variant, or create it.
variant, created = Variant.objects.get_or_create(
reference_genome=reference_genome,
chromosome=Chromosome.objects.filter(
reference_genome=reference_genome,
label=chromosome_label)[0],
position=position,
ref_value=ref_value
)
# We don't want to search by type above, but we do want to save
# the type here. There are weird cases where we might be overwriting
# the type (i.e. two SNVs with identical ref/alt but different types),
# but I think this is OK for now.
if type:
variant.type = type
variant.save()
# Whether or not this is an SV (structural variant) is determined by the
# VariantAlternate data. Still, we want to expose this at the Variant
# level, so we check internally whether this is an SV.
is_sv = False
alts = []
all_alt_keys = reference_genome.get_variant_alternate_map().keys()
raw_alt_keys = [k for k in raw_data_dict.keys() if k in all_alt_keys]
for alt_idx, alt_value in enumerate(alt_values):
# Grab the alt data for this alt index.
alt_data = dict([(k, raw_data_dict[k][alt_idx]) for k in raw_alt_keys])
var_alt, var_created = VariantAlternate.objects.get_or_create(
variant=variant,
alt_value=alt_value)
# If this is a new alternate, initialize the data dictionary
if var_created:
var_alt.data = {}
# TODO: We are overwriting keys here. Is this desired?
var_alt.data.update(alt_data)
var_alt.save()
if 'INFO_SVTYPE' in alt_data:
is_sv = True
alts.append(var_alt)
# Remove all per-alt keys from raw_data_dict before passing to VCC create.
for k in raw_alt_keys: raw_data_dict.pop(k, None)
# Indicate whether this is SV type, making it queryable.
raw_data_dict['IS_SV'] = is_sv
# Create a common data object for this variant.
# NOTE: raw_data_dict only contains the values that were not popped until
# this point.
# Only create a VCCD if there is an associated alignment group.
if alignment_group:
common_data_obj = VariantCallerCommonData.objects.create(
alignment_group=alignment_group,
variant=variant,
source_dataset=vcf_dataset,
data=raw_data_dict
)
# TODO: What about num -2 objects? I'm really not excited about
# creating a VariantGenotype object, nor do I think it will
# be necessary, so skipping it for now, and keeping that data in
# the VCC object.
# Create a VariantEvidence object for each ExperimentSample.
# NOTE: VariantEvidence are automatically linked to the correct
# VariantAlternate after they are created in
# main.signals.post_variant_evidence_create()
for sample in vcf_record.samples:
sample_uid = sample.sample
sample_data_dict = extract_sample_data_dict(sample)
if query_cache is not None:
sample_obj = query_cache.uid_to_experiment_sample_map[sample_uid]
else:
sample_obj = ExperimentSample.objects.get(uid=sample_uid)
VariantEvidence.objects.create(
experiment_sample=sample_obj,
variant_caller_common_data=common_data_obj,
data=sample_data_dict)
return (variant, alts)
def extract_sample_data_dict(s):
"""Extract sample data from the pyvcf _Call object.
Args:
s: pyvcf _Call object.
Returns:
A dictionary representing the object.
"""
def _add_property(result_dict, s, key, eval_string):
"""Helper method to add keys. PyVCF is really buggy so we need to
be extra paranoid when parsing it.
"""
try:
result_dict[key.upper()] = getattr(s, eval_string)
except AttributeError:
result_dict[key.upper()] = None
result = {}
# The main properties we'll want to query across.
key_eval_string_pairs = (
('sample', 'sample'),
('called', 'called'),
('gt_bases', 'gt_bases'),
('gt_nums', 'gt_nums'),
('gt_type', 'gt_type'),
('is_het', 'is_het'),
('is_variant', 'is_variant'),
('phased', 'phased'),
)
for key, eval_string in key_eval_string_pairs:
_add_property(result, s, key, eval_string)
# Add data fields in slightly different way.
SAMPLE_DATA_FIELDS = [
'AO',
'DP',
'GL',
'GT',
'QA',
'QR',
'RO',
]
if hasattr(s, 'data'):
for key in SAMPLE_DATA_FIELDS:
if hasattr(s.data, key):
result[key] = getattr(s.data, key)
# TODO: Add support for SnpEff data.
return result
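# Illustrative shape of the returned dict (hypothetical values): the keys from
# key_eval_string_pairs are upper-cased, while the per-sample data fields keep
# their VCF names, e.g.
#     {'SAMPLE': 'sample_uid', 'CALLED': True, 'GT_TYPE': 2, 'GT': '1/1', 'DP': 42, ...}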
|
|
'''
Created 09/04/2014
@authors: Yifan Ning and Rich Boyce
@summary: parse drug synonyms, dbid and name from drugbank.xml, then parse synonyms
from the UNIIs records and match the results.
Output: FDA PreferredTerm, FDA synonym, UNII, Drugbank drug, drugbank id, matchedByKey
'''
from lxml import etree
from lxml.etree import XMLParser, parse
import os, sys
from sets import Set
DRUGBANK_XML = "../drugbank.xml"
UNIIS_NAMES = "../UNII-data/UNIIs 27Jun2014 Names.txt"
PT_INCHI_RECORDS = "../UNII-data/UNIIs 27Jun2014 Records.txt"
NS = "{http://www.drugbank.ca}"
DRUGBANK_BIO2RDF = "http://bio2rdf.org/drugbank:"
DRUGBANK_CA = "http://www.drugbank.ca/drugs/"
'''
data structure of drugbank.xml
</drug><drug type="small molecule" created="2005-06-13 07:24:05 -0600"
updated="2013-09-16 17:11:29 -0600" version="4.0">
<drugbank-id>DB00641</drugbank-id>
<name>Simvastatin</name>
<property>
<kind>InChIKey</kind>
<value>InChIKey=RYMZZMVNJRMUDD-HGQWONQESA-N</value>
<source>ChemAxon</source>
</property>
<synonyms>
<synonym>...</synonym>
</synonyms>
'''
if len(sys.argv) > 4:
validate_mode = str(sys.argv[1])
DRUGBANK_XML = str(sys.argv[2])
UNIIS_NAMES = str(sys.argv[3])
PT_INCHI_RECORDS = str(sys.argv[4])
else:
print "Usage: parseDBIdAndUNIIsBySynonymns.py <match mode>(0: (Inchi | name | synomyns) matched, 1: (Inchi && (name | synomyns matched)) <drugbank.xml> <FDA_UNII_Names> <FDA_UNII_Records>)"
sys.exit(1)
## get dict of mappings of drugbank id, name, inchikeys and synonyms
def parseDbIdAndSynonymns(root):
dict_name_inchi_syns = {}
for childDrug in root.iter(tag=NS + "drug"):
subId = childDrug.find(NS + "drugbank-id")
if subId is None:
continue
else:
drugbankid = subId.text
drugbankName = unicode(childDrug.find(NS + "name").text.upper())
dict_name_inchi_syns[drugbankName]={}
dict_name_inchi_syns[drugbankName]["dbid"] = drugbankid
## get inchikey
ikey = ""
for subProp in childDrug.iter(NS + "property"):
subKind = subProp.find(NS + "kind")
if subKind is None:
continue
elif subKind.text == "InChIKey":
subValue = subProp.find(NS + "value")
if subValue is not None:
ikey = subValue.text[9:]
dict_name_inchi_syns[drugbankName]["inchi"] = ikey
## get synonyms
set_syns = set()
syns = childDrug.find(NS + "synonyms")
if syns is not None:
for subProp in syns.iter():
if subProp is None or subProp.text is None:
continue
if subProp.text.strip().replace('\n', "") != "":
set_syns.add(subProp.text.upper())
dict_name_inchi_syns[drugbankName]["syns"] = set_syns
return dict_name_inchi_syns
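## Illustrative shape of the returned mapping (values taken from the drugbank.xml
## excerpt above; the synonym entry is a placeholder):
##     {u'SIMVASTATIN': {'dbid': u'DB00641',
##                       'inchi': u'RYMZZMVNJRMUDD-HGQWONQESA-N',
##                       'syns': set([u'<synonym>'])}}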
## get dict of unii with inchi from PT_INCHI_RECORDS
## UNII PT RN MF INCHIKEY EINECS NCIt ITIS NCBI PLANTS SMILES
def parsePTAndInchi(path):
dict_inchi = {}
for line in open(path,'r').readlines():
row = line.split('\t')
if len(row) == 0:
continue
unii = row[0]
inchi = row[4].strip().upper()
if unii and inchi:
dict_inchi[unii]=inchi
return dict_inchi
def validates(dict_unii_inchi, dict_xml, validate_mode):
#print "mode:" + validate_mode
#read mapping file that contains Name TYPE UNII PT
(NAME, TYPE, UNII, PT) = range(0,4)
for line in open(UNIIS_NAMES,'r').readlines():
row = line.split('\t')
if len(row) == 0:
continue
name = row[NAME].strip().upper()
unii = row[UNII]
inchi=""
if dict_unii_inchi.has_key(unii):
inchi = dict_unii_inchi[unii]
if inchi == "":
continue
drug_type = row[TYPE]
if (drug_type == "PT") or (drug_type == "SY") or (drug_type == "SN"):
if validate_mode == "0":
for k,v in dict_xml.items():
matchedBy = ""
if k == name:
matchedBy = "name"
if name in v["syns"]:
if matchedBy == "":
matchedBy = "synonyms"
else:
matchedBy += "ANDsynonyms"
if inchi == v["inchi"]:
if matchedBy == "":
matchedBy = "inchi"
else:
matchedBy += "ANDinchi"
if matchedBy != "":
#print "MATCHED:" + matchedBy
#print "NAMES:" + name + "|" + unii + "|" + inchi
#print "DICT_XML:" + str(k) + "|" + str(v)
drugbankid = v["dbid"]
drugbankName = k
output = row[PT].strip() +'\t' + row[NAME].strip() +'\t' + row[UNII].strip() +'\t'+ drugbankName +'\t'+ drugbankid + '\t' + matchedBy
print output.encode('utf-8').strip()
break
elif validate_mode == "1":
for k,v in dict_xml.items():
#print str(k) + "|" + str(v)
matchedBy = ""
if inchi == v["inchi"]:
if k == name:
matchedBy = "nameANDinchi"
if name in v["syns"]:
if matchedBy == "":
matchedBy = "synonymsANDinchi"
else:
matchedBy = "nameANDsynonymsANDinchi"
if matchedBy != "":
drugbankid = v["dbid"]
drugbankName = k
output = row[PT].strip() +'\t' + row[NAME].strip() +'\t' + row[UNII].strip() +'\t'+ drugbankName +'\t'+ drugbankid+ '\t' + matchedBy
print output.encode('utf-8').strip()
break
def main():
p = XMLParser(huge_tree=True)
tree = parse(DRUGBANK_XML,parser=p)
root = tree.getroot()
## get name, syns and inchi from drugbank.xml
dict_xml = parseDbIdAndSynonymns(root)
#print str(dict_xml)
dict_unii_inchi = parsePTAndInchi(PT_INCHI_RECORDS)
#print str(dict_unii_inchi)
validates(dict_unii_inchi, dict_xml, validate_mode)
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
import zipfile
import gzip
class SNP:
def __init__(self,data):
self.rsid = data[0].strip('"')
self.chromosome = data[1].strip('"')
if len(self.chromosome) == 1 and self.chromosome.isdigit():
self.chromosome = '0'+self.chromosome
self.position = int(data[2].strip('"'))
self.genotype = data[3].strip('"')
if len(data) == 5:
self.genotype += data[4]
if self.genotype in ('--','00'):
self.genotype = None
elif self.chromosome == 'X' and len(self.genotype) == 2:
self.genotype = self.genotype[0]
def __str__(self):
return " ".join((self.rsid,self.chromosome,str(self.position),str(self.genotype)))
def hasAllel(self,a):
if self.genotype is not None:
return a in self.genotype
return False
def otherAllel(self,a):
if not self.hasAllel(a):
return None
if self.genotype[0] == a:
return self.genotype[1]
return self.genotype[0]
def hasGenotype(self):
return self.genotype is not None
def isHomozygous(self):
return self.genotype is not None and self.genotype[0] == self.genotype[1]
def isHeterozygous(self):
return self.genotype is not None and self.genotype[0] != self.genotype[1]
def matches(self,other):
if self.genotype[0] == other.genotype[0]:
return True
if len(self.genotype) == 2 and len(other.genotype) == 2:
return self.genotype[0] == other.genotype[1] or self.genotype[1] == other.genotype[0] or self.genotype[1] == other.genotype[1]
return False
class Genome:
def __init__(self,files,label=None):
self.label = label
self.chromosomes = {}
for f in files:
print f
if f.endswith('.zip'):
inz = zipfile.ZipFile(f,'r')
for zinfo in inz.infolist():
self.read(inz.open(zinfo))
if f.endswith('.gz'):
self.read(gzip.GzipFile(f,'r'))
def read(self,infile):
sep = None
for l in infile:
if len(l) and l[0] != '#':
if l.startswith('RSID') or l.startswith('rsid'):
if ',' in l:
sep = ','
else:
parts = l.strip().split(sep)
snp = SNP(parts)
if not snp.chromosome in self.chromosomes:
self.chromosomes[snp.chromosome] = {}
self.chromosomes[snp.chromosome][snp.position] = snp
def __str__(self):
clist = self.chromosomes.keys()
clist.sort()
ret = []
if self.label is not None:
ret.append(self.label)
for c in clist:
ret.append(c+' '+str(len(self.chromosomes[c])))
return '\n'.join(ret)
def compare(self,other,diffFile = None):
all_cromes = self.chromosomes.keys()
for c in other.chromosomes.keys():
if not c in all_cromes:
all_cromes.append(c)
all_cromes.sort()
total_tcount = 0
total_ocount = 0
total_matches = 0
total_nulls = 0
total_transposes = 0
total_diffs = 0
for c in all_cromes:
tcount = 0
if c in self.chromosomes:
tcount = len(self.chromosomes[c])
ocount = 0
if c in other.chromosomes:
ocount = len(other.chromosomes[c])
total_tcount += tcount
total_ocount += ocount
print 'Chromosome',c+':',self.label,tcount,'SNPs,',other.label,ocount,'SNPs'
if c in self.chromosomes and c in other.chromosomes:
matches = 0
diffs = 0
nulls = 0
for position,snp in self.chromosomes[c].iteritems():
if position in other.chromosomes[c]:
matches += 1
if snp.genotype is None or other.chromosomes[c][position].genotype is None:
nulls += 1
elif snp.genotype != other.chromosomes[c][position].genotype:
if len(snp.genotype) == 2 and snp.genotype[0] == other.chromosomes[c][position].genotype[1] and snp.genotype[1] == other.chromosomes[c][position].genotype[0]:
pass
else:
diffs += 1
if diffFile is not None:
diffFile.write(c+','+str(position)+','+snp.genotype+','+other.chromosomes[c][position].genotype+'\n')
total_matches += matches
total_nulls += nulls
total_diffs += diffs
print '\tcommon SNPs:',matches,'nulls (one or both):',nulls,'different results:',diffs,'({:.2%} disagreement)'.format(diffs/float(matches))
else:
print '\tno matching SNPs'
print 'All:',self.label,total_tcount,'SNPs,',other.label,total_ocount,'SNPs'
print '\tcommon SNPs:',total_matches,'nulls (one or both):',total_nulls,'different results:',total_diffs,'({:.2%} disagreement)'.format(total_diffs/float(total_matches))
def phase(self,p1,p2):
ckeys = self.chromosomes.keys()
ckeys.sort()
mutations = 0
for clabel in ckeys:
if clabel != 'MT' and clabel != 'Y' and clabel != 'X':
print clabel
c = self.chromosomes[clabel]
positions = c.keys()
positions.sort()
for p in positions:
sc = c[p]
sp1 = p1.chromosomes[clabel][p]
sp2 = p2.chromosomes[clabel][p]
gc = sc.genotype
gp1 = sp1.genotype
gp2 = sp2.genotype
p1c = '?'
p2c = '?'
p1nc = '?'
p2nc = '?'
mutation = False
if not sc.hasGenotype():
if sp1.isHomozygous():
p1c = sp1.genotype[0]
p1nc = p1c
if sp2.isHomozygous():
p2c = sp2.genotype[0]
p2nc = p2c
sc.phase = p1c+p2c
if sc.isHomozygous():
sc.phased = sc.genotype
if sp1.hasAllel(gc[0]):
p1c = gc[0]
p1nc = sp1.otherAllel(p1c)
elif not sp1.hasGenotype():
p1c = gc[0]
else:
mutation = True
if sp2.hasAllel(gc[1]):
p2c = gc[1]
p2nc = sp2.otherAllel(p2c)
elif not sp2.hasGenotype():
p2c = gc[1]
else:
mutation = True
if sc.isHeterozygous():
if sp1.hasAllel(gc[0]) and sp2.hasAllel(gc[1]):
if not (sp2.hasAllel(gc[0]) and sp1.hasAllel(gc[1])):
p1c = gc[0]
p1nc = sp1.otherAllel(p1c)
p2c = gc[1]
p2nc = sp2.otherAllel(p2c)
elif sp1.hasAllel(gc[1]) and sp2.hasAllel(gc[0]):
if not (sp2.hasAllel(gc[1]) and sp1.hasAllel(gc[0])):
p1c = gc[1]
p1nc = sp1.otherAllel(p1c)
p2c = gc[0]
p2nc = sp2.otherAllel(p2c)
if not sp1.hasGenotype() and sp2.isHomozygous():
if sc.hasAllel(gp2[0]):
p2c = gp2[0]
p2nc = p2c
p1c = sc.otherAllel(p2c)
if not sp2.hasGenotype() and sp1.isHomozygous():
if sc.hasAllel(gp1[0]):
p1c = gp1[0]
p1nc = p1c
p2c = sc.otherAllel(p1c)
phase = ' '+p1c+ ' '+p2c
phase += ' '+p1c+ ' '+p1nc
phase += ' '+p2c+ ' '+p2nc
if mutation:
phase += ' mutation'
mutations += 1
if not (sc.isHeterozygous() and sp1.isHeterozygous() and sp2.isHeterozygous()) and '?' in phase:
if sc.hasGenotype() or sp1.hasGenotype() or sp2.hasGenotype():
if gp1 is None:
gp1 = '--'
if gp2 is None:
gp2 = '--'
if gc is None:
gc = '--'
print clabel,str(p), gc ,gp1,gp2,phase, sc.rsid
print mutations,'mutations'
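# Illustrative usage sketch (hypothetical file names, not part of the original
# script). Genome accepts a list of raw-data exports in .zip or .gz form.
def _example_trio_analysis():
    child = Genome(['child.zip'], label='child')
    mother = Genome(['mother.zip'], label='mother')
    father = Genome(['father.zip'], label='father')
    child.compare(mother)        # prints per-chromosome agreement statistics
    child.phase(mother, father)  # prints phased calls and a mutation count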
|
|
from discord.ext import commands, tasks
from .utils import db, checks
from collections import Counter, defaultdict
import discord
import asyncio
import asyncpg
import datetime
import logging
import yarl
import re
import io
log = logging.getLogger(__name__)
BLOB_GUILD_ID = 272885620769161216
EMOJI_REGEX = re.compile(r'<a?:.+?:([0-9]{15,21})>')
EMOJI_NAME_REGEX = re.compile(r'[0-9a-zA-Z\_]{2,32}')
class BlobEmoji(commands.Converter):
async def convert(self, ctx, argument):
guild = ctx.bot.get_guild(BLOB_GUILD_ID)
emojis = {e.id: e for e in guild.emojis}
m = EMOJI_REGEX.match(argument)
if m is not None:
emoji = emojis.get(int(m.group(1)))
elif argument.isdigit():
emoji = emojis.get(int(argument))
else:
emoji = discord.utils.find(lambda e: e.name == argument, emojis.values())
if emoji is None:
raise commands.BadArgument('Not a valid blob emoji.')
return emoji
def partial_emoji(argument, *, regex=EMOJI_REGEX):
if argument.isdigit():
# assume it's an emoji ID
return int(argument)
m = regex.match(argument)
if m is None:
raise commands.BadArgument("That's not a custom emoji...")
return int(m.group(1))
def emoji_name(argument, *, regex=EMOJI_NAME_REGEX):
m = regex.match(argument)
if m is None:
raise commands.BadArgument('Invalid emoji name.')
return argument
class EmojiURL:
def __init__(self, *, animated, url):
self.url = url
self.animated = animated
@classmethod
async def convert(cls, ctx, argument):
try:
partial = await commands.PartialEmojiConverter().convert(ctx, argument)
except commands.BadArgument:
try:
url = yarl.URL(argument)
if url.scheme not in ('http', 'https'):
raise RuntimeError
path = url.path.lower()
if not path.endswith(('.png', '.jpeg', '.jpg', '.gif')):
raise RuntimeError
return cls(animated=url.path.endswith('.gif'), url=url)
except Exception:
raise commands.BadArgument('Not a valid or supported emoji URL.') from None
else:
return cls(animated=partial.animated, url=str(partial.url))
def usage_per_day(dt, usages):
tracking_started = datetime.datetime(2017, 3, 31, tzinfo=datetime.timezone.utc)
now = discord.utils.utcnow()
if dt < tracking_started:
base = tracking_started
else:
base = dt
days = (now - base).total_seconds() / 86400 # 86400 seconds in a day
if int(days) == 0:
return usages
return usages / days
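# Worked example (hypothetical numbers): an emoji created 10 days ago and used 50
# times averages 50 / 10 = 5 uses/day; for emoji created before tracking started
# (2017-03-31) the average is taken from that date instead.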
class EmojiStats(db.Table, table_name='emoji_stats'):
id = db.Column(db.Integer(big=True, auto_increment=True), primary_key=True)
guild_id = db.Column(db.Integer(big=True), index=True)
emoji_id = db.Column(db.Integer(big=True), index=True)
total = db.Column(db.Integer, default=0)
@classmethod
def create_table(cls, *, exists_ok=True):
statement = super().create_table(exists_ok=exists_ok)
# create the indexes
sql = "CREATE UNIQUE INDEX IF NOT EXISTS emoji_stats_uniq_idx ON emoji_stats (guild_id, emoji_id);"
return statement + '\n' + sql
class Emoji(commands.Cog):
"""Custom emoji tracking"""
def __init__(self, bot):
self.bot = bot
self._batch_of_data = defaultdict(Counter)
self._batch_lock = asyncio.Lock()
self.bulk_insert.add_exception_type(asyncpg.PostgresConnectionError)
self.bulk_insert.start()
@property
def display_emoji(self) -> discord.PartialEmoji:
return discord.PartialEmoji(name='\N{LOWER LEFT PAINTBRUSH}\ufe0f')
def cog_unload(self):
self.bulk_insert.stop()
async def cog_command_error(self, ctx, error):
if isinstance(error, commands.BadArgument):
await ctx.send(error)
@tasks.loop(seconds=60.0)
async def bulk_insert(self):
query = """INSERT INTO emoji_stats (guild_id, emoji_id, total)
SELECT x.guild, x.emoji, x.added
FROM jsonb_to_recordset($1::jsonb) AS x(guild BIGINT, emoji BIGINT, added INT)
ON CONFLICT (guild_id, emoji_id) DO UPDATE
SET total = emoji_stats.total + excluded.total;
"""
async with self._batch_lock:
transformed = [
{'guild': guild_id, 'emoji': emoji_id, 'added': count}
for guild_id, data in self._batch_of_data.items()
for emoji_id, count in data.items()
]
self._batch_of_data.clear()
await self.bot.pool.execute(query, transformed)
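# Illustrative shape of the payload bound to $1 above (hypothetical values):
#     [{'guild': <guild_id>, 'emoji': <emoji_id>, 'added': <count>}, ...]
# jsonb_to_recordset turns it back into rows, and the ON CONFLICT clause
# accumulates the per-(guild_id, emoji_id) totals.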
async def do_redirect(self, message):
if len(message.attachments) == 0:
return
data = io.BytesIO()
await message.attachments[0].save(data)
data.seek(0)
ch = self.bot.get_channel(305838206119575552)
if ch is not None:
fmt = f'Suggestion from {message.author}: {message.clean_content}'
await ch.send(fmt, file=discord.File(data, message.attachments[0].filename))
def find_all_emoji(self, message, *, regex=EMOJI_REGEX):
return regex.findall(message.content)
@commands.Cog.listener()
async def on_message(self, message):
if message.guild is None:
return
if message.author.bot:
return # no bots.
# handle the redirection from #suggestions
if message.channel.id == 295012914564169728:
return await self.do_redirect(message)
matches = EMOJI_REGEX.findall(message.content)
if not matches:
return
async with self._batch_lock:
self._batch_of_data[message.guild.id].update(map(int, matches))
@commands.Cog.listener()
async def on_guild_emojis_update(self, guild, before, after):
# we only care when an emoji is added
lookup = { e.id for e in before }
added = [e for e in after if e.id not in lookup and len(e.roles) == 0]
if len(added) == 0:
return
log.info('Server %s has added %s emojis.', guild, len(added))
if guild.id != BLOB_GUILD_ID:
return # not the guild we care about
# this is the backup channel
channel = self.bot.get_channel(305841865293430795)
if channel is None:
return
for emoji in added:
async with self.bot.session.get(emoji.url) as resp:
if resp.status != 200:
continue
data = io.BytesIO(await resp.read())
await channel.send(emoji.name, file=discord.File(data, f'{emoji.name}.png'))
await asyncio.sleep(1)
async def get_all_blob_stats(self, ctx):
blob_guild = self.bot.get_guild(BLOB_GUILD_ID)
blob_ids = {e.id: e for e in blob_guild.emojis if len(e.roles) == 0 }
query = "SELECT COALESCE(SUM(total), 0) FROM emoji_stats;"
total_usage = await ctx.db.fetchrow(query)
query = """SELECT emoji_id, COALESCE(SUM(total), 0) AS "Count"
FROM emoji_stats
WHERE emoji_id = ANY($1::bigint[])
GROUP BY emoji_id
ORDER BY "Count" DESC;
"""
blob_usage = await ctx.db.fetch(query, list(blob_ids.keys()))
e = discord.Embed(title='Blob Statistics', colour=0xf1c40f)
total_count = sum(r['Count'] for r in blob_usage)
global_usage = total_usage[0]
e.add_field(name='Total Usage', value=f'{total_count} ({total_count / global_usage:.2%})')
def elem_to_string(key, count):
elem = blob_ids.get(key)
per_day = usage_per_day(elem.created_at, count)
return f'{elem}: {count} times, {per_day:.2f}/day ({count / total_count:.2%})'
top = [elem_to_string(key, count) for key, count in blob_usage[0:7]]
bottom = [elem_to_string(key, count) for key, count in blob_usage[-7:]]
e.add_field(name='Most Common', value='\n'.join(top), inline=False)
e.add_field(name='Least Common', value='\n'.join(bottom), inline=False)
await ctx.send(embed=e)
async def get_stats_for(self, ctx, emoji):
e = discord.Embed(colour=0xf1c40f, title='Statistics')
query = """SELECT COALESCE(SUM(total), 0) AS "Count"
FROM emoji_stats
WHERE emoji_id=$1
GROUP BY emoji_id;
"""
usage = await ctx.db.fetchrow(query, emoji.id)
usage = usage[0]
e.add_field(name='Emoji', value=emoji)
e.add_field(name='Usage', value=f'{usage}, {usage_per_day(emoji.created_at, usage):.2f}/day')
await ctx.send(embed=e)
@commands.group(hidden=True, invoke_without_command=True)
async def blobstats(self, ctx, *, emoji: BlobEmoji = None):
"""Usage statistics of blobs."""
if emoji is None:
await self.get_all_blob_stats(ctx)
else:
await self.get_stats_for(ctx, emoji)
@commands.command(aliases=['blobpost'], hidden=True)
@checks.is_in_guilds(BLOB_GUILD_ID)
@checks.is_admin()
async def blobsort(self, ctx):
"""Sorts the blob post."""
emojis = sorted([e.name for e in ctx.guild.emojis if len(e.roles) == 0])
fp = io.BytesIO()
pages = [emojis[i:i + 30] for i in range(0, len(emojis), 30)]
for number, page in enumerate(pages, 1):
fmt = f'Page {number}\n'
fp.write(fmt.encode('utf-8'))
for emoji in page:
fmt = f':{emoji}: = `:{emoji}:`\n'
fp.write(fmt.encode('utf-8'))
fp.write(b'\n')
fp.seek(0)
await ctx.send(file=discord.File(fp, 'blob_posts.txt'))
def emoji_fmt(self, emoji_id, count, total):
emoji = self.bot.get_emoji(emoji_id)
if emoji is None:
name = f'[\N{WHITE QUESTION MARK ORNAMENT}](https://cdn.discordapp.com/emojis/{emoji_id}.png)'
emoji = discord.Object(id=emoji_id)
else:
name = str(emoji)
per_day = usage_per_day(emoji.created_at, count)
p = count / total
return f'{name}: {count} uses ({p:.1%}), {per_day:.1f} uses/day.'
async def get_guild_stats(self, ctx):
e = discord.Embed(title='Emoji Leaderboard', colour=discord.Colour.blurple())
query = """SELECT
COALESCE(SUM(total), 0) AS "Count",
COUNT(*) AS "Emoji"
FROM emoji_stats
WHERE guild_id=$1
GROUP BY guild_id;
"""
record = await ctx.db.fetchrow(query, ctx.guild.id)
if record is None:
return await ctx.send('This server has no emoji stats...')
total = record['Count']
emoji_used = record['Emoji']
per_day = usage_per_day(ctx.me.joined_at, total)
e.set_footer(text=f'{total} uses over {emoji_used} emoji for {per_day:.2f} uses per day.')
query = """SELECT emoji_id, total
FROM emoji_stats
WHERE guild_id=$1
ORDER BY total DESC
LIMIT 10;
"""
top = await ctx.db.fetch(query, ctx.guild.id)
e.description = '\n'.join(f'{i}. {self.emoji_fmt(emoji, count, total)}' for i, (emoji, count) in enumerate(top, 1))
await ctx.send(embed=e)
async def get_emoji_stats(self, ctx, emoji_id):
e = discord.Embed(title='Emoji Stats')
cdn = f'https://cdn.discordapp.com/emojis/{emoji_id}.png'
# first verify it's a real ID
async with ctx.session.get(cdn) as resp:
if resp.status == 404:
e.description = "This isn't a valid emoji."
e.set_thumbnail(url='https://this.is-serious.business/09e106.jpg')
return await ctx.send(embed=e)
e.set_thumbnail(url=cdn)
# valid emoji ID so let's use it
query = """SELECT guild_id, SUM(total) AS "Count"
FROM emoji_stats
WHERE emoji_id=$1
GROUP BY guild_id;
"""
records = await ctx.db.fetch(query, emoji_id)
transformed = {k: v for k, v in records}
total = sum(transformed.values())
dt = discord.utils.snowflake_time(emoji_id)
# get the stats for this guild in particular
try:
count = transformed[ctx.guild.id]
per_day = usage_per_day(dt, count)
value = f'{count} uses ({count / total:.2%} of global uses), {per_day:.2f} uses/day'
except KeyError:
value = 'Not used here.'
e.add_field(name='Server Stats', value=value, inline=False)
# global stats
per_day = usage_per_day(dt, total)
value = f'{total} uses, {per_day:.2f} uses/day'
e.add_field(name='Global Stats', value=value, inline=False)
e.set_footer(text='These statistics are for servers I am in')
await ctx.send(embed=e)
@commands.group(invoke_without_command=True)
@commands.guild_only()
async def emojistats(self, ctx, *, emoji: partial_emoji = None):
"""Shows you statistics about the emoji usage in this server.
If no emoji is given, then it gives you the top 10 emoji used.
"""
if emoji is None:
await self.get_guild_stats(ctx)
else:
await self.get_emoji_stats(ctx, emoji)
@emojistats.command(name='server', aliases=['guild'])
@commands.guild_only()
async def emojistats_guild(self, ctx):
"""Shows you statistics about the local server emojis in this server."""
emoji_ids = [e.id for e in ctx.guild.emojis]
if not emoji_ids:
return await ctx.send('This guild has no custom emoji.')
query = """SELECT emoji_id, total
FROM emoji_stats
WHERE guild_id=$1 AND emoji_id = ANY($2::bigint[])
ORDER BY total DESC
"""
e = discord.Embed(title='Emoji Leaderboard', colour=discord.Colour.blurple())
records = await ctx.db.fetch(query, ctx.guild.id, emoji_ids)
total = sum(a for _, a in records)
emoji_used = len(records)
per_day = usage_per_day(ctx.me.joined_at, total)
e.set_footer(text=f'{total} uses over {emoji_used} emoji for {per_day:.2f} uses per day.')
top = records[:10]
value = '\n'.join(self.emoji_fmt(emoji, count, total) for (emoji, count) in top)
e.add_field(name=f'Top {len(top)}', value=value or 'Nothing...')
record_count = len(records)
if record_count > 10:
bottom = records[-10:] if record_count >= 20 else records[-record_count + 10:]
value = '\n'.join(self.emoji_fmt(emoji, count, total) for (emoji, count) in bottom)
e.add_field(name=f'Bottom {len(bottom)}', value=value)
await ctx.send(embed=e)
@commands.group(name='emoji')
@commands.guild_only()
@checks.has_guild_permissions(manage_emoji=True)
async def _emoji(self, ctx):
"""Emoji management commands."""
if ctx.subcommand_passed is None:
await ctx.send_help(ctx.command)
@_emoji.command(name='create')
async def _emoji_create(self, ctx, name: emoji_name, *, emoji: EmojiURL):
"""Create an emoji for the server under the given name.
You must have Manage Emoji permission to use this.
The bot must have this permission too.
"""
if not ctx.me.guild_permissions.manage_emojis:
return await ctx.send('Bot does not have permission to add emoji.')
reason = f'Action done by {ctx.author} (ID: {ctx.author.id})'
emoji_count = sum(e.animated == emoji.animated for e in ctx.guild.emojis)
if emoji_count >= ctx.guild.emoji_limit:
return await ctx.send('There are no more emoji slots in this server.')
async with self.bot.session.get(emoji.url) as resp:
if resp.status >= 400:
return await ctx.send('Could not fetch the image.')
if int(resp.headers['Content-Length']) >= (256 * 1024):
return await ctx.send('Image is too big.')
data = await resp.read()
coro = ctx.guild.create_custom_emoji(name=name, image=data, reason=reason)
async with ctx.typing():
try:
created = await asyncio.wait_for(coro, timeout=10.0)
except asyncio.TimeoutError:
return await ctx.send('Sorry, the bot is rate limited or it took too long.')
except discord.HTTPException as e:
return await ctx.send(f'Failed to create emoji somehow: {e}')
else:
return await ctx.send(f'Created {created}')
def setup(bot):
bot.add_cog(Emoji(bot))
|
|
from unittest import TestCase, main
from socketio.namespace import BaseNamespace
from socketio.virtsocket import Socket
from mock import MagicMock
class MockSocketIOServer(object):
"""Mock a SocketIO server"""
def __init__(self, *args, **kwargs):
self.sockets = {}
def get_socket(self, socket_id=''):
return self.sockets.get(socket_id)
class MockSocket(Socket):
pass
class ChatNamespace(BaseNamespace):
def __init__(self, *args, **kwargs):
self.use_set = args[0]
super(ChatNamespace, self).__init__(*args[1:], **kwargs)
def get_initial_acl(self):
acls = ['on_foo']
if self.use_set == True:
return set(acls)
else:
return acls
def on_foo(self):
return 'a'
def on_bar(self):
return 'b'
def on_baz(foo, bar, baz):
return 'c'
class GlobalNamespace(BaseNamespace):
def on_woot(self):
return ''
def on_tobi(self):
return ''
class TestBaseNamespace(TestCase):
def setUp(self):
server = MockSocketIOServer()
self.environ = {}
socket = MockSocket(server, {})
socket.error = MagicMock()
self.environ['socketio'] = socket
self.ns = GlobalNamespace(self.environ, '/woot')
def test_process_packet_disconnect(self):
pkt = {'type': 'disconnect',
'endpoint': '/woot'
}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_process_packet_connect(self):
"""processing a connection packet """
pkt = {'type': 'connect',
'endpoint': '/tobi',
'qs': ''
}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
# processing a connection packet with query string
pkt = {'type': 'connect',
'endpoint': '/test',
'qs': '?test=1'
}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_process_packet_heartbeat(self):
"""processing a heartbeat packet """
pkt = {'type': 'heartbeat',
'endpoint': ''
}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_process_packet_message(self):
"""processing a message packet """
pkt = {'type': 'message',
'endpoint': '',
'data': 'woot'}
data = self.ns.process_packet(pkt)
self.assertEqual(data, pkt['data'])
assert not self.environ['socketio'].error.called
# processing a message packet with id and endpoint
pkt = {'type': 'message',
'id': 5,
'ack': True,
'endpoint': '/tobi',
'data': ''}
data = self.ns.process_packet(pkt)
self.assertEqual(data, pkt['data'])
assert not self.environ['socketio'].error.called
def test_process_packet_json(self):
"""processing json packet """
pkt = {'type': 'json',
'endpoint': '',
'data': '2'}
data = self.ns.process_packet(pkt)
self.assertEqual(data, pkt['data'])
assert not self.environ['socketio'].error.called
# processing json packet with message id and ack data
pkt = {'type': 'json',
'id': 1,
'endpoint': '',
'ack': 'data',
'data': {u'a': u'b'}}
data = self.ns.process_packet(pkt)
self.assertEqual(data, pkt['data'])
assert not self.environ['socketio'].error.called
def test_process_packet_event(self):
"""processing an event packet """
pkt = {'type': 'event',
'name': 'woot',
'endpoint': '',
'args': []}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
# processing an event packet with message id and ack
pkt = {'type': 'event',
'id': 1,
'ack': 'data',
'name': 'tobi',
'endpoint': '',
'args': []}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_process_packet_ack(self):
"""processing a ack packet """
pkt = {'type': 'ack',
'ackId': 140,
'endpoint': '',
'args': []}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_process_packet_error(self):
"""processing error packet """
pkt = {'type': 'error',
'reason': '',
'advice': '',
'endpoint': ''}
self.ns.process_packet(pkt)
pkt = {'type': 'error',
'reason': 'transport not supported',
'advice': '',
'endpoint': ''}
self.ns.process_packet(pkt)
# processing error packet with reason and advice
pkt = {'type': 'error',
'reason': 'unauthorized',
'advice': 'reconnect',
'endpoint': ''}
self.ns.process_packet(pkt)
# processing error packet with endpoint
pkt = {'type': 'error',
'reason': '',
'advice': '',
'endpoint': '/woot'}
self.ns.process_packet(pkt)
def test_process_packet_message_with_new_line(self):
"""processing a newline in a message"""
pkt = {'type': 'message',
'data': '\n',
'endpoint': ''}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_del_acl_method(self):
pkt = {'type': 'event',
'name': 'foo',
'endpoint': '/chat',
'args': []}
message = ("Trying to delete an ACL method, but none were"
+ " defined yet! Or: No ACL restrictions yet, why would you"
+ " delete one?")
try:
self.ns.del_acl_method('on_foo')
self.ns.process_packet(pkt)
except ValueError as e:
self.assertEqual(
message,
e.args[0],
)
else:
raise Exception("""We should not be able to delete an acl that
doesn't exist""")
def test_allowed_event_name_regex(self):
pkt = {'type': 'event',
'name': '$foo',
'endpoint': '/chat',
'args': []}
self.ns.process_packet(pkt)
args = ['unallowed_event_name',
'name must only contains alpha numerical characters',
]
kwargs = dict(msg_id=None, endpoint='/woot', quiet=False)
self.environ['socketio'].error.assert_called_with(*args, **kwargs)
def test_method_not_found(self):
""" test calling a method that doesn't exist """
pkt = {'type': 'event',
'name': 'foo',
'endpoint': '/chat',
'args': []
}
self.ns.process_packet(pkt)
kwargs = dict(
msg_id=None,
endpoint='/woot',
quiet=False
)
self.environ['socketio'].error.assert_called_with(
'no_such_method',
'The method "%s" was not found' % 'on_foo',
**kwargs
)
class TestChatNamespace(TestCase):
def setUp(self):
server = MockSocketIOServer()
self.environ = {}
socket = MockSocket(server, {})
socket.error = MagicMock()
self.environ['socketio'] = socket
self.ns = ChatNamespace(
False,
self.environ,
'/chat'
)
def test_allowed_event(self):
pkt = {'type': 'event',
'name': 'foo',
'endpoint': '/chat',
'args': []}
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_blocked_event(self):
pkt = {'type': 'event',
'name': 'bar',
'endpoint': '/chat',
'args': []}
self.ns.process_packet(pkt)
args = [
'method_access_denied',
'You do not have access to method "on_bar"',
]
kwargs = dict(
msg_id=None,
endpoint='/chat',
quiet=False
)
self.environ['socketio'].error.assert_called_with(*args, **kwargs)
def test_add_acl_method(self):
pkt = {'type': 'event',
'name': 'bar',
'endpoint': '/chat',
'args': []}
self.ns.add_acl_method('on_bar')
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_del_acl_method(self):
pkt = {'type': 'event',
'name': 'foo',
'endpoint': '/chat',
'args': []}
self.ns.del_acl_method('on_foo')
self.ns.process_packet(pkt)
args = [
'method_access_denied',
'You do not have access to method "on_foo"',
]
kwargs = dict(
msg_id=None,
endpoint='/chat',
quiet=False
)
self.environ['socketio'].error.assert_called_with(*args, **kwargs)
def test_lift_acl_restrictions(self):
pkt1 = {'type': 'event',
'name': 'foo',
'endpoint': '/chat',
'args': []}
self.ns.lift_acl_restrictions()
self.ns.process_packet(pkt1)
assert not self.environ['socketio'].error.called
pkt2 = {'type': 'event',
'name': 'bar',
'endpoint': '/chat',
'args': []}
self.ns.process_packet(pkt2)
assert not self.environ['socketio'].error.called
def test_use_set_on_acl(self):
self.ns = ChatNamespace(
True,
self.environ,
'/chat'
)
pkt = {'type': 'event',
'name': 'bar',
'endpoint': '/chat',
'args': []}
self.ns.add_acl_method('on_bar')
self.ns.process_packet(pkt)
assert not self.environ['socketio'].error.called
def test_call_method_invalid_definition(self):
pkt = {'type': 'event',
'name': 'baz',
'endpoint': '/chat',
'args': []}
self.ns.add_acl_method('on_baz')
self.ns.process_packet(pkt)
kwargs = dict(msg_id=None, endpoint='/chat', quiet=False)
self.environ['socketio'].error.assert_called_with(
"invalid_method_args",
"The server-side method is invalid, as it doesn't "
"have 'self' as its first argument"
, **kwargs)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
"""
Apply a "Security Group" to the members of an etcd cluster.
Usage: apply-firewall.py --private-key <key-file> [--private] [--discovery-url <url>] [--hosts <ip> ...]
"""
import os
import re
import string
import argparse
from threading import Thread
import uuid
import colorama
from colorama import Fore, Style
import paramiko
import requests
import sys
import yaml
def get_nodes_from_args(args):
if args.discovery_url is not None:
return get_nodes_from_discovery_url(args.discovery_url)
return get_nodes_from_discovery_url(get_discovery_url_from_user_data())
def get_nodes_from_discovery_url(discovery_url):
try:
nodes = []
json = requests.get(discovery_url).json()
discovery_nodes = json['node']['nodes']
for node in discovery_nodes:
value = node['value']
ip = re.search('([0-9]{1,3}\.){3}[0-9]{1,3}', value).group(0)
nodes.append(ip)
return nodes
except Exception:
raise IOError('Could not load nodes from discovery url ' + discovery_url)
def get_discovery_url_from_user_data():
name = 'linode-user-data.yaml'
log_info('Loading discovery url from ' + name)
try:
current_dir = os.path.dirname(__file__)
user_data_file = file(os.path.abspath(os.path.join(current_dir, name)), 'r')
user_data_yaml = yaml.safe_load(user_data_file)
return user_data_yaml['coreos']['etcd2']['discovery']
except Exception:
raise IOError('Could not load discovery url from ' + name)
def validate_ip_address(ip):
return bool(re.match(r'([0-9]{1,3}\.){3}[0-9]{1,3}', ip))
def get_firewall_contents(node_ips, private=False):
rules_template_text = """*filter
:INPUT DROP [0:0]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [0:0]
:DOCKER - [0:0]
:Firewall-INPUT - [0:0]
-A INPUT -j Firewall-INPUT
-A FORWARD -j Firewall-INPUT
-A Firewall-INPUT -i lo -j ACCEPT
-A Firewall-INPUT -p icmp --icmp-type echo-reply -j ACCEPT
-A Firewall-INPUT -p icmp --icmp-type destination-unreachable -j ACCEPT
-A Firewall-INPUT -p icmp --icmp-type time-exceeded -j ACCEPT
# Ping
-A Firewall-INPUT -p icmp --icmp-type echo-request -j ACCEPT
# Accept any established connections
-A Firewall-INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
# Enable the traffic between the nodes of the cluster
-A Firewall-INPUT -s $node_ips -j ACCEPT
# Allow connections from docker container
-A Firewall-INPUT -i docker0 -j ACCEPT
# Accept ssh, http, https and git
-A Firewall-INPUT -m conntrack --ctstate NEW -m multiport$multiport_private -p tcp --dports 22,2222,80,443 -j ACCEPT
# Log and drop everything else
-A Firewall-INPUT -j REJECT
COMMIT
"""
multiport_private = ' -s 192.168.0.0/16' if private else ''
rules_template = string.Template(rules_template_text)
return rules_template.substitute(node_ips=string.join(node_ips, ','), multiport_private=multiport_private)
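# Illustrative sketch (TEST-NET addresses, not part of the original script):
def _example_rules():
    # Whitelist two cluster nodes; private=True limits ssh/http(s)/git to 192.168.0.0/16.
    return get_firewall_contents(['203.0.113.10', '203.0.113.11'], private=True)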
def apply_rules_to_all(host_ips, rules, private_key):
pkey = detect_and_create_private_key(private_key)
threads = []
for ip in host_ips:
t = Thread(target=apply_rules, args=(ip, rules, pkey))
t.setDaemon(False)
t.start()
threads.append(t)
for thread in threads:
thread.join()
def detect_and_create_private_key(private_key):
private_key_text = private_key.read()
private_key.seek(0)
if '-----BEGIN RSA PRIVATE KEY-----' in private_key_text:
return paramiko.RSAKey.from_private_key(private_key)
elif '-----BEGIN DSA PRIVATE KEY-----' in private_key_text:
return paramiko.DSSKey.from_private_key(private_key)
else:
raise ValueError('Invalid private key file ' + private_key.name)
def apply_rules(host_ip, rules, private_key):
# connect to the server via ssh
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host_ip, username='core', allow_agent=False, look_for_keys=False, pkey=private_key)
# copy the rules to the temp directory
temp_file = '/tmp/' + str(uuid.uuid4())
sftp = ssh.open_sftp()
sftp.open(temp_file, 'w').write(rules)
# move the rules into place, then enable and run the iptables-restore.service
commands = [
'sudo mv ' + temp_file + ' /var/lib/iptables/rules-save',
'sudo chown root:root /var/lib/iptables/rules-save',
'sudo systemctl enable iptables-restore.service',
'sudo systemctl start iptables-restore.service'
]
for command in commands:
stdin, stdout, stderr = ssh.exec_command(command)
stdout.channel.recv_exit_status()
ssh.close()
log_success('Applied rule to ' + host_ip)
def main():
colorama.init()
parser = argparse.ArgumentParser(description='Apply a "Security Group" to a Deis cluster')
parser.add_argument('--private-key', required=True, type=file, dest='private_key', help='Cluster SSH Private Key')
parser.add_argument('--private', action='store_true', dest='private', help='Only allow access to the cluster from the private network')
parser.add_argument('--discovery-url', dest='discovery_url', help='Etcd discovery url')
parser.add_argument('--hosts', nargs='+', dest='hosts', help='The IP addresses of the hosts to apply rules to')
args = parser.parse_args()
nodes = get_nodes_from_args(args)
hosts = args.hosts if args.hosts is not None else nodes
node_ips = []
for ip in nodes:
if validate_ip_address(ip):
node_ips.append(ip)
else:
log_warning('Invalid IP will not be added to security group: ' + ip)
if not len(node_ips) > 0:
raise ValueError('No valid IP addresses in security group.')
host_ips = []
for ip in hosts:
if validate_ip_address(ip):
host_ips.append(ip)
else:
log_warning('Host has invalid IP address: ' + ip)
if not len(host_ips) > 0:
raise ValueError('No valid host addresses.')
log_info('Generating iptables rules...')
rules = get_firewall_contents(node_ips, args.private)
log_success('Generated rules:')
log_debug(rules)
log_info('Applying rules...')
apply_rules_to_all(host_ips, rules, args.private_key)
log_success('Done!')
def log_debug(message):
print(Style.DIM + Fore.MAGENTA + message + Fore.RESET + Style.RESET_ALL)
def log_info(message):
print(Fore.CYAN + message + Fore.RESET)
def log_warning(message):
print(Fore.YELLOW + message + Fore.RESET)
def log_success(message):
print(Style.BRIGHT + Fore.GREEN + message + Fore.RESET + Style.RESET_ALL)
def log_error(message):
print(Style.BRIGHT + Fore.RED + message + Fore.RESET + Style.RESET_ALL)
if __name__ == "__main__":
try:
main()
except Exception as e:
log_error(e.message)
sys.exit(1)
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.names.client}.
"""
from __future__ import division, absolute_import
from zope.interface.verify import verifyClass, verifyObject
from twisted.python import failure
from twisted.python.filepath import FilePath
from twisted.python.runtime import platform
from twisted.internet import defer
from twisted.internet.error import CannotListenError, ConnectionRefusedError
from twisted.internet.interfaces import IResolver
from twisted.internet.test.modulehelpers import AlternateReactor
from twisted.internet.task import Clock
from twisted.names import error, client, dns, hosts, cache
from twisted.names.error import DNSQueryTimeoutError
from twisted.names.common import ResolverBase
from twisted.names.test.test_hosts import GoodTempPathMixin
from twisted.names.test.test_util import MemoryReactor
from twisted.test import proto_helpers
from twisted.trial import unittest
if platform.isWindows():
windowsSkip = "These tests need more work before they'll work on Windows."
else:
windowsSkip = None
class FakeResolver(ResolverBase):
def _lookup(self, name, cls, qtype, timeout):
"""
The getHostByNameTest does a different type of query that requires it
return an A record from an ALL_RECORDS lookup, so we accommodate that
here.
"""
if name == b'getHostByNameTest':
rr = dns.RRHeader(name=name, type=dns.A, cls=cls, ttl=60,
payload=dns.Record_A(address='127.0.0.1', ttl=60))
else:
rr = dns.RRHeader(name=name, type=qtype, cls=cls, ttl=60)
results = [rr]
authority = []
additional = []
return defer.succeed((results, authority, additional))
class StubPort(object):
"""
A partial implementation of L{IListeningPort} which only keeps track of
whether it has been stopped.
@ivar disconnected: A C{bool} which is C{False} until C{stopListening} is
called, C{True} afterwards.
"""
disconnected = False
def stopListening(self):
self.disconnected = True
class StubDNSDatagramProtocol(object):
"""
L{dns.DNSDatagramProtocol}-alike.
@ivar queries: A C{list} of tuples giving the arguments passed to
C{query} along with the L{defer.Deferred} which was returned from
the call.
"""
def __init__(self):
self.queries = []
self.transport = StubPort()
def query(self, address, queries, timeout=10, id=None):
"""
Record the given arguments and return a Deferred which will not be
called back by this code.
"""
result = defer.Deferred()
self.queries.append((address, queries, timeout, id, result))
return result
class GetResolverTests(unittest.TestCase):
"""
Tests for L{client.getResolver}.
"""
if windowsSkip:
skip = windowsSkip
def test_interface(self):
"""
L{client.getResolver} returns an object providing L{IResolver}.
"""
with AlternateReactor(Clock()):
resolver = client.getResolver()
self.assertTrue(verifyObject(IResolver, resolver))
def test_idempotent(self):
"""
Multiple calls to L{client.getResolver} return the same L{IResolver}
implementation.
"""
with AlternateReactor(Clock()):
a = client.getResolver()
b = client.getResolver()
self.assertIs(a, b)
class CreateResolverTests(unittest.TestCase, GoodTempPathMixin):
"""
Tests for L{client.createResolver}.
"""
if windowsSkip:
skip = windowsSkip
def _hostsTest(self, resolver, filename):
res = [r for r in resolver.resolvers if isinstance(r, hosts.Resolver)]
self.assertEqual(1, len(res))
self.assertEqual(res[0].file, filename)
def test_defaultHosts(self):
"""
L{client.createResolver} returns a L{resolve.ResolverChain} including a
L{hosts.Resolver} using I{/etc/hosts} if no alternate hosts file is
specified.
"""
with AlternateReactor(Clock()):
resolver = client.createResolver()
self._hostsTest(resolver, b"/etc/hosts")
def test_overrideHosts(self):
"""
The I{hosts} parameter to L{client.createResolver} overrides the hosts
file used by the L{hosts.Resolver} in the L{resolve.ResolverChain} it
returns.
"""
with AlternateReactor(Clock()):
resolver = client.createResolver(hosts=b"/foo/bar")
self._hostsTest(resolver, b"/foo/bar")
def _resolvConfTest(self, resolver, filename):
"""
Verify that C{resolver} has a L{client.Resolver} with a configuration
filename set to C{filename}.
"""
res = [r for r in resolver.resolvers if isinstance(r, client.Resolver)]
self.assertEqual(1, len(res))
self.assertEqual(res[0].resolv, filename)
def test_reactor(self):
"""
The L{client.Resolver} included in the L{resolve.ResolverChain} returned
by L{client.createResolver} uses the global reactor.
"""
reactor = Clock()
with AlternateReactor(reactor):
resolver = client.createResolver()
res = [r for r in resolver.resolvers if isinstance(r, client.Resolver)]
self.assertEqual(1, len(res))
self.assertIs(reactor, res[0]._reactor)
def test_defaultResolvConf(self):
"""
L{client.createResolver} returns a L{resolve.ResolverChain} including a
L{client.Resolver} using I{/etc/resolv.conf} if no alternate resolver
configuration file is specified.
"""
with AlternateReactor(Clock()):
resolver = client.createResolver()
self._resolvConfTest(resolver, b"/etc/resolv.conf")
def test_overrideResolvConf(self):
"""
The I{resolvconf} parameter to L{client.createResolver} overrides the
resolver configuration file used by the L{client.Resolver} in the
L{resolve.ResolverChain} it returns.
"""
with AlternateReactor(Clock()):
resolver = client.createResolver(resolvconf=b"/foo/bar")
self._resolvConfTest(resolver, b"/foo/bar")
def test_defaultServers(self):
"""
If no servers are given, addresses are taken from the file given by the
I{resolvconf} parameter to L{client.createResolver}.
"""
resolvconf = self.path()
resolvconf.setContent(b"nameserver 127.1.2.3\n")
with AlternateReactor(Clock()):
resolver = client.createResolver(resolvconf=resolvconf.path)
res = [r for r in resolver.resolvers if isinstance(r, client.Resolver)]
self.assertEqual(1, len(res))
self.assertEqual([], res[0].servers)
self.assertEqual([("127.1.2.3", 53)], res[0].dynServers)
def test_overrideServers(self):
"""
Servers passed to L{client.createResolver} are used in addition to any
found in the file given by the I{resolvconf} parameter.
"""
resolvconf = self.path()
resolvconf.setContent(b"nameserver 127.1.2.3\n")
with AlternateReactor(Clock()):
resolver = client.createResolver(
servers=[("127.3.2.1", 53)], resolvconf=resolvconf.path)
res = [r for r in resolver.resolvers if isinstance(r, client.Resolver)]
self.assertEqual(1, len(res))
self.assertEqual([("127.3.2.1", 53)], res[0].servers)
self.assertEqual([("127.1.2.3", 53)], res[0].dynServers)
def test_cache(self):
"""
L{client.createResolver} returns a L{resolve.ResolverChain} including a
L{cache.CacheResolver}.
"""
with AlternateReactor(Clock()):
resolver = client.createResolver()
res = [r for r in resolver.resolvers if isinstance(r, cache.CacheResolver)]
self.assertEqual(1, len(res))
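# Taken together, the tests above pin down what client.createResolver chains
# together. A rough hand-built equivalent (a sketch only, assuming the default
# file locations; the real createResolver may differ in details and ordering):
#
#     from twisted.names import resolve
#     manualChain = resolve.ResolverChain([
#         hosts.Resolver(file=b"/etc/hosts"),
#         cache.CacheResolver(),
#         client.Resolver(resolv=b"/etc/resolv.conf"),
#     ])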
class ResolverTests(unittest.TestCase):
"""
Tests for L{client.Resolver}.
"""
def test_clientProvidesIResolver(self):
"""
L{client} provides L{IResolver} through a series of free
functions.
"""
verifyObject(IResolver, client)
def test_clientResolverProvidesIResolver(self):
"""
L{client.Resolver} provides L{IResolver}.
"""
verifyClass(IResolver, client.Resolver)
def test_noServers(self):
"""
L{client.Resolver} raises L{ValueError} if constructed with neither
servers nor a nameserver configuration file.
"""
self.assertRaises(ValueError, client.Resolver)
def test_missingConfiguration(self):
"""
A missing nameserver configuration file results in no server information
        being loaded from it (i.e., not an exception) and a default server being
provided.
"""
resolver = client.Resolver(resolv=self.mktemp(), reactor=Clock())
self.assertEqual([("127.0.0.1", 53)], resolver.dynServers)
def test_closesResolvConf(self):
"""
As part of its constructor, C{StubResolver} opens C{/etc/resolv.conf};
then, explicitly closes it and does not count on the GC to do so for
it.
"""
handle = FilePath(self.mktemp())
resolvConf = handle.open(mode='w+')
class StubResolver(client.Resolver):
def _openFile(self, name):
return resolvConf
StubResolver(servers=["example.com", 53], resolv='/etc/resolv.conf',
reactor=Clock())
self.assertTrue(resolvConf.closed)
def test_domainEmptyArgument(self):
"""
L{client.Resolver.parseConfig} treats a I{domain} line without an
argument as indicating a domain of C{b""}.
"""
resolver = client.Resolver(servers=[("127.0.0.1", 53)])
resolver.parseConfig([b"domain\n"])
self.assertEqual(b"", resolver.domain)
def test_searchEmptyArgument(self):
"""
L{client.Resolver.parseConfig} treats a I{search} line without an
argument as indicating an empty search suffix.
"""
resolver = client.Resolver(servers=[("127.0.0.1", 53)])
resolver.parseConfig([b"search\n"])
self.assertEqual([], resolver.search)
def test_datagramQueryServerOrder(self):
"""
L{client.Resolver.queryUDP} should issue queries to its
L{dns.DNSDatagramProtocol} with server addresses taken from its own
C{servers} and C{dynServers} lists, proceeding through them in order
as L{DNSQueryTimeoutError}s occur.
"""
protocol = StubDNSDatagramProtocol()
servers = [object(), object()]
dynServers = [object(), object()]
resolver = client.Resolver(servers=servers)
resolver.dynServers = dynServers
resolver._connectedProtocol = lambda: protocol
expectedResult = object()
queryResult = resolver.queryUDP(None)
queryResult.addCallback(self.assertEqual, expectedResult)
self.assertEqual(len(protocol.queries), 1)
self.assertIs(protocol.queries[0][0], servers[0])
protocol.queries[0][-1].errback(DNSQueryTimeoutError(0))
self.assertEqual(len(protocol.queries), 2)
self.assertIs(protocol.queries[1][0], servers[1])
protocol.queries[1][-1].errback(DNSQueryTimeoutError(1))
self.assertEqual(len(protocol.queries), 3)
self.assertIs(protocol.queries[2][0], dynServers[0])
protocol.queries[2][-1].errback(DNSQueryTimeoutError(2))
self.assertEqual(len(protocol.queries), 4)
self.assertIs(protocol.queries[3][0], dynServers[1])
protocol.queries[3][-1].callback(expectedResult)
return queryResult
def test_singleConcurrentRequest(self):
"""
L{client.Resolver.query} only issues one request at a time per query.
Subsequent requests made before responses to prior ones are received
are queued and given the same response as is given to the first one.
"""
protocol = StubDNSDatagramProtocol()
resolver = client.Resolver(servers=[('example.com', 53)])
resolver._connectedProtocol = lambda: protocol
queries = protocol.queries
query = dns.Query(b'foo.example.com', dns.A, dns.IN)
# The first query should be passed to the underlying protocol.
firstResult = resolver.query(query)
self.assertEqual(len(queries), 1)
# The same query again should not be passed to the underlying protocol.
secondResult = resolver.query(query)
self.assertEqual(len(queries), 1)
# The response to the first query should be sent in response to both
# queries.
answer = object()
response = dns.Message()
response.answers.append(answer)
queries.pop()[-1].callback(response)
d = defer.gatherResults([firstResult, secondResult])
def cbFinished(responses):
firstResponse, secondResponse = responses
self.assertEqual(firstResponse, ([answer], [], []))
self.assertEqual(secondResponse, ([answer], [], []))
d.addCallback(cbFinished)
return d
def test_multipleConcurrentRequests(self):
"""
L{client.Resolver.query} issues a request for each different concurrent
query.
"""
protocol = StubDNSDatagramProtocol()
resolver = client.Resolver(servers=[('example.com', 53)])
resolver._connectedProtocol = lambda: protocol
queries = protocol.queries
# The first query should be passed to the underlying protocol.
firstQuery = dns.Query(b'foo.example.com', dns.A)
resolver.query(firstQuery)
self.assertEqual(len(queries), 1)
# A query for a different name is also passed to the underlying
# protocol.
secondQuery = dns.Query(b'bar.example.com', dns.A)
resolver.query(secondQuery)
self.assertEqual(len(queries), 2)
# A query for a different type is also passed to the underlying
# protocol.
thirdQuery = dns.Query(b'foo.example.com', dns.A6)
resolver.query(thirdQuery)
self.assertEqual(len(queries), 3)
def test_multipleSequentialRequests(self):
"""
After a response is received to a query issued with
L{client.Resolver.query}, another query with the same parameters
results in a new network request.
"""
protocol = StubDNSDatagramProtocol()
resolver = client.Resolver(servers=[('example.com', 53)])
resolver._connectedProtocol = lambda: protocol
queries = protocol.queries
query = dns.Query(b'foo.example.com', dns.A)
# The first query should be passed to the underlying protocol.
resolver.query(query)
self.assertEqual(len(queries), 1)
# Deliver the response.
queries.pop()[-1].callback(dns.Message())
# Repeating the first query should touch the protocol again.
resolver.query(query)
self.assertEqual(len(queries), 1)
def test_multipleConcurrentFailure(self):
"""
If the result of a request is an error response, the Deferreds for all
concurrently issued requests associated with that result fire with the
L{Failure}.
"""
protocol = StubDNSDatagramProtocol()
resolver = client.Resolver(servers=[('example.com', 53)])
resolver._connectedProtocol = lambda: protocol
queries = protocol.queries
query = dns.Query(b'foo.example.com', dns.A)
firstResult = resolver.query(query)
secondResult = resolver.query(query)
class ExpectedException(Exception):
pass
queries.pop()[-1].errback(failure.Failure(ExpectedException()))
return defer.gatherResults([
self.assertFailure(firstResult, ExpectedException),
self.assertFailure(secondResult, ExpectedException)])
def test_connectedProtocol(self):
"""
L{client.Resolver._connectedProtocol} returns a new
L{DNSDatagramProtocol} connected to a new address with a
cryptographically secure random port number.
"""
resolver = client.Resolver(servers=[('example.com', 53)])
firstProto = resolver._connectedProtocol()
secondProto = resolver._connectedProtocol()
self.assertIsNotNone(firstProto.transport)
self.assertIsNotNone(secondProto.transport)
self.assertNotEqual(
firstProto.transport.getHost().port,
secondProto.transport.getHost().port)
return defer.gatherResults([
defer.maybeDeferred(firstProto.transport.stopListening),
defer.maybeDeferred(secondProto.transport.stopListening)])
def test_resolverUsesOnlyParameterizedReactor(self):
"""
If a reactor instance is supplied to L{client.Resolver}
L{client.Resolver._connectedProtocol} should pass that reactor
to L{twisted.names.dns.DNSDatagramProtocol}.
"""
reactor = MemoryReactor()
resolver = client.Resolver(resolv=self.mktemp(), reactor=reactor)
proto = resolver._connectedProtocol()
self.assertIs(proto._reactor, reactor)
def test_differentProtocol(self):
"""
L{client.Resolver._connectedProtocol} is called once each time a UDP
request needs to be issued and the resulting protocol instance is used
for that request.
"""
resolver = client.Resolver(servers=[('example.com', 53)])
protocols = []
class FakeProtocol(object):
def __init__(self):
self.transport = StubPort()
def query(self, address, query, timeout=10, id=None):
protocols.append(self)
return defer.succeed(dns.Message())
resolver._connectedProtocol = FakeProtocol
resolver.query(dns.Query(b'foo.example.com'))
resolver.query(dns.Query(b'bar.example.com'))
self.assertEqual(len(set(protocols)), 2)
def test_disallowedPort(self):
"""
If a port number is initially selected which cannot be bound, the
L{CannotListenError} is handled and another port number is attempted.
"""
ports = []
class FakeReactor(object):
def listenUDP(self, port, *args):
ports.append(port)
if len(ports) == 1:
raise CannotListenError(None, port, None)
resolver = client.Resolver(servers=[('example.com', 53)])
resolver._reactor = FakeReactor()
resolver._connectedProtocol()
self.assertEqual(len(set(ports)), 2)
def test_differentProtocolAfterTimeout(self):
"""
When a query issued by L{client.Resolver.query} times out, the retry
uses a new protocol instance.
"""
resolver = client.Resolver(servers=[('example.com', 53)])
protocols = []
results = [defer.fail(failure.Failure(DNSQueryTimeoutError(None))),
defer.succeed(dns.Message())]
class FakeProtocol(object):
def __init__(self):
self.transport = StubPort()
def query(self, address, query, timeout=10, id=None):
protocols.append(self)
return results.pop(0)
resolver._connectedProtocol = FakeProtocol
resolver.query(dns.Query(b'foo.example.com'))
self.assertEqual(len(set(protocols)), 2)
def test_protocolShutDown(self):
"""
After the L{Deferred} returned by L{DNSDatagramProtocol.query} is
called back, the L{DNSDatagramProtocol} is disconnected from its
transport.
"""
resolver = client.Resolver(servers=[('example.com', 53)])
protocols = []
result = defer.Deferred()
class FakeProtocol(object):
def __init__(self):
self.transport = StubPort()
def query(self, address, query, timeout=10, id=None):
protocols.append(self)
return result
resolver._connectedProtocol = FakeProtocol
resolver.query(dns.Query(b'foo.example.com'))
self.assertFalse(protocols[0].transport.disconnected)
result.callback(dns.Message())
self.assertTrue(protocols[0].transport.disconnected)
def test_protocolShutDownAfterTimeout(self):
"""
The L{DNSDatagramProtocol} created when an interim timeout occurs is
also disconnected from its transport after the Deferred returned by its
query method completes.
"""
resolver = client.Resolver(servers=[('example.com', 53)])
protocols = []
result = defer.Deferred()
results = [defer.fail(failure.Failure(DNSQueryTimeoutError(None))),
result]
class FakeProtocol(object):
def __init__(self):
self.transport = StubPort()
def query(self, address, query, timeout=10, id=None):
protocols.append(self)
return results.pop(0)
resolver._connectedProtocol = FakeProtocol
resolver.query(dns.Query(b'foo.example.com'))
self.assertFalse(protocols[1].transport.disconnected)
result.callback(dns.Message())
self.assertTrue(protocols[1].transport.disconnected)
def test_protocolShutDownAfterFailure(self):
"""
If the L{Deferred} returned by L{DNSDatagramProtocol.query} fires with
a failure, the L{DNSDatagramProtocol} is still disconnected from its
transport.
"""
class ExpectedException(Exception):
pass
resolver = client.Resolver(servers=[('example.com', 53)])
protocols = []
result = defer.Deferred()
class FakeProtocol(object):
def __init__(self):
self.transport = StubPort()
def query(self, address, query, timeout=10, id=None):
protocols.append(self)
return result
resolver._connectedProtocol = FakeProtocol
queryResult = resolver.query(dns.Query(b'foo.example.com'))
self.assertFalse(protocols[0].transport.disconnected)
result.errback(failure.Failure(ExpectedException()))
self.assertTrue(protocols[0].transport.disconnected)
return self.assertFailure(queryResult, ExpectedException)
def test_tcpDisconnectRemovesFromConnections(self):
"""
When a TCP DNS protocol associated with a Resolver disconnects, it is
removed from the Resolver's connection list.
"""
resolver = client.Resolver(servers=[('example.com', 53)])
protocol = resolver.factory.buildProtocol(None)
protocol.makeConnection(None)
self.assertIn(protocol, resolver.connections)
# Disconnecting should remove the protocol from the connection list:
protocol.connectionLost(None)
self.assertNotIn(protocol, resolver.connections)
def test_singleTCPQueryErrbackOnConnectionFailure(self):
"""
The deferred returned by L{client.Resolver.queryTCP} will
errback when the TCP connection attempt fails. The reason for
the connection failure is passed as the argument to errback.
"""
reactor = proto_helpers.MemoryReactor()
resolver = client.Resolver(
servers=[('192.0.2.100', 53)],
reactor=reactor)
d = resolver.queryTCP(dns.Query('example.com'))
host, port, factory, timeout, bindAddress = reactor.tcpClients[0]
class SentinelException(Exception):
pass
factory.clientConnectionFailed(
reactor.connectors[0], failure.Failure(SentinelException()))
self.failureResultOf(d, SentinelException)
def test_multipleTCPQueryErrbackOnConnectionFailure(self):
"""
All pending L{resolver.queryTCP} C{deferred}s will C{errback}
with the same C{Failure} if the connection attempt fails.
"""
reactor = proto_helpers.MemoryReactor()
resolver = client.Resolver(
servers=[('192.0.2.100', 53)],
reactor=reactor)
d1 = resolver.queryTCP(dns.Query('example.com'))
d2 = resolver.queryTCP(dns.Query('example.net'))
host, port, factory, timeout, bindAddress = reactor.tcpClients[0]
class SentinelException(Exception):
pass
factory.clientConnectionFailed(
reactor.connectors[0], failure.Failure(SentinelException()))
f1 = self.failureResultOf(d1, SentinelException)
f2 = self.failureResultOf(d2, SentinelException)
self.assertIs(f1, f2)
def test_reentrantTCPQueryErrbackOnConnectionFailure(self):
"""
An errback on the deferred returned by
L{client.Resolver.queryTCP} may trigger another TCP query.
"""
reactor = proto_helpers.MemoryReactor()
resolver = client.Resolver(
servers=[('127.0.0.1', 10053)],
reactor=reactor)
q = dns.Query('example.com')
# First query sent
d = resolver.queryTCP(q)
# Repeat the query when the first query fails
def reissue(e):
e.trap(ConnectionRefusedError)
return resolver.queryTCP(q)
d.addErrback(reissue)
self.assertEqual(len(reactor.tcpClients), 1)
self.assertEqual(len(reactor.connectors), 1)
host, port, factory, timeout, bindAddress = reactor.tcpClients[0]
# First query fails
f1 = failure.Failure(ConnectionRefusedError())
factory.clientConnectionFailed(
reactor.connectors[0],
f1)
# A second TCP connection is immediately attempted
self.assertEqual(len(reactor.tcpClients), 2)
self.assertEqual(len(reactor.connectors), 2)
# No result expected until the second chained query returns
self.assertNoResult(d)
# Second query fails
f2 = failure.Failure(ConnectionRefusedError())
factory.clientConnectionFailed(
reactor.connectors[1],
f2)
# Original deferred now fires with the second failure
f = self.failureResultOf(d, ConnectionRefusedError)
self.assertIs(f, f2)
def test_pendingEmptiedInPlaceOnError(self):
"""
When the TCP connection attempt fails, the
L{client.Resolver.pending} list is emptied in place. It is not
replaced with a new empty list.
"""
reactor = proto_helpers.MemoryReactor()
resolver = client.Resolver(
servers=[('192.0.2.100', 53)],
reactor=reactor)
d = resolver.queryTCP(dns.Query('example.com'))
host, port, factory, timeout, bindAddress = reactor.tcpClients[0]
prePending = resolver.pending
self.assertEqual(len(prePending), 1)
class SentinelException(Exception):
pass
factory.clientConnectionFailed(
reactor.connectors[0], failure.Failure(SentinelException()))
self.failureResultOf(d, SentinelException)
self.assertIs(resolver.pending, prePending)
self.assertEqual(len(prePending), 0)
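# For reference, outside of these tests a client.Resolver is normally driven
# through its IResolver lookup methods; a minimal sketch (the server address
# below is illustrative only):
#
#     resolver = client.Resolver(servers=[("192.0.2.1", 53)])
#     d = resolver.lookupAddress(b"example.com")
#     # The Deferred fires with an (answers, authority, additional) 3-tuple of
#     # RRHeader lists, the same shape asserted throughout the tests above.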
class ClientTests(unittest.TestCase):
def setUp(self):
"""
Replace the resolver with a FakeResolver
"""
client.theResolver = FakeResolver()
self.hostname = b'example.com'
self.hostnameForGetHostByName = b'getHostByNameTest'
def tearDown(self):
"""
        Set the resolver to None so that it will be recreated the next time a
        name lookup is done.
"""
client.theResolver = None
def checkResult(self, results, qtype):
"""
Verify that the result is the same query type as what is expected.
"""
answers, authority, additional = results
result = answers[0]
self.assertEqual(result.name.name, self.hostname)
self.assertEqual(result.type, qtype)
def checkGetHostByName(self, result):
"""
Test that the getHostByName query returns the 127.0.0.1 address.
"""
self.assertEqual(result, '127.0.0.1')
def test_getHostByName(self):
"""
        Do a getHostByName lookup of a value that should return 127.0.0.1.
"""
d = client.getHostByName(self.hostnameForGetHostByName)
d.addCallback(self.checkGetHostByName)
return d
def test_lookupAddress(self):
"""
        Do a lookup and test that the resolver issues the correct type of
        query. We do this by checking that FakeResolver returns a result
        record with the same query type as the one we issued.
"""
d = client.lookupAddress(self.hostname)
d.addCallback(self.checkResult, dns.A)
return d
def test_lookupIPV6Address(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupIPV6Address(self.hostname)
d.addCallback(self.checkResult, dns.AAAA)
return d
def test_lookupAddress6(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupAddress6(self.hostname)
d.addCallback(self.checkResult, dns.A6)
return d
def test_lookupNameservers(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupNameservers(self.hostname)
d.addCallback(self.checkResult, dns.NS)
return d
def test_lookupCanonicalName(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupCanonicalName(self.hostname)
d.addCallback(self.checkResult, dns.CNAME)
return d
def test_lookupAuthority(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupAuthority(self.hostname)
d.addCallback(self.checkResult, dns.SOA)
return d
def test_lookupMailBox(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupMailBox(self.hostname)
d.addCallback(self.checkResult, dns.MB)
return d
def test_lookupMailGroup(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupMailGroup(self.hostname)
d.addCallback(self.checkResult, dns.MG)
return d
def test_lookupMailRename(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupMailRename(self.hostname)
d.addCallback(self.checkResult, dns.MR)
return d
def test_lookupNull(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupNull(self.hostname)
d.addCallback(self.checkResult, dns.NULL)
return d
def test_lookupWellKnownServices(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupWellKnownServices(self.hostname)
d.addCallback(self.checkResult, dns.WKS)
return d
def test_lookupPointer(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupPointer(self.hostname)
d.addCallback(self.checkResult, dns.PTR)
return d
def test_lookupHostInfo(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupHostInfo(self.hostname)
d.addCallback(self.checkResult, dns.HINFO)
return d
def test_lookupMailboxInfo(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupMailboxInfo(self.hostname)
d.addCallback(self.checkResult, dns.MINFO)
return d
def test_lookupMailExchange(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupMailExchange(self.hostname)
d.addCallback(self.checkResult, dns.MX)
return d
def test_lookupText(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupText(self.hostname)
d.addCallback(self.checkResult, dns.TXT)
return d
def test_lookupSenderPolicy(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupSenderPolicy(self.hostname)
d.addCallback(self.checkResult, dns.SPF)
return d
def test_lookupResponsibility(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupResponsibility(self.hostname)
d.addCallback(self.checkResult, dns.RP)
return d
def test_lookupAFSDatabase(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupAFSDatabase(self.hostname)
d.addCallback(self.checkResult, dns.AFSDB)
return d
def test_lookupService(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupService(self.hostname)
d.addCallback(self.checkResult, dns.SRV)
return d
def test_lookupZone(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupZone(self.hostname)
d.addCallback(self.checkResult, dns.AXFR)
return d
def test_lookupAllRecords(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupAllRecords(self.hostname)
d.addCallback(self.checkResult, dns.ALL_RECORDS)
return d
def test_lookupNamingAuthorityPointer(self):
"""
See L{test_lookupAddress}
"""
d = client.lookupNamingAuthorityPointer(self.hostname)
d.addCallback(self.checkResult, dns.NAPTR)
return d
def test_query(self):
"""
L{client.query} accepts a L{dns.Query} instance and dispatches
it to L{client.theResolver}.C{query}, which in turn dispatches
to an appropriate C{lookup*} method of L{client.theResolver},
based on the L{dns.Query} type.
"""
q = dns.Query(self.hostname, dns.A)
d = client.query(q)
d.addCallback(self.checkResult, dns.A)
return d
class FilterAnswersTests(unittest.TestCase):
"""
Test L{twisted.names.client.Resolver.filterAnswers}'s handling of various
error conditions it might encounter.
"""
def setUp(self):
# Create a resolver pointed at an invalid server - we won't be hitting
# the network in any of these tests.
self.resolver = client.Resolver(servers=[('0.0.0.0', 0)])
def test_truncatedMessage(self):
"""
Test that a truncated message results in an equivalent request made via
TCP.
"""
m = dns.Message(trunc=True)
m.addQuery(b'example.com')
def queryTCP(queries):
self.assertEqual(queries, m.queries)
response = dns.Message()
response.answers = ['answer']
response.authority = ['authority']
response.additional = ['additional']
return defer.succeed(response)
self.resolver.queryTCP = queryTCP
d = self.resolver.filterAnswers(m)
d.addCallback(
self.assertEqual, (['answer'], ['authority'], ['additional']))
return d
def _rcodeTest(self, rcode, exc):
m = dns.Message(rCode=rcode)
err = self.resolver.filterAnswers(m)
err.trap(exc)
def test_formatError(self):
"""
Test that a message with a result code of C{EFORMAT} results in a
failure wrapped around L{DNSFormatError}.
"""
return self._rcodeTest(dns.EFORMAT, error.DNSFormatError)
def test_serverError(self):
"""
Like L{test_formatError} but for C{ESERVER}/L{DNSServerError}.
"""
return self._rcodeTest(dns.ESERVER, error.DNSServerError)
def test_nameError(self):
"""
Like L{test_formatError} but for C{ENAME}/L{DNSNameError}.
"""
return self._rcodeTest(dns.ENAME, error.DNSNameError)
def test_notImplementedError(self):
"""
Like L{test_formatError} but for C{ENOTIMP}/L{DNSNotImplementedError}.
"""
return self._rcodeTest(dns.ENOTIMP, error.DNSNotImplementedError)
def test_refusedError(self):
"""
Like L{test_formatError} but for C{EREFUSED}/L{DNSQueryRefusedError}.
"""
return self._rcodeTest(dns.EREFUSED, error.DNSQueryRefusedError)
def test_refusedErrorUnknown(self):
"""
Like L{test_formatError} but for an unrecognized error code and
L{DNSUnknownError}.
"""
return self._rcodeTest(dns.EREFUSED + 1, error.DNSUnknownError)
class FakeDNSDatagramProtocol(object):
def __init__(self):
self.queries = []
self.transport = StubPort()
def query(self, address, queries, timeout=10, id=None):
self.queries.append((address, queries, timeout, id))
return defer.fail(error.DNSQueryTimeoutError(queries))
def removeResend(self, id):
# Ignore this for the time being.
pass
class RetryLogicTests(unittest.TestCase):
"""
Tests for query retrying implemented by L{client.Resolver}.
"""
testServers = [
'1.2.3.4',
'4.3.2.1',
'a.b.c.d',
'z.y.x.w']
def test_roundRobinBackoff(self):
"""
When timeouts occur waiting for responses to queries, the next
configured server is issued the query. When the query has been issued
to all configured servers, the timeout is increased and the process
begins again at the beginning.
"""
addrs = [(x, 53) for x in self.testServers]
r = client.Resolver(resolv=None, servers=addrs)
proto = FakeDNSDatagramProtocol()
r._connectedProtocol = lambda: proto
return r.lookupAddress(b"foo.example.com"
).addCallback(self._cbRoundRobinBackoff
).addErrback(self._ebRoundRobinBackoff, proto
)
def _cbRoundRobinBackoff(self, result):
self.fail("Lookup address succeeded, should have timed out")
def _ebRoundRobinBackoff(self, failure, fakeProto):
failure.trap(defer.TimeoutError)
# Assert that each server is tried with a particular timeout
# before the timeout is increased and the attempts are repeated.
for t in (1, 3, 11, 45):
tries = fakeProto.queries[:len(self.testServers)]
del fakeProto.queries[:len(self.testServers)]
tries.sort()
expected = list(self.testServers)
expected.sort()
for ((addr, query, timeout, id), expectedAddr) in zip(tries, expected):
self.assertEqual(addr, (expectedAddr, 53))
self.assertEqual(timeout, t)
self.assertFalse(fakeProto.queries)
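# The timeouts iterated over in _ebRoundRobinBackoff reflect client.Resolver's
# default retry schedule: each pass issues the query to every configured
# server with the current timeout before moving on to the next, larger one.
# A rough sketch of the behaviour asserted above (addresses illustrative):
#
#     resolver = client.Resolver(resolv=None,
#                                servers=[("192.0.2.1", 53), ("192.0.2.2", 53)])
#     # With the default timeout schedule of (1, 3, 11, 45) seconds, two
#     # completely unresponsive servers are queried 2 * 4 = 8 times before the
#     # lookup fails with defer.TimeoutError.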
class ThreadedResolverTests(unittest.TestCase):
"""
Tests for L{client.ThreadedResolver}.
"""
def test_deprecated(self):
"""
L{client.ThreadedResolver} is deprecated. Instantiating it emits a
deprecation warning pointing at the code that does the instantiation.
"""
client.ThreadedResolver()
warnings = self.flushWarnings(offendingFunctions=[self.test_deprecated])
self.assertEqual(
warnings[0]['message'],
"twisted.names.client.ThreadedResolver is deprecated since "
"Twisted 9.0, use twisted.internet.base.ThreadedResolver "
"instead.")
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(len(warnings), 1)
|
|
# -*- coding: utf-8 -*-
from django.conf import settings
from datetime import datetime
import logging
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.urls import reverse
from django.test.utils import override_settings
from model_mommy import mommy
from coop_cms.apps.test_app.models import TestClass, TestTag
from coop_cms import settings as coop_settings
from coop_cms.models import BaseArticle, Newsletter
from coop_cms.tests import BeautifulSoup, BaseArticleTest
from coop_cms.tests.test_newsletter import NewsletterSettingsTest
from coop_cms.utils import get_login_url
class BaseTestCase(TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def _log_as_viewer(self):
self.viewer = user = User.objects.create_user('viewer', '[email protected]', 'viewer')
return self.client.login(username='viewer', password='viewer')
def _log_as_editor(self):
self.editor = user = User.objects.create_user('editor', '[email protected]', 'editor')
content_type = ContentType.objects.get_for_model(TestClass)
perm = 'change_{0}'.format(content_type.model)
can_edit = Permission.objects.get(content_type=content_type, codename=perm)
user.user_permissions.add(can_edit)
perm = 'add_{0}'.format(content_type.model)
can_add = Permission.objects.get(content_type=content_type, codename=perm)
user.user_permissions.add(can_add)
user.is_active = True
user.is_staff = True # can_edit_object
user.save()
return self.client.login(username='editor', password='editor')
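# The permission plumbing in BaseTestCase._log_as_editor generalises to any
# model. A hypothetical helper along these lines could be shared by other
# test cases (a sketch only; it is not part of coop_cms and is unused here):
def _grant_model_permissions(user, model_class, actions=('change', 'add')):
    """Give `user` the '<action>_<model>' permission for each requested action."""
    content_type = ContentType.objects.get_for_model(model_class)
    for action in actions:
        codename = '{0}_{1}'.format(action, content_type.model)
        permission = Permission.objects.get(content_type=content_type, codename=codename)
        user.user_permissions.add(permission)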
class GenericViewTestCase(BaseTestCase):
def test_view_list_objects(self):
obj = mommy.make(TestClass)
response = self.client.get(obj.get_list_url())
self.assertEqual(200, response.status_code)
    def test_view_object_anonymous(self):
obj = mommy.make(TestClass)
url = obj.get_absolute_url()
response = self.client.get(url)
if coop_settings.is_perm_middleware_installed():
self.assertEqual(302, response.status_code)
auth_url = get_login_url()
self.assertRedirects(response, auth_url + '?next=' + url)
else:
self.assertEqual(403, response.status_code)
def test_edit_object_anonymous(self):
obj = mommy.make(TestClass)
url = obj.get_edit_url()
response = self.client.get(url)
if coop_settings.is_perm_middleware_installed():
self.assertEqual(302, response.status_code)
auth_url = get_login_url()
self.assertRedirects(response, auth_url + '?next=' + url)
else:
self.assertEqual(403, response.status_code)
field1, field2 = obj.field1, obj.field2
data = {'field1': "ABC", 'field2': "DEF", 'bool_field': True, 'int_field': 2, 'float_field': 3.14}
response = self.client.post(url, data=data)
if coop_settings.is_perm_middleware_installed():
self.assertEqual(302, response.status_code)
auth_url = get_login_url()
self.assertRedirects(response, auth_url + '?next=' + url)
else:
self.assertEqual(403, response.status_code)
obj = TestClass.objects.get(id=obj.id)
self.assertEqual(obj.field1, field1)
self.assertEqual(obj.field2, field2)
def test_view_object_viewer(self):
self._log_as_viewer()
obj = mommy.make(TestClass)
response = self.client.get(obj.get_absolute_url())
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
self.assertEqual("ABC", soup.select("#properties")[0].text)
def test_view_object_viewer_bool_true(self):
self._log_as_viewer()
obj = mommy.make(TestClass, bool_field=True)
response = self.client.get(obj.get_absolute_url())
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
self.assertEqual(1, len(soup.select(".bool_field_is_true")))
self.assertEqual(0, len(soup.select(".bool_field_is_false")))
def test_view_object_m2m_relationships(self):
self._log_as_viewer()
obj = mommy.make(TestClass)
tag1 = mommy.make(TestTag, name='The-Tag-1#')
tag2 = mommy.make(TestTag, name='The-Tag-2#')
tag3 = mommy.make(TestTag, name='The-Tag-3#')
obj.tags.add(tag1)
obj.tags.add(tag2)
obj.save()
response = self.client.get(obj.get_absolute_url())
self.assertEqual(200, response.status_code)
self.assertContains(response, tag1.name)
self.assertContains(response, tag2.name)
self.assertNotContains(response, tag3.name)
def test_view_object_viewer_bool_false(self):
self._log_as_viewer()
obj = mommy.make(TestClass, bool_field=False)
response = self.client.get(obj.get_absolute_url())
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
self.assertEqual(0, len(soup.select(".bool_field_is_true")))
self.assertEqual(1, len(soup.select(".bool_field_is_false")))
def test_edit_object_viewer(self):
self._log_as_viewer()
obj = mommy.make(TestClass)
response = self.client.get(obj.get_edit_url())
self.assertEqual(403, response.status_code)
field1, field2 = obj.field1, obj.field2
data = {'field1': "ABC", 'field2': "DEF", 'bool_field': True, 'int_field': 2, 'float_field': 3.14}
response = self.client.post(obj.get_edit_url(), data=data, follow=True)
self.assertEqual(403, response.status_code)
obj = TestClass.objects.get(id=obj.id)
self.assertEqual(obj.field1, field1)
self.assertEqual(obj.field2, field2)
def test_view_object_editor(self):
self._log_as_editor()
obj = mommy.make(TestClass)
response = self.client.get(obj.get_absolute_url())
self.assertEqual(200, response.status_code)
def test_edit_object_editor(self):
self._log_as_editor()
obj = mommy.make(TestClass)
response = self.client.get(obj.get_edit_url())
self.assertEqual(200, response.status_code)
data = {'field1': "ABC", 'field2': "DEF", 'bool_field': True, 'int_field': 2, 'float_field': 3.14}
response = self.client.post(obj.get_edit_url(), data=data, follow=True)
self.assertEqual(200, response.status_code)
obj = TestClass.objects.get(id=obj.id)
self.assertEqual(obj.field1, data["field1"])
self.assertEqual(obj.field2, data["field2"])
self.assertEqual(obj.bool_field, data["bool_field"])
self.assertEqual(obj.int_field, data["int_field"])
self.assertEqual(obj.float_field, data["float_field"])
def test_edit_object_inactive(self):
self._log_as_editor()
self.editor.is_active = False
self.editor.save()
obj = mommy.make(TestClass)
response = self.client.get(obj.get_edit_url())
self.assertEqual(403, response.status_code)
data = {'field1': "ABC", 'field2': "DEF", 'bool_field': True, 'int_field': 2, 'float_field': 3.14}
response = self.client.post(obj.get_edit_url(), data=data, follow=True)
self.assertEqual(403, response.status_code)
obj = TestClass.objects.get(id=obj.id)
self.assertNotEqual(obj.field1, data["field1"])
self.assertNotEqual(obj.field2, data["field2"])
class FormsetViewTestCase(BaseTestCase):
def test_view_formset_no_objects(self):
self._log_as_viewer()
url = reverse('coop_cms_testapp_formset')
response = self.client.get(url)
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
self.assertEqual(0, len(soup.select('form')))
def test_edit_formset_no_objects(self):
self._log_as_editor()
url = reverse('coop_cms_testapp_formset_edit')
response = self.client.get(url)
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
self.assertEqual(1, len(soup.select('form')))
def test_view_formset_one_object(self):
self._log_as_viewer()
obj = mommy.make(TestClass)
url = reverse('coop_cms_testapp_formset')
response = self.client.get(url)
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
self.assertEqual(0, len(soup.select('form')))
self.assertContains(response, obj.field1)
self.assertContains(response, obj.field2)
self.assertContains(response, obj.other_field)
#def test_edit_formset_one_object(self):
# self._log_as_viewer()
#
# obj = mommy.make(TestClass)
#
# url = reverse('coop_cms_testapp_formset_edit')
#
# response = self.client.get(url)
# self.assertEqual(200, response.status_code)
#
# soup = BeautifulSoup(response.content)
# self.assertEqual(1, len(soup.select('form')))
#
# print response.content
#
# self.assertContains(response, obj.field1)
# self.assertContains(response, obj.field2)
# self.assertContains(response, obj.other_field)
def test_view_formset_several_object(self):
self._log_as_viewer()
obj1 = mommy.make(TestClass)
obj2 = mommy.make(TestClass)
obj3 = mommy.make(TestClass)
objects = [obj1, obj2, obj3]
url = reverse('coop_cms_testapp_formset')
response = self.client.get(url)
self.assertEqual(200, response.status_code)
for obj in objects:
self.assertContains(response, obj.field1)
self.assertContains(response, obj.field2)
self.assertContains(response, obj.other_field)
    def test_post_edit_formset_no_objects(self):
self._log_as_editor()
url = reverse('coop_cms_testapp_formset_edit')
data = {
'form-TOTAL_FORMS': 0,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': 1,
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
def test_post_formset_on_view(self):
self._log_as_editor()
url = reverse('coop_cms_testapp_formset')
data = {
'form-TOTAL_FORMS': 0,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': 1,
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(404, response.status_code)
def test_post_edit_formset_one_object(self):
self._log_as_editor()
obj = mommy.make(TestClass)
url = reverse('coop_cms_testapp_formset_edit')
other_field = obj.other_field
data = {
'form-0-id': obj.id,
'form-0-field1': "AZERTYUIOP",
'form-0-field2': "<p>QWERTY/nUIOP</p>",
#'form-0-field3': "",
'form-0-other_field': "wxcvbn",
'form-0-bool_field': True,
'form-0-int_field': 2,
'form-0-float_field': 3.14,
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1,
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
obj = TestClass.objects.get(id=obj.id)
self.assertContains(response, obj.field1)
self.assertContains(response, obj.field2)
self.assertContains(response, other_field)
self.assertEqual(data['form-0-field1'], obj.field1)
self.assertEqual(data['form-0-field2'], obj.field2)
self.assertEqual(other_field, obj.other_field)
self.assertEqual(data['form-0-bool_field'], obj.bool_field)
self.assertEqual(data['form-0-int_field'], obj.int_field)
self.assertEqual(data['form-0-float_field'], obj.float_field)
def test_edit_formset_several_object(self):
self._log_as_editor()
obj1 = mommy.make(TestClass)
obj2 = mommy.make(TestClass)
data = {
'form-0-id': obj1.id,
'form-0-field1': "AZERTYUIOP",
'form-0-field2': "<p>QWERTY/nUIOP</p>",
'form-0-field3': "AZDD",
'form-0-bool_field': True,
'form-0-int_field': 2,
'form-0-float_field': 3.14,
'form-1-id': obj2.id,
'form-1-field1': "POIUYTREZA",
'form-1-field2': "<p>MLKJHGFDSQ</p>",
'form-1-field3': "QSkk",
'form-1-bool_field': False,
'form-1-int_field': 2,
'form-1-float_field': 3.14,
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 2,
'form-MAX_NUM_FORMS': 2,
}
url = reverse('coop_cms_testapp_formset_edit')
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
objects = TestClass.objects.all()
for i, obj in enumerate(objects):
self.assertEqual(data['form-{0}-field1'.format(i)], obj.field1)
self.assertEqual(data['form-{0}-field2'.format(i)], obj.field2)
self.assertEqual(data['form-{0}-field3'.format(i)], obj.field3)
self.assertEqual(data['form-{0}-bool_field'.format(i)], obj.bool_field)
self.assertEqual(data['form-{0}-int_field'.format(i)], obj.int_field)
self.assertEqual(data['form-{0}-float_field'.format(i)], obj.float_field)
def test_edit_formset_extra_1(self):
self._log_as_editor()
obj1 = mommy.make(TestClass)
data = {
'form-0-id': obj1.id,
'form-0-field1': "AZERTYUIOP",
'form-0-field2': "<p>QWERTY/nUIOP</p>",
'form-0-field3': "AZDD",
'form-0-bool_field': True,
'form-0-int_field': 2,
'form-0-float_field': 3.14,
'form-1-id': '',
'form-1-field1': "POIUYTREZA",
'form-1-field2': "<p>MLKJHGFDSQ</p>",
'form-1-field3': "QSkk",
'form-1-bool_field': True,
'form-1-int_field': 2,
'form-1-float_field': 3.14,
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 2,
}
url = reverse('coop_cms_testapp_formset_edit')
response = self.client.post(url, data=data, follow=True)
self.assertEqual(200, response.status_code)
objects = TestClass.objects.all()
self.assertEqual(2, objects.count())
for i, obj in enumerate(objects):
self.assertEqual(data['form-{0}-field1'.format(i)], obj.field1)
self.assertEqual(data['form-{0}-field2'.format(i)], obj.field2)
self.assertEqual(data['form-{0}-field3'.format(i)], obj.field3)
self.assertEqual(data['form-{0}-bool_field'.format(i)], obj.bool_field)
self.assertEqual(data['form-{0}-int_field'.format(i)], obj.int_field)
self.assertEqual(data['form-{0}-float_field'.format(i)], obj.float_field)
def test_edit_formset_anonymous(self):
obj = mommy.make(TestClass)
url = reverse('coop_cms_testapp_formset_edit')
other_field = obj.other_field
data = {
'form-0-id': obj.id,
'form-0-field1': "AZERTYUIOP",
'form-0-field2': "<p>QWERTY/nUIOP</p>",
'form-0-bool_field': True,
'form-0-int_field': 2,
'form-0-float_field': 3.14,
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1,
}
response = self.client.post(url, data=data)
if coop_settings.is_perm_middleware_installed():
self.assertEqual(302, response.status_code)
auth_url = get_login_url()
self.assertRedirects(response, auth_url+'?next='+url)
else:
self.assertEqual(403, response.status_code)
obj = TestClass.objects.get(id=obj.id)
self.assertNotEqual(data['form-0-field1'], obj.field1)
self.assertNotEqual(data['form-0-field2'], obj.field2)
def test_edit_formset_viewer(self):
self._log_as_viewer()
obj = mommy.make(TestClass)
url = reverse('coop_cms_testapp_formset_edit')
other_field = obj.other_field
data = {
'form-0-id': obj.id,
'form-0-field1': "AZERTYUIOP",
'form-0-field2': "<p>QWERTY/nUIOP</p>",
'form-0-bool_field': True,
'form-0-int_field': 2,
'form-0-float_field': 3.14,
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1,
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(403, response.status_code)
obj = TestClass.objects.get(id=obj.id)
self.assertNotEqual(data['form-0-field1'], obj.field1)
self.assertNotEqual(data['form-0-field2'], obj.field2)
def test_edit_formset_inactive(self):
self._log_as_editor()
self.editor.is_active = False
self.editor.save()
obj = mommy.make(TestClass)
url = reverse('coop_cms_testapp_formset_edit')
other_field = obj.other_field
data = {
'form-0-id': obj.id,
'form-0-field1': "AZERTYUIOP",
'form-0-field2': "<p>QWERTY/nUIOP</p>",
'form-0-bool_field': True,
'form-0-int_field': 2,
'form-0-float_field': 3.14,
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MAX_NUM_FORMS': 1,
}
response = self.client.post(url, data=data, follow=True)
self.assertEqual(403, response.status_code)
obj = TestClass.objects.get(id=obj.id)
self.assertNotEqual(data['form-0-field1'], obj.field1)
self.assertNotEqual(data['form-0-field2'], obj.field2)
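# Every formset POST in the tests above has to carry Django's management-form
# fields (form-TOTAL_FORMS, form-INITIAL_FORMS, form-MAX_NUM_FORMS). A
# hypothetical helper for building them (a sketch; the tests above spell the
# fields out inline instead of using it):
def _formset_management_data(total_forms, initial_forms, max_num_forms=1000, prefix='form'):
    """Return the management-form fields a Django formset expects in a POST."""
    return {
        '{0}-TOTAL_FORMS'.format(prefix): total_forms,
        '{0}-INITIAL_FORMS'.format(prefix): initial_forms,
        '{0}-MAX_NUM_FORMS'.format(prefix): max_num_forms,
    }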
class ArticleFormTest(BaseTestCase):
def _log_as_viewer(self):
self.viewer = user = User.objects.create_user('viewer', '[email protected]', 'viewer')
return self.client.login(username='viewer', password='viewer')
def _log_as_editor(self):
self.user = user = User.objects.create_user('toto', '[email protected]', 'toto')
ct = ContentType.objects.get_for_model(coop_settings.get_article_class())
perm = 'change_{0}'.format(ct.model)
can_edit_article = Permission.objects.get(content_type=ct, codename=perm)
user.user_permissions.add(can_edit_article)
perm = 'add_{0}'.format(ct.model)
can_add_article = Permission.objects.get(content_type=ct, codename=perm)
user.user_permissions.add(can_add_article)
user.save()
return self.client.login(username='toto', password='toto')
def _settings_fields_to_backup(self):
return (
'COOP_CMS_ARTICLE_SETTINGS_FORM', 'COOP_CMS_NEW_ARTICLE_FORM', 'COOP_CMS_ARTICLE_TEMPLATES',
)
def setUp(self):
self._settings_backup = {}
for s in self._settings_fields_to_backup():
v = getattr(settings, s, None)
            if v is not None:
                self._settings_backup[s] = v
self.LOGIN_URL = settings.LOGIN_URL
settings.LOGIN_URL = get_login_url()
settings.COOP_CMS_NEW_ARTICLE_FORM = 'coop_cms.apps.test_app.forms.MyNewArticleForm'
settings.COOP_CMS_ARTICLE_SETTINGS_FORM = 'coop_cms.apps.test_app.forms.MyArticleSettingsForm'
settings.COOP_CMS_ARTICLE_TEMPLATES = (
('test/article.html', 'Article'),
('test/article_with_blocks.html', 'Article with blocks'),
)
super(ArticleFormTest, self).setUp()
def tearDown(self):
for setting in self._settings_fields_to_backup():
value = self._settings_backup.get(setting, None)
if value is not None:
setattr(settings, setting, value)
settings.LOGIN_URL = self.LOGIN_URL
super(ArticleFormTest, self).tearDown()
def test_view_new_article(self):
self._log_as_editor()
url = reverse('coop_cms_new_article')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content)
self.assertEqual(1, len(soup.select("#id_dummy")))
def test_view_new_article_anonymous(self):
url = reverse('coop_cms_new_article')
response = self.client.get(url)
self.assertEqual(302, response.status_code)
auth_url = get_login_url()
self.assertRedirects(response, auth_url+'?next='+url)
def test_view_article_not_allowed(self):
self._log_as_viewer()
article_class = coop_settings.get_article_class()
url = reverse('coop_cms_new_article')
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.assertEqual(article_class.objects.count(), 0)
def test_new_article(self):
self._log_as_editor()
article_class = coop_settings.get_article_class()
url = reverse('coop_cms_new_article')
data = {
'title': 'test',
'template': settings.COOP_CMS_ARTICLE_TEMPLATES[0][0],
'publication': BaseArticle.PUBLISHED,
'in_newsletter': False,
'navigation_parent': '',
'sites': [settings.SITE_ID]
}
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 200)
self.assertEqual(article_class.objects.count(), 1)
article = article_class.objects.all()[0]
for field in data:
if field == "sites":
self.assertEqual([x.id for x in getattr(article, field).all()], data[field])
elif field == "navigation_parent":
self.assertEqual(getattr(article, field), None)
else:
self.assertEqual(getattr(article, field), data[field])
    def test_new_article_anonymous(self):
article_class = coop_settings.get_article_class()
url = reverse('coop_cms_new_article')
data = {
'title': 'test',
'template': settings.COOP_CMS_ARTICLE_TEMPLATES[0][0],
'publication': BaseArticle.PUBLISHED,
'in_newsletter': False,
'navigation_parent': '',
'sites': [settings.SITE_ID],
}
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 302)
login_url = get_login_url()
self.assertTrue(response['Location'].find(login_url) >= 0)
self.assertEqual(article_class.objects.count(), 0)
def test_new_article_not_allowed(self):
self._log_as_viewer()
article_class = coop_settings.get_article_class()
url = reverse('coop_cms_new_article')
data = {
'title': 'test',
'template': settings.COOP_CMS_ARTICLE_TEMPLATES[0][0],
'publication': BaseArticle.PUBLISHED,
'in_newsletter': False,
'navigation_parent': '',
'sites': [settings.SITE_ID],
}
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 403)
self.assertEqual(article_class.objects.count(), 0)
def test_view_article_settings(self):
self._log_as_editor()
article_class = coop_settings.get_article_class()
article = mommy.make(article_class, slug="test")
url = reverse('coop_cms_article_settings', args=[article.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content)
self.assertEqual(1, len(soup.select("#id_dummy")))
def test_view_article_settings_anonymous(self):
article_class = coop_settings.get_article_class()
article = mommy.make(article_class, slug="test")
url = reverse('coop_cms_article_settings', args=[article.id])
response = self.client.get(url)
self.assertEqual(302, response.status_code)
auth_url = get_login_url()
self.assertRedirects(response, auth_url+'?next='+url)
def test_view_article_settings_not_allowed(self):
self._log_as_viewer()
article_class = coop_settings.get_article_class()
article = mommy.make(article_class, slug="test")
url = reverse('coop_cms_article_settings', args=[article.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.assertEqual(article_class.objects.count(), 1)
def test_article_settings(self):
self._log_as_editor()
article_class = coop_settings.get_article_class()
article = mommy.make(article_class, slug="test")
url = reverse('coop_cms_article_settings', args=[article.id])
now = datetime.now()
now = now.replace(microsecond=0)
data = {
'template': settings.COOP_CMS_ARTICLE_TEMPLATES[0][0],
'publication_date': now,
'publication': BaseArticle.PUBLISHED,
'in_newsletter': False,
'summary': 'Summary',
'navigation_parent': '',
'sites': [settings.SITE_ID],
}
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 200)
self.assertEqual(article_class.objects.count(), 1)
article = article_class.objects.all()[0]
for field in data:
if field == "sites":
self.assertEqual([x.id for x in getattr(article, field).all()], data[field])
elif field == "navigation_parent":
self.assertEqual(article.navigation_parent, None)
else:
self.assertEqual(getattr(article, field), data[field])
def test_article_settings_anonymous(self):
article_class = coop_settings.get_article_class()
article = mommy.make(article_class, slug="test")
url = reverse('coop_cms_article_settings', args=[article.id])
data = {
'template': settings.COOP_CMS_ARTICLE_TEMPLATES[0][0],
'publication_date': datetime.now(),
'publication': BaseArticle.PUBLISHED,
'in_newsletter': False,
'summary': 'Summary',
'navigation_parent': '',
'sites': [settings.SITE_ID],
}
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 302)
login_url = get_login_url()
self.assertTrue(response['Location'].find(login_url) >= 0)
self.assertEqual(article_class.objects.count(), 1)
article = article_class.objects.all()[0]
self.assertNotEqual(article.summary, data['summary'])
def test_article_settings_not_allowed(self):
self._log_as_viewer()
article_class = coop_settings.get_article_class()
article = mommy.make(article_class, slug="test")
url = reverse('coop_cms_article_settings', args=[article.id])
data = {
'template': settings.COOP_CMS_ARTICLE_TEMPLATES[0][0],
'publication_date': datetime.now(),
'publication': BaseArticle.PUBLISHED,
'in_newsletter': False,
'summary': 'Summary',
'navigation_parent': '',
'sites': [settings.SITE_ID],
}
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 403)
self.assertEqual(article_class.objects.count(), 1)
article = article_class.objects.all()[0]
self.assertNotEqual(article.summary, data['summary'])
class MyNewsletterSettingsTest(NewsletterSettingsTest):
@override_settings(COOP_CMS_NEWSLETTER_SETTINGS_FORM="coop_cms.apps.test_app.forms.MyNewsletterSettingsForm")
def test_additional_field_on_edit(self):
self._log_as_editor()
newsletter = mommy.make(
Newsletter,
subject="a little intro for this newsletter",
template="test/newsletter_blue.html",
)
url = reverse("coop_cms_newsletter_settings", args=[newsletter.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content)
self.assertEqual(1, len(soup.select("#id_dummy")))
@override_settings(COOP_CMS_NEWSLETTER_SETTINGS_FORM="coop_cms.apps.test_app.forms.MyNewsletterSettingsForm")
def test_additional_field_on_create(self):
self._log_as_editor()
url = reverse("coop_cms_new_newsletter")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content)
self.assertEqual(1, len(soup.select("#id_dummy")))
@override_settings(COOP_CMS_ARTICLE_TEMPLATES=(('coop_cms/test_app/custom_tag_template.html', 'Custom Tag'),))
class CustomTemplateTagInCmsEditTag(BaseArticleTest):
"""test using custom templatetag inside the cms_edit template tag"""
def test_view_with_blocks(self):
"""test view article with block templatetag inside the cms_edit template tag"""
article_class = coop_settings.get_article_class()
article = mommy.make(
article_class,
title="This is my article", content="<p>This is my <b>content</b></p>",
template='coop_cms/test_app/custom_tag_template.html'
)
response = self.client.get(article.get_absolute_url())
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content)
self.assertEqual(3, len(soup.select("ul.custom li")))
self.assertContains(response, article.title)
self.assertContains(response, article.content)
self.assertContains(response, "*** HELLO FROM CHILD ***")
self.assertContains(response, "*** HELLO FROM PARENT ***")
self.assertContains(response, "*** HELLO FROM BLOCK ***")
def test_edit_with_blocks(self):
"""test edition with block templatetag inside the cms_edit template tag"""
article_class = coop_settings.get_article_class()
article = mommy.make(
article_class,
title="This is my article", content="<p>This is my <b>content</b></p>",
template='coop_cms/test_app/custom_tag_template.html'
)
self._log_as_editor()
data = {
"title": "This is a new title",
'content': "<p>This is a <i>*** NEW ***</i> <b>content</b></p>"
}
response = self.client.post(article.get_edit_url(), data=data, follow=True)
self.assertEqual(response.status_code, 200)
article = article_class.objects.get(id=article.id)
self.assertEqual(article.title, data['title'])
self.assertEqual(article.content, data['content'])
self.assertContains(response, article.title)
self.assertContains(response, article.content)
self.assertContains(response, "*** HELLO FROM CHILD ***")
self.assertContains(response, "*** HELLO FROM PARENT ***")
self.assertContains(response, "*** HELLO FROM BLOCK ***")
|
|
from ..errors import AngrMemoryError, AngrTranslationError
from ..analysis import Analysis, register_analysis
from collections import deque
import logging
import math
import networkx
import types
# todo include an explanation of the algorithm
# todo include a method that detects any change other than constants
# todo use function names / string references where available
l = logging.getLogger(name="angr.analyses.bindiff")
# basic block changes
DIFF_TYPE = "type"
DIFF_VALUE = "value"
# exception raised when trying to find basic block changes
class UnmatchedStatementsException(Exception):
pass
# statement difference classes
class Difference(object):
def __init__(self, diff_type, value_a, value_b):
self.type = diff_type
self.value_a = value_a
self.value_b = value_b
class ConstantChange(object):
def __init__(self, offset, value_a, value_b):
self.offset = offset
self.value_a = value_a
self.value_b = value_b
# helper methods
def _euclidean_dist(vector_a, vector_b):
"""
:param vector_a: A list of numbers.
:param vector_b: A list of numbers.
:returns: The euclidean distance between the two vectors.
"""
dist = 0
for (x, y) in zip(vector_a, vector_b):
dist += (x-y)*(x-y)
return math.sqrt(dist)
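# For example, _euclidean_dist([0, 0], [3, 4]) returns 5.0 (a 3-4-5 triangle).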
def _get_closest_matches(input_attributes, target_attributes):
"""
:param input_attributes: First dictionary of objects to attribute tuples.
    :param target_attributes: Second dictionary of objects to attribute tuples.
:returns: A dictionary of objects in the input_attributes to the closest objects in the
target_attributes.
"""
closest_matches = {}
# for each object in the first set find the objects with the closest target attributes
for a in input_attributes:
best_dist = float('inf')
best_matches = []
for b in target_attributes:
dist = _euclidean_dist(input_attributes[a], target_attributes[b])
if dist < best_dist:
best_matches = [b]
best_dist = dist
elif dist == best_dist:
best_matches.append(b)
closest_matches[a] = best_matches
return closest_matches
# from http://rosettacode.org/wiki/Levenshtein_distance
def _levenshtein_distance(s1, s2):
"""
:param s1: A list or string
:param s2: Another list or string
:returns: The levenshtein distance between the two
"""
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = range(len(s1) + 1)
for index2, num2 in enumerate(s2):
new_distances = [index2 + 1]
for index1, num1 in enumerate(s1):
if num1 == num2:
new_distances.append(distances[index1])
else:
new_distances.append(1 + min((distances[index1],
distances[index1+1],
new_distances[-1])))
distances = new_distances
return distances[-1]
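# For example, _levenshtein_distance("kitten", "sitting") returns 3
# (two substitutions and one insertion); lists of numbers work the same way.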
def _normalized_levenshtein_distance(s1, s2, acceptable_differences):
"""
This function calculates the levenshtein distance but allows for elements in the lists to be different by any number
in the set acceptable_differences.
:param s1: A list.
:param s2: Another list.
:param acceptable_differences: A set of numbers. If (s2[i]-s1[i]) is in the set then they are considered equal.
    :returns: The levenshtein distance between the two lists, counting elements
              whose difference is in acceptable_differences as equal.
"""
if len(s1) > len(s2):
s1, s2 = s2, s1
acceptable_differences = set(-i for i in acceptable_differences)
distances = range(len(s1) + 1)
for index2, num2 in enumerate(s2):
new_distances = [index2 + 1]
for index1, num1 in enumerate(s1):
if num2 - num1 in acceptable_differences:
new_distances.append(distances[index1])
else:
new_distances.append(1 + min((distances[index1],
distances[index1+1],
new_distances[-1])))
distances = new_distances
return distances[-1]
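# Note that acceptable_differences must contain 0 for unchanged elements to
# still count as matches, e.g.
#     _normalized_levenshtein_distance([1, 2, 3], [1, 2, 3], {0})   # -> 0
#     _normalized_levenshtein_distance([1, 2, 3], [1, 9, 3], {0})   # -> 1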
def _is_better_match(x, y, matched_a, matched_b, attributes_dict_a, attributes_dict_b):
"""
:param x: The first element of a possible match.
:param y: The second element of a possible match.
:param matched_a: The current matches for the first set.
:param matched_b: The current matches for the second set.
:param attributes_dict_a: The attributes for each element in the first set.
:param attributes_dict_b: The attributes for each element in the second set.
:returns: True/False
"""
attributes_x = attributes_dict_a[x]
attributes_y = attributes_dict_b[y]
if x in matched_a:
attributes_match = attributes_dict_b[matched_a[x]]
if _euclidean_dist(attributes_x, attributes_y) >= _euclidean_dist(attributes_x, attributes_match):
return False
if y in matched_b:
attributes_match = attributes_dict_a[matched_b[y]]
if _euclidean_dist(attributes_x, attributes_y) >= _euclidean_dist(attributes_y, attributes_match):
return False
return True
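# Illustrative behaviour with hypothetical values: a candidate pair only wins if it is
# strictly closer than the attribute distance of both elements' current matches. With
# attributes {1: (0, 0)} and {2: (1, 0), 3: (5, 5)}, and 1 currently matched to 3,
# _is_better_match(1, 2, {1: 3}, {3: 1}, {1: (0, 0)}, {2: (1, 0), 3: (5, 5)}) is True,
# because (0, 0) -> (1, 0) is closer than the existing (0, 0) -> (5, 5) match.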
def differing_constants(block_a, block_b):
"""
Compares two basic blocks and finds all the constants that differ from the first block to the second.
:param block_a: The first block to compare.
:param block_b: The second block to compare.
:returns: Returns a list of differing constants in the form of ConstantChange, which has the offset in the
block and the respective constants.
"""
statements_a = [s for s in block_a.vex.statements if s.tag != "Ist_IMark"] + [block_a.vex.next]
statements_b = [s for s in block_b.vex.statements if s.tag != "Ist_IMark"] + [block_b.vex.next]
if len(statements_a) != len(statements_b):
raise UnmatchedStatementsException("Blocks have different numbers of statements")
start_1 = min(block_a.instruction_addrs)
start_2 = min(block_b.instruction_addrs)
changes = []
# check statements
current_offset = None
for statement, statement_2 in zip(statements_a, statements_b):
# sanity check
if statement.tag != statement_2.tag:
raise UnmatchedStatementsException("Statement tag has changed")
if statement.tag == "Ist_IMark":
if statement.addr - start_1 != statement_2.addr - start_2:
raise UnmatchedStatementsException("Instruction length has changed")
current_offset = statement.addr - start_1
continue
differences = compare_statement_dict(statement, statement_2)
for d in differences:
if d.type != DIFF_VALUE:
raise UnmatchedStatementsException("Instruction has changed")
else:
changes.append(ConstantChange(current_offset, d.value_a, d.value_b))
return changes
def compare_statement_dict(statement_1, statement_2):
# should return whether or not the statement's type/effects changed
# need to return the specific number that changed too
if type(statement_1) != type(statement_2):
return [Difference(DIFF_TYPE, None, None)]
# None
if statement_1 is None and statement_2 is None:
return []
# constants
if isinstance(statement_1, (int, long, float, str)):
if isinstance(statement_1, float) and math.isnan(statement_1) and math.isnan(statement_2):
return []
elif statement_1 == statement_2:
return []
else:
return [Difference(None, statement_1, statement_2)]
# tuples/lists
if isinstance(statement_1, (tuple, list)):
if len(statement_1) != len(statement_2):
            return [Difference(DIFF_TYPE, None, None)]
differences = []
for s1, s2 in zip(statement_1, statement_2):
differences += compare_statement_dict(s1, s2)
return differences
# Yan's weird types
differences = []
for attr in statement_1.__dict__:
# don't check arch, property, or methods
if attr == "arch":
continue
if hasattr(statement_1.__class__, attr) and isinstance(getattr(statement_1.__class__, attr), property):
continue
if isinstance(getattr(statement_1, attr), types.MethodType):
continue
new_diffs = compare_statement_dict(getattr(statement_1, attr), getattr(statement_2, attr))
# set the difference types
for diff in new_diffs:
if diff.type is None:
diff.type = attr
differences += new_diffs
return differences
class NormalizedBlock(object):
# block may span multiple calls
def __init__(self, block, function):
addresses = [block.addr]
if block.addr in function.merged_blocks:
for a in function.merged_blocks[block.addr]:
addresses.append(a.addr)
self.addr = block.addr
self.addresses = addresses
self.statements = []
self.all_constants = []
self.operations = []
self.call_targets = []
self.blocks = []
self.instruction_addrs = []
if block.addr in function.call_sites:
self.call_targets = function.call_sites[block.addr]
self.jumpkind = None
for a in addresses:
block = function.project.factory.block(a)
self.instruction_addrs += block.instruction_addrs
irsb = block.vex
self.blocks.append(block)
self.statements += irsb.statements
self.all_constants += irsb.all_constants
self.operations += irsb.operations
self.jumpkind = irsb.jumpkind
self.size = sum([b.size for b in self.blocks])
def __repr__(self):
size = sum([b.size for b in self.blocks])
return '<Normalized Block for %#x, %d bytes>' % (self.addr, size)
class NormalizedFunction(object):
# a more normalized function
def __init__(self, function):
# start by copying the graph
self.graph = function.graph.copy()
self.project = function._function_manager._kb._project
self.call_sites = dict()
self.startpoint = function.startpoint
self.merged_blocks = dict()
self.orig_function = function
# find nodes which end in call and combine them
done = False
while not done:
done = True
for node in self.graph.nodes():
try:
bl = self.project.factory.block(node.addr)
except AngrMemoryError:
continue
except AngrTranslationError:
continue
# merge if it ends with a single call, and the successor has only one predecessor and succ is after
successors = self.graph.successors(node)
if bl.vex.jumpkind == "Ijk_Call" and len(successors) == 1 and \
len(self.graph.predecessors(successors[0])) == 1 and successors[0].addr > node.addr:
# add edges to the successors of its successor, and delete the original successors
succ = self.graph.successors(node)[0]
for s in self.graph.successors(succ):
self.graph.add_edge(node, s)
self.graph.remove_node(succ)
done = False
# add to merged blocks
if node not in self.merged_blocks:
self.merged_blocks[node] = []
self.merged_blocks[node].append(succ)
if succ in self.merged_blocks:
self.merged_blocks[node] += self.merged_blocks[succ]
del self.merged_blocks[succ]
# stop iterating and start over
break
# set up call sites
for n in self.graph.nodes():
call_targets = []
if n.addr in self.orig_function.get_call_sites():
call_targets.append(self.orig_function.get_call_target(n.addr))
if n.addr in self.merged_blocks:
for block in self.merged_blocks[n]:
if block.addr in self.orig_function.get_call_sites():
call_targets.append(self.orig_function.get_call_target(block.addr))
if len(call_targets) > 0:
self.call_sites[n] = call_targets
class FunctionDiff(object):
"""
    This class computes a diff between two functions.
"""
def __init__(self, function_a, function_b, bindiff=None):
"""
:param function_a: The first angr Function object to diff.
:param function_b: The second angr Function object.
:param bindiff: An optional Bindiff object. Used for some extra normalization during basic block comparison.
"""
self._function_a = NormalizedFunction(function_a)
self._function_b = NormalizedFunction(function_b)
self._project_a = self._function_a.project
self._project_b = self._function_b.project
self._bindiff = bindiff
self._attributes_a = dict()
        self._attributes_b = dict()
self._block_matches = set()
self._unmatched_blocks_from_a = set()
self._unmatched_blocks_from_b = set()
self._compute_diff()
@property
def probably_identical(self):
"""
:returns: Whether or not these two functions are identical.
"""
if len(self._unmatched_blocks_from_a | self._unmatched_blocks_from_b) > 0:
return False
for (a, b) in self._block_matches:
if not self.blocks_probably_identical(a, b):
return False
return True
@property
def identical_blocks(self):
"""
:returns: A list of block matches which appear to be identical
"""
identical_blocks = []
for (block_a, block_b) in self._block_matches:
if self.blocks_probably_identical(block_a, block_b):
identical_blocks.append((block_a, block_b))
return identical_blocks
@property
def differing_blocks(self):
"""
:returns: A list of block matches which appear to differ
"""
differing_blocks = []
for (block_a, block_b) in self._block_matches:
if not self.blocks_probably_identical(block_a, block_b):
differing_blocks.append((block_a, block_b))
return differing_blocks
@property
def blocks_with_differing_constants(self):
"""
        :return: A dict mapping block matches whose constants differ to the list of
                 ConstantChange objects describing those differences.
"""
differing_blocks = []
diffs = dict()
for (block_a, block_b) in self._block_matches:
if self.blocks_probably_identical(block_a, block_b) and \
not self.blocks_probably_identical(block_a, block_b, check_constants=True):
differing_blocks.append((block_a, block_b))
for block_a, block_b in differing_blocks:
ba = NormalizedBlock(block_a, self._function_a)
bb = NormalizedBlock(block_b, self._function_b)
diffs[(block_a, block_b)] = FunctionDiff._block_diff_constants(ba, bb)
return diffs
@property
def block_matches(self):
return self._block_matches
@property
def unmatched_blocks(self):
return self._unmatched_blocks_from_a, self._unmatched_blocks_from_b
@staticmethod
def get_normalized_block(addr, function):
"""
:param addr: Where to start the normalized block.
:param function: A function containing the block address.
:returns: A normalized basic block.
"""
return NormalizedBlock(addr, function)
def block_similarity(self, block_a, block_b):
"""
:param block_a: The first block address.
:param block_b: The second block address.
:returns: The similarity of the basic blocks, normalized for the base address of the block and function
call addresses.
"""
# handle sim procedure blocks
if self._project_a.is_hooked(block_a) and self._project_b.is_hooked(block_b):
if self._project_a._sim_procedures[block_a] == self._project_b._sim_procedures[block_b]:
return 1.0
else:
return 0.0
try:
block_a = NormalizedBlock(block_a, self._function_a)
except AngrMemoryError:
block_a = None
except AngrTranslationError:
block_a = None
try:
block_b = NormalizedBlock(block_b, self._function_b)
except AngrMemoryError:
block_b = None
except AngrTranslationError:
block_b = None
        # if both were None then they are assumed to be the same, if only one was None they are assumed to differ
if block_a is None and block_b is None:
return 1.0
elif block_a is None or block_b is None:
return 0.0
# get all elements for computing similarity
tags_a = [s.tag for s in block_a.statements]
tags_b = [s.tag for s in block_b.statements]
consts_a = [c.value for c in block_a.all_constants]
consts_b = [c.value for c in block_b.all_constants]
all_registers_a = [s.offset for s in block_a.statements if hasattr(s, "offset")]
all_registers_b = [s.offset for s in block_b.statements if hasattr(s, "offset")]
jumpkind_a = block_a.jumpkind
jumpkind_b = block_b.jumpkind
# compute total distance
total_dist = 0
total_dist += _levenshtein_distance(tags_a, tags_b)
total_dist += _levenshtein_distance(block_a.operations, block_b.operations)
total_dist += _levenshtein_distance(all_registers_a, all_registers_b)
acceptable_differences = self._get_acceptable_constant_differences(block_a, block_b)
total_dist += _normalized_levenshtein_distance(consts_a, consts_b, acceptable_differences)
total_dist += 0 if jumpkind_a == jumpkind_b else 1
# compute similarity
num_values = max(len(tags_a), len(tags_b))
num_values += max(len(consts_a), len(consts_b))
num_values += max(len(block_a.operations), len(block_b.operations))
num_values += 1 # jumpkind
similarity = 1 - (float(total_dist) / num_values)
return similarity
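    # Worked example of the normalisation above (illustrative numbers only): for two blocks
    # with 10 statement tags, 4 operations, 3 constants, matching register offsets and equal
    # jumpkinds, num_values == 10 + 3 + 4 + 1 == 18; a single differing constant that is not
    # an acceptable difference contributes total_dist == 1, giving similarity == 1 - 1/18.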
def blocks_probably_identical(self, block_a, block_b, check_constants=False):
"""
:param block_a: The first block address.
:param block_b: The second block address.
:param check_constants: Whether or not to require matching constants in blocks.
:returns: Whether or not the blocks appear to be identical.
"""
# handle sim procedure blocks
if self._project_a.is_hooked(block_a) and self._project_b.is_hooked(block_b):
return self._project_a._sim_procedures[block_a] == self._project_b._sim_procedures[block_b]
try:
block_a = NormalizedBlock(block_a, self._function_a)
except AngrMemoryError:
block_a = None
except AngrTranslationError:
block_a = None
try:
block_b = NormalizedBlock(block_b, self._function_b)
except AngrMemoryError:
block_b = None
except AngrTranslationError:
block_b = None
# if both were None then they are assumed to be the same, if only one was None they are assumed to differ
if block_a is None and block_b is None:
return True
elif block_a is None or block_b is None:
return False
# if they represent a different number of blocks they are not the same
if len(block_a.blocks) != len(block_b.blocks):
return False
# check differing constants
try:
diff_constants = FunctionDiff._block_diff_constants(block_a, block_b)
except UnmatchedStatementsException:
return False
if not check_constants:
return True
# get values of differences that probably indicate no change
acceptable_differences = self._get_acceptable_constant_differences(block_a, block_b)
# todo match globals
for c in diff_constants:
if (c.value_a, c.value_b) in self._block_matches:
# constants point to matched basic blocks
continue
            if self._bindiff is not None and (c.value_a, c.value_b) in self._bindiff.function_matches:
# constants point to matched functions
continue
# if both are in the binary we'll assume it's okay, although we should really match globals
# TODO use global matches
if self._project_a.loader.main_bin.contains_addr(c.value_a) and \
self._project_b.loader.main_bin.contains_addr(c.value_b):
continue
# if the difference is equal to the difference in block addr's or successor addr's we'll say it's also okay
if (c.value_b - c.value_a) in acceptable_differences:
continue
# otherwise they probably are different
return False
# the blocks appear to be identical
return True
@staticmethod
def _block_diff_constants(block_a, block_b):
diff_constants = []
for irsb_a, irsb_b in zip(block_a.blocks, block_b.blocks):
diff_constants += differing_constants(irsb_a, irsb_b)
return diff_constants
@staticmethod
def _compute_block_attributes(function):
"""
:param function: A normalized function object.
:returns: A dictionary of basic block addresses to tuples of attributes.
"""
        # The attributes we use are the distance from function start, distance from function exit and whether
# or not it has a subfunction call
distances_from_start = FunctionDiff._distances_from_function_start(function)
distances_from_exit = FunctionDiff._distances_from_function_exit(function)
call_sites = function.call_sites
attributes = {}
for block in function.graph.nodes():
if block in call_sites:
number_of_subfunction_calls = len(call_sites[block])
else:
number_of_subfunction_calls = 0
# there really shouldn't be blocks that can't be reached from the start, but there are for now
dist_start = distances_from_start[block] if block in distances_from_start else 10000
dist_exit = distances_from_exit[block] if block in distances_from_exit else 10000
attributes[block] = (dist_start, dist_exit, number_of_subfunction_calls)
return attributes
@staticmethod
def _distances_from_function_start(function):
"""
:param function: A normalized Function object.
:returns: A dictionary of basic block addresses and their distance to the start of the function.
"""
return networkx.single_source_shortest_path_length(function.graph,
function.startpoint)
@staticmethod
def _distances_from_function_exit(function):
"""
:param function: A normalized Function object.
:returns: A dictionary of basic block addresses and their distance to the exit of the function.
"""
reverse_graph = function.graph.reverse()
# we aren't guaranteed to have an exit from the function so explicitly add the node
reverse_graph.add_node("start")
found_exits = False
for n in function.graph.nodes():
if len(function.graph.successors(n)) == 0:
reverse_graph.add_edge("start", n)
found_exits = True
# if there were no exits (a function with a while 1) let's consider the block with the highest address to
# be the exit. This isn't the most scientific way, but since this case is pretty rare it should be okay
if not found_exits:
last = max(function.graph.nodes(), key=lambda x:x.addr)
reverse_graph.add_edge("start", last)
dists = networkx.single_source_shortest_path_length(reverse_graph, "start")
# remove temp node
del dists["start"]
# correct for the added node
for n in dists:
dists[n] -= 1
return dists
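    # Sketch of the reversal trick above on a hypothetical chain A -> B -> C: the reversed
    # graph plus the synthetic "start" node yields start -> C -> B -> A, so the raw shortest
    # path lengths are {C: 1, B: 2, A: 3}; after the -1 correction the distances to the
    # function exit are {C: 0, B: 1, A: 2}.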
def _compute_diff(self):
"""
Computes the diff of the functions and saves the result.
"""
# get the attributes for all blocks
l.debug("Computing diff of functions: %s, %s",
("%#x" % self._function_a.startpoint.addr) if self._function_a.startpoint is not None else "None",
("%#x" % self._function_b.startpoint.addr) if self._function_b.startpoint is not None else "None"
)
self.attributes_a = self._compute_block_attributes(self._function_a)
self.attributes_b = self._compute_block_attributes(self._function_b)
# get the initial matches
initial_matches = self._get_block_matches(self.attributes_a, self.attributes_b,
tiebreak_with_block_similarity=False)
# Use a queue so we process matches in the order that they are found
to_process = deque(initial_matches)
# Keep track of which matches we've already added to the queue
processed_matches = set((x, y) for (x, y) in initial_matches)
# Keep a dict of current matches, which will be updated if better matches are found
matched_a = dict()
matched_b = dict()
for (x, y) in processed_matches:
matched_a[x] = y
matched_b[y] = x
# while queue is not empty
while to_process:
(block_a, block_b) = to_process.pop()
l.debug("FunctionDiff: Processing (%#x, %#x)", block_a.addr, block_b.addr)
# we could find new matches in the successors or predecessors of functions
block_a_succ = self._function_a.graph.successors(block_a)
block_b_succ = self._function_b.graph.successors(block_b)
block_a_pred = self._function_a.graph.predecessors(block_a)
block_b_pred = self._function_b.graph.predecessors(block_b)
# propagate the difference in blocks as delta
delta = tuple((i-j) for i, j in zip(self.attributes_b[block_b], self.attributes_a[block_a]))
# get possible new matches
new_matches = []
# if the blocks are identical then the successors should most likely be matched in the same order
if self.blocks_probably_identical(block_a, block_b) and len(block_a_succ) == len(block_b_succ):
ordered_succ_a = self._get_ordered_successors(self._project_a, block_a, block_a_succ)
ordered_succ_b = self._get_ordered_successors(self._project_b, block_b, block_b_succ)
new_matches += zip(ordered_succ_a, ordered_succ_b)
new_matches += self._get_block_matches(self.attributes_a, self.attributes_b, block_a_succ, block_b_succ,
delta, tiebreak_with_block_similarity=True)
new_matches += self._get_block_matches(self.attributes_a, self.attributes_b, block_a_pred, block_b_pred,
delta, tiebreak_with_block_similarity=True)
# for each of the possible new matches add it if it improves the matching
for (x, y) in new_matches:
if (x, y) not in processed_matches:
processed_matches.add((x, y))
l.debug("FunctionDiff: checking if (%#x, %#x) is better", x.addr, y.addr)
# if it's a better match than what we already have use it
if _is_better_match(x, y, matched_a, matched_b, self.attributes_a, self.attributes_b):
l.debug("FunctionDiff: adding possible match (%#x, %#x)", x.addr, y.addr)
if x in matched_a:
old_match = matched_a[x]
del matched_b[old_match]
if y in matched_b:
old_match = matched_b[y]
del matched_a[old_match]
matched_a[x] = y
matched_b[y] = x
to_process.appendleft((x, y))
# reformat matches into a set of pairs
self._block_matches = set((x, y) for (x, y) in matched_a.items())
# get the unmatched blocks
self._unmatched_blocks_from_a = set(x for x in self._function_a.graph.nodes() if x not in matched_a)
self._unmatched_blocks_from_b = set(x for x in self._function_b.graph.nodes() if x not in matched_b)
@staticmethod
def _get_ordered_successors(project, block, succ):
try:
# add them in order of the vex
addr = block.addr
succ = set(succ)
ordered_succ = []
bl = project.factory.block(addr)
for x in bl.vex.all_constants:
if x in succ:
ordered_succ.append(x)
# add the rest (sorting might be better than no order)
for s in sorted(succ - set(ordered_succ), key=lambda x:x.addr):
ordered_succ.append(s)
return ordered_succ
except AngrMemoryError:
return sorted(succ, key=lambda x:x.addr)
except AngrTranslationError:
return sorted(succ, key=lambda x:x.addr)
def _get_block_matches(self, attributes_a, attributes_b, filter_set_a=None, filter_set_b=None, delta=(0, 0, 0),
tiebreak_with_block_similarity=False):
"""
:param attributes_a: A dict of blocks to their attributes
:param attributes_b: A dict of blocks to their attributes
The following parameters are optional.
:param filter_set_a: A set to limit attributes_a to the blocks in this set.
:param filter_set_b: A set to limit attributes_b to the blocks in this set.
:param delta: An offset to add to each vector in attributes_a.
:returns: A list of tuples of matching objects.
"""
# get the attributes that are in the sets
if filter_set_a is None:
filtered_attributes_a = {k: v for k, v in attributes_a.items()}
else:
filtered_attributes_a = {k: v for k, v in attributes_a.items() if k in filter_set_a}
if filter_set_b is None:
filtered_attributes_b = {k: v for k, v in attributes_b.items()}
else:
filtered_attributes_b = {k: v for k, v in attributes_b.items() if k in filter_set_b}
# add delta
for k in filtered_attributes_a:
filtered_attributes_a[k] = tuple((i+j) for i, j in zip(filtered_attributes_a[k], delta))
for k in filtered_attributes_b:
filtered_attributes_b[k] = tuple((i+j) for i, j in zip(filtered_attributes_b[k], delta))
# get closest
closest_a = _get_closest_matches(filtered_attributes_a, filtered_attributes_b)
closest_b = _get_closest_matches(filtered_attributes_b, filtered_attributes_a)
if tiebreak_with_block_similarity:
# use block similarity to break ties in the first set
for a in closest_a:
if len(closest_a[a]) > 1:
best_similarity = 0
best = []
for x in closest_a[a]:
similarity = self.block_similarity(a, x)
if similarity > best_similarity:
best_similarity = similarity
best = [x]
elif similarity == best_similarity:
best.append(x)
closest_a[a] = best
# use block similarity to break ties in the second set
for b in closest_b:
if len(closest_b[b]) > 1:
best_similarity = 0
best = []
for x in closest_b[b]:
similarity = self.block_similarity(x, b)
if similarity > best_similarity:
best_similarity = similarity
best = [x]
elif similarity == best_similarity:
best.append(x)
closest_b[b] = best
# a match (x,y) is good if x is the closest to y and y is the closest to x
matches = []
for a in closest_a:
if len(closest_a[a]) == 1:
match = closest_a[a][0]
if len(closest_b[match]) == 1 and closest_b[match][0] == a:
matches.append((a, match))
return matches
def _get_acceptable_constant_differences(self, block_a, block_b):
# keep a set of the acceptable differences in constants between the two blocks
acceptable_differences = set()
acceptable_differences.add(0)
block_a_base = block_a.instruction_addrs[0]
block_b_base = block_b.instruction_addrs[0]
acceptable_differences.add(block_b_base - block_a_base)
# get matching successors
for target_a, target_b in zip(block_a.call_targets, block_b.call_targets):
# these can be none if we couldn't resolve the call target
if target_a is None or target_b is None:
continue
acceptable_differences.add(target_b - target_a)
acceptable_differences.add((target_b - block_b_base) - (target_a - block_a_base))
# get the difference between the data segments
# this is hackish
if ".bss" in self._project_a.loader.main_bin.sections_map and \
".bss" in self._project_b.loader.main_bin.sections_map:
bss_a = self._project_a.loader.main_bin.sections_map[".bss"].min_addr
bss_b = self._project_b.loader.main_bin.sections_map[".bss"].min_addr
acceptable_differences.add(bss_b - bss_a)
acceptable_differences.add((bss_b - block_b_base) - (bss_a - block_a_base))
return acceptable_differences
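    # Illustrative example with hypothetical addresses: if block A starts at 0x400100 and its
    # counterpart B starts at 0x500100, the rebase delta 0x100000 becomes an acceptable
    # difference, so a constant that moved from 0x400200 to 0x500200 is not reported as a
    # semantic change by blocks_probably_identical(..., check_constants=True).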
class BinDiff(Analysis):
"""
    This class computes a diff between two binaries represented by angr Projects.
"""
def __init__(self, other_project, enable_advanced_backward_slicing=False, cfg_a=None, cfg_b=None):
"""
:param other_project: The second project to diff
"""
l.debug("Computing cfg's")
back_traversal = not enable_advanced_backward_slicing
if cfg_a is None:
self.cfg_a = self.project.analyses.CFG(context_sensitivity_level=1,
keep_state=True,
enable_symbolic_back_traversal=back_traversal,
enable_advanced_backward_slicing=enable_advanced_backward_slicing)
self.cfg_b = other_project.analyses.CFG(context_sensitivity_level=1,
keep_state=True,
enable_symbolic_back_traversal=back_traversal,
enable_advanced_backward_slicing=enable_advanced_backward_slicing)
else:
self.cfg_a = cfg_a
self.cfg_b = cfg_b
l.debug("Done computing cfg's")
self._p2 = other_project
self._attributes_a = dict()
        self._attributes_b = dict()
self._function_diffs = dict()
self.function_matches = set()
self._unmatched_functions_from_a = set()
self._unmatched_functions_from_b = set()
self._compute_diff()
def functions_probably_identical(self, func_a_addr, func_b_addr, check_consts=False):
"""
Compare two functions and return True if they appear identical.
:param func_a_addr: The address of the first function (in the first binary).
:param func_b_addr: The address of the second function (in the second binary).
:returns: Whether or not the functions appear to be identical.
"""
if self.cfg_a.project.is_hooked(func_a_addr) and self.cfg_b.project.is_hooked(func_b_addr):
return self.cfg_a.project._sim_procedures[func_a_addr] == self.cfg_b.project._sim_procedures[func_b_addr]
func_diff = self.get_function_diff(func_a_addr, func_b_addr)
if check_consts:
return func_diff.probably_identical_with_consts
return func_diff.probably_identical
@property
def identical_functions(self):
"""
:returns: A list of function matches that appear to be identical
"""
identical_funcs = []
for (func_a, func_b) in self.function_matches:
if self.functions_probably_identical(func_a, func_b):
identical_funcs.append((func_a, func_b))
return identical_funcs
@property
def differing_functions(self):
"""
:returns: A list of function matches that appear to differ
"""
different_funcs = []
for (func_a, func_b) in self.function_matches:
if not self.functions_probably_identical(func_a, func_b):
different_funcs.append((func_a, func_b))
return different_funcs
def differing_functions_with_consts(self):
"""
:return: A list of function matches that appear to differ including just by constants
"""
different_funcs = []
for (func_a, func_b) in self.function_matches:
if not self.functions_probably_identical(func_a, func_b, check_consts=True):
different_funcs.append((func_a, func_b))
return different_funcs
@property
def differing_blocks(self):
"""
:returns: A list of block matches that appear to differ
"""
differing_blocks = []
for (func_a, func_b) in self.function_matches:
differing_blocks.extend(self.get_function_diff(func_a, func_b).differing_blocks)
return differing_blocks
@property
def identical_blocks(self):
"""
        :return: A list of all block matches that appear to be identical
"""
identical_blocks = []
for (func_a, func_b) in self.function_matches:
identical_blocks.extend(self.get_function_diff(func_a, func_b).identical_blocks)
return identical_blocks
@property
def blocks_with_differing_constants(self):
"""
:return: A dict of block matches with differing constants to the tuple of constants
"""
diffs = dict()
for (func_a, func_b) in self.function_matches:
diffs.update(self.get_function_diff(func_a, func_b).blocks_with_differing_constants)
return diffs
@property
def unmatched_functions(self):
return self._unmatched_functions_from_a, self._unmatched_functions_from_b
# gets the diff of two functions in the binaries
def get_function_diff(self, function_addr_a, function_addr_b):
"""
:param function_addr_a: The address of the first function (in the first binary)
:param function_addr_b: The address of the second function (in the second binary)
:returns: the FunctionDiff of the two functions
"""
pair = (function_addr_a, function_addr_b)
if pair not in self._function_diffs:
function_a = self.cfg_a.kb.functions.function(function_addr_a)
function_b = self.cfg_b.kb.functions.function(function_addr_b)
self._function_diffs[pair] = FunctionDiff(function_a, function_b, self)
return self._function_diffs[pair]
@staticmethod
def _compute_function_attributes(cfg):
"""
:param cfg: An angr CFG object
:returns: a dictionary of function addresses to tuples of attributes
"""
# the attributes we use are the number of basic blocks, number of edges, and number of subfunction calls
attributes = dict()
all_funcs = set(cfg.kb.callgraph.nodes())
for function_addr in cfg.kb.functions:
# skip syscalls and functions which are None in the cfg
if cfg.kb.functions.function(function_addr) is None or cfg.kb.functions.function(function_addr).is_syscall:
continue
if cfg.kb.functions.function(function_addr) is not None:
                normalized_function = NormalizedFunction(cfg.kb.functions.function(function_addr))
                number_of_basic_blocks = len(normalized_function.graph.nodes())
                number_of_edges = len(normalized_function.graph.edges())
else:
number_of_basic_blocks = 0
number_of_edges = 0
if function_addr in all_funcs:
number_of_subfunction_calls = len(cfg.kb.callgraph.successors(function_addr))
else:
number_of_subfunction_calls = 0
attributes[function_addr] = (number_of_basic_blocks, number_of_edges, number_of_subfunction_calls)
return attributes
def _get_call_site_matches(self, func_a, func_b):
possible_matches = set()
# Make sure those functions are not SimProcedures
f_a = self.cfg_a.kb.functions.function(func_a)
f_b = self.cfg_b.kb.functions.function(func_b)
if f_a.startpoint is None or f_b.startpoint is None:
return possible_matches
fd = self.get_function_diff(func_a, func_b)
basic_block_matches = fd.block_matches
function_a = fd._function_a
function_b = fd._function_b
for (a, b) in basic_block_matches:
if a in function_a.call_sites and b in function_b.call_sites:
# add them in order
for target_a, target_b in zip(function_a.call_sites[a], function_b.call_sites[b]):
possible_matches.add((target_a, target_b))
# add them in reverse, since if a new call was added the ordering from each side
# will remain constant until the change
for target_a, target_b in zip(reversed(function_a.call_sites[a]),
reversed(function_b.call_sites[b])):
possible_matches.add((target_a, target_b))
return possible_matches
def _get_plt_matches(self):
plt_matches = []
for name, addr in self.project.loader.main_bin.plt.items():
if name in self._p2.loader.main_bin.plt:
plt_matches.append((addr, self._p2.loader.main_bin.plt[name]))
# in the case of sim procedures the actual sim procedure might be in the interfunction graph, not the plt entry
func_to_addr_a = dict()
func_to_addr_b = dict()
for (k, v) in self.project._sim_procedures.items():
if "resolves" in v[1]:
func_to_addr_a[v[1]['resolves']] = k
for (k, v) in self._p2._sim_procedures.items():
if "resolves" in v[1]:
func_to_addr_b[v[1]['resolves']] = k
for name, addr in func_to_addr_a.items():
if name in func_to_addr_b:
plt_matches.append((addr, func_to_addr_b[name]))
# remove ones that aren't in the interfunction graph, because these seem to not be consistent
all_funcs_a = set(self.cfg_a.kb.callgraph.nodes())
all_funcs_b = set(self.cfg_b.kb.callgraph.nodes())
plt_matches = [x for x in plt_matches if x[0] in all_funcs_a and x[1] in all_funcs_b]
return plt_matches
def _compute_diff(self):
# get the attributes for all functions
self.attributes_a = self._compute_function_attributes(self.cfg_a)
self.attributes_b = self._compute_function_attributes(self.cfg_b)
# get the initial matches
initial_matches = self._get_plt_matches()
initial_matches += self._get_function_matches(self.attributes_a, self.attributes_b)
for (a, b) in initial_matches:
l.debug("Initally matched (%#x, %#x)", a, b)
# Use a queue so we process matches in the order that they are found
to_process = deque(initial_matches)
# Keep track of which matches we've already added to the queue
processed_matches = set((x, y) for (x, y) in initial_matches)
# Keep a dict of current matches, which will be updated if better matches are found
matched_a = dict()
matched_b = dict()
for (x, y) in processed_matches:
matched_a[x] = y
matched_b[y] = x
# while queue is not empty
while to_process:
(func_a, func_b) = to_process.pop()
l.debug("Processing (%#x, %#x)", func_a, func_b)
# we could find new matches in the successors or predecessors of functions
if not self.project.loader.main_bin.contains_addr(func_a):
continue
if not self._p2.loader.main_bin.contains_addr(func_b):
continue
func_a_succ = self.cfg_a.kb.callgraph.successors(func_a)
func_b_succ = self.cfg_b.kb.callgraph.successors(func_b)
func_a_pred = self.cfg_a.kb.callgraph.predecessors(func_a)
func_b_pred = self.cfg_b.kb.callgraph.predecessors(func_b)
# get possible new matches
new_matches = set(self._get_function_matches(self.attributes_a, self.attributes_b,
func_a_succ, func_b_succ))
new_matches |= set(self._get_function_matches(self.attributes_a, self.attributes_b,
func_a_pred, func_b_pred))
# could also find matches as function calls of matched basic blocks
new_matches.update(self._get_call_site_matches(func_a, func_b))
# for each of the possible new matches add it if it improves the matching
for (x, y) in new_matches:
# skip none functions and syscalls
if self.cfg_a.kb.functions.function(x) is None or self.cfg_a.kb.functions.function(x).is_syscall:
continue
if self.cfg_b.kb.functions.function(y) is None or self.cfg_b.kb.functions.function(y).is_syscall:
continue
if (x, y) not in processed_matches:
processed_matches.add((x, y))
# if it's a better match than what we already have use it
l.debug("Checking function match %s, %s", hex(x), hex(y))
if _is_better_match(x, y, matched_a, matched_b, self.attributes_a, self.attributes_b):
l.debug("Adding potential match %s, %s", hex(x), hex(y))
if x in matched_a:
old_match = matched_a[x]
del matched_b[old_match]
l.debug("Removing previous match (%#x, %#x)", x, old_match)
if y in matched_b:
old_match = matched_b[y]
del matched_a[old_match]
l.debug("Removing previous match (%#x, %#x)", old_match, y)
matched_a[x] = y
matched_b[y] = x
to_process.appendleft((x, y))
# reformat matches into a set of pairs
self.function_matches = set()
for x,y in matched_a.items():
# only keep if the pair is in the binary ranges
if self.project.loader.main_bin.contains_addr(x) and self._p2.loader.main_bin.contains_addr(y):
self.function_matches.add((x, y))
# get the unmatched functions
self._unmatched_functions_from_a = set(x for x in self.attributes_a.keys() if x not in matched_a)
self._unmatched_functions_from_b = set(x for x in self.attributes_b.keys() if x not in matched_b)
# remove unneeded function diffs
for (x, y) in dict(self._function_diffs):
if (x, y) not in self.function_matches:
del self._function_diffs[(x, y)]
@staticmethod
def _get_function_matches(attributes_a, attributes_b, filter_set_a=None, filter_set_b=None):
"""
:param attributes_a: A dict of functions to their attributes
:param attributes_b: A dict of functions to their attributes
The following parameters are optional.
:param filter_set_a: A set to limit attributes_a to the functions in this set.
:param filter_set_b: A set to limit attributes_b to the functions in this set.
:returns: A list of tuples of matching objects.
"""
# get the attributes that are in the sets
if filter_set_a is None:
filtered_attributes_a = {k: v for k, v in attributes_a.items()}
else:
filtered_attributes_a = {k: v for k, v in attributes_a.items() if k in filter_set_a}
if filter_set_b is None:
filtered_attributes_b = {k: v for k, v in attributes_b.items()}
else:
filtered_attributes_b = {k: v for k, v in attributes_b.items() if k in filter_set_b}
# get closest
closest_a = _get_closest_matches(filtered_attributes_a, filtered_attributes_b)
closest_b = _get_closest_matches(filtered_attributes_b, filtered_attributes_a)
# a match (x,y) is good if x is the closest to y and y is the closest to x
matches = []
for a in closest_a:
if len(closest_a[a]) == 1:
match = closest_a[a][0]
if len(closest_b[match]) == 1 and closest_b[match][0] == a:
matches.append((a, match))
return matches
register_analysis(BinDiff, 'BinDiff')
|
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 28 09:47:17 2015
@author: ibackus
"""
# External imports
import os
import numpy as np
import cPickle as pickle
from warnings import warn
# Internal imports
from pbmov import pbmov_utils
# Setup defaults file
_dir = os.path.dirname(os.path.abspath(__file__))
fDefaults = os.path.join(_dir, '.keyframerc.py')
if not os.path.exists(fDefaults):
fDefaults = os.path.join(_dir, 'defaults.py')
class movieSettings():
"""
Defines the movieSettings class, used by pbmov for handling keyframes and
storing movie settings.
Examples
--------
>>> from movieSettings import movieSettings
>>> settings = movieSettings()
Change a movie-wide setting
>>> settings.params['nt'] = 50
Add a keyframe
>>> settings.addKeyframes('cam', [0,1,0], frames=49, zero_slope=True)
Delete a keyframe
>>> settings.delKeyframes('cam', 0)
Make sure frame-by-frame data is up-to-date
>>> settings.makeFrames()
Add frames without changing the total movie-time:
>>> settings.timeStretch(200, adjustFPS=True)
Retrieve a frame:
>>> frameData = settings.getFrame(25)
Notes
-----
Defaults are handled by defaults.py. To change the defaults, create a
.keyframerc.py file in the same folder as defaults.py
Movie-wide settings are stored in settings.params
Keyframes are stored in settings.keyframes
"""
def __init__(self):
self.defaults()
self.frames = {}
self.makeFrames()
def __getstate__(self):
"""
Defined to make self pickleable
"""
state = {'params': self.params, 'keyframes': self.keyframes}
return state
def __setstate__(self, newstate):
"""
        Defined to restore state when un-pickling self
"""
self.__init__()
self.params = newstate['params']
self.keyframes = newstate['keyframes']
self.makeFrames()
def timeStretch(self, ntNew, adjustFPS=False):
"""
        Changes the number of time steps without changing the relative time of
        the keyframes.
Note: if the number of timesteps is decreased, multiple keyframes may
overlap. Earlier keyframes are deleted (ignored) first
Parameters
----------
ntNew : int
New number of time steps (frames) for the movie
adjustFPS : bool
(optional) If True, the framerate is changed to keep the movie
runtime constant
"""
ntOld = self.params['nt']
for k, v in self.keyframes.iteritems():
self.keyframes[k] = timeStretch(v, ntNew, ntOld)
self.params['nt'] = ntNew
self.makeFrames()
if adjustFPS:
self.params['fps'] *= float(ntNew)/ntOld
def defaults(self, filename=None):
"""
Sets up the default values for movie parameters
"""
if filename is None:
filename = fDefaults
g = {}
execfile(filename, g)
params = g['params']
keyframes = g['keyframes']
logvalues = g['logvalues']
# Save to self
self.params = params
self.keyframes = keyframes
self._logvalues = logvalues
def makeFrames(self, key=None):
"""
Generates the frames for key (or all keys) by interpolating the
keyFrames. Frames are stored in self.frames[key]
Parameters
----------
key : str
(optional) Key to generate. If None, all keys are generated
"""
        # If there are no arguments, loop through and make frames for all
# keys
if key is None:
for k in self.keyframes.keys():
self.makeFrames(k)
return
# ---------------
# make frames
# ---------------
nt = self.params['nt']
# Get a copy of the keyframe value
keyframe = self.keyframes[key].copy()
# Check for Nones
for val in keyframe.values():
if val[0] is None:
# Make all values None. There's no way to interpolate a None
self.frames[key] = np.array([None]*nt)
return
# Generate interpolator
log = self.islogval(key)
interp = pbmov_utils.interpKeyframes(keyframe, nt, log)
# Evaluate interpolator
self.frames[key] = interp(np.arange(nt))
return
def islogval(self, key):
"""
Check to see if a key is expected to be a logarithmic value
"""
default_log_keys = self._logvalues
# Default log (True or False)
log = (key in default_log_keys)
if key in ('vmin', 'vmax'):
# Check if we are rendering using logarithmic color coding
            # Default to the previous value of 'log' if it is not set in the pynbody kwargs
log = self.params['pbkwargs'].get('log', log)
return log
def getFrame(self, frame):
"""
Retrieves the key, val pairs at a given frame.
(By default, runs self.makeFrames())
Parameters
----------
frame : int
Frame number
Returns
-------
frameDict : dict
Dictionary containing all the key, val pairs at frame
"""
self.makeFrames()
frameDict = {}
for key in self.keyframes.keys():
frameDict[key] = self.frames[key][frame]
return frameDict
def addKeyframes(self, key, vals, frames=0, zero_slope=False):
"""
Adds keyframe(s), specified by the key, frame number(s), and value(s)
Note that vals, frames, and zero_slope should be the same length
Parameters
----------
key : str
key (parameter) that the keyframe controls
vals : obj or list of obj
The value(s) at each frame. If frames is a list, should be a list
of the same length
frames : int or list of ints
The frame number (numbers). If a list, vals should be a list of
values
zero_slope : bool or list of bools
(see pbmov_utils.interpolate) A flag which tells whether the value
specified by key should change slowly around the current frame
"""
# Turn into lists
if not hasattr(frames, '__iter__'):
frames = [frames]
vals = [vals]
nFrames = len(frames)
if isinstance(zero_slope, bool):
zero_slope = [zero_slope] * nFrames
if key not in self.keyframes:
self.keyframes[key] = {}
for i in range(nFrames):
self.keyframes[key][frames[i]] = [vals[i], zero_slope[i]]
# Update the frames
self.makeFrames(key)
def delKeyframes(self, key, frames):
"""
Deletes keyframe(s) from self, specified by key, frame. Nothing is
done if the keyframe is not present.
Parameters
----------
key : str
Key to delete, e.g. 'cam' or 'target'
        frames : int or list of ints
keyframe number(s) to delete
"""
if not hasattr(frames, '__iter__'):
# Assume frames is an int, make it a list
frames = [frames]
# Loop through all frames
for frame in frames:
try:
# Delete the keyframe
del self.keyframes[key][frame]
except KeyError:
# Assume the keyframe is not present
pass
# Update the frames
self.makeFrames(key)
return
def save(self, filename='movieSettings.p'):
"""
Save to filename
Parameters
----------
filename : str
Filename to save to
"""
        with open(filename, 'wb') as f:
            pickle.dump(self, f)
def timeStretch(keyframes, ntNew, ntOld=None):
"""
    Changes the number of time steps without changing the relative time of
    the keyframes.
Note: if the number of timesteps is decreased, multiple keyframes may
overlap. Earlier keyframes are deleted (ignored) first
Parameters
----------
keyframes : dict
Keyframes dict (see movieSettings) for a single key.
ntNew : int
New number of time steps (frames) for the movie
ntOld : int
        (optional) old number of time steps (frames) for the movie. If None,
        the last keyframe is assumed to be the last frame of the movie.
Returns
-------
newframes : dict
Updated keyframes
"""
keyframes = keyframes.copy()
# Do nothing if nt doesn't change
if ntOld == ntNew:
return keyframes
# Original keys (frame numbers)
oldKeys = np.array(keyframes.keys())
oldKeys.sort()
if ntOld is None:
ntOld = oldKeys[-1] + 1
# Scale frame numbers
newKeys = (float(ntNew)/ntOld) * oldKeys
newKeys = np.round(newKeys).astype(int)
# Apply maximum frame number
newKeys[newKeys >= ntNew-1] = ntNew-1
# check for duplicates
if len(set(newKeys)) < len(newKeys):
warn('Duplicate keys detected after timestretch. Earlier keys will '
'be ignored')
newframes = {}
for new, old in zip(newKeys, oldKeys):
newframes[new] = keyframes[old]
return newframes
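# Illustrative usage with hypothetical keyframes: stretching a 10-frame movie to 20 frames
# rescales the keyframe indices while leaving their values untouched, e.g.
# timeStretch({0: ['a', False], 9: ['b', True]}, 20, 10) maps frame 9 to frame 18 and
# returns {0: ['a', False], 18: ['b', True]}.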
|
|
"""Training objective for self-critical learning.
Self-critic learning is a modification of the REINFORCE algorithm that uses the
reward of the train-time decoder output as a baseline in the update step.
For more details see: https://arxiv.org/pdf/1612.00563.pdf
"""
from typing import Callable, Iterable, Tuple
from itertools import takewhile
from collections import Counter
import numpy as np
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.trainers.generic_trainer import Objective
from neuralmonkey.decoders.decoder import Decoder
from neuralmonkey.vocabulary import END_TOKEN_INDEX
# pylint: disable=invalid-name
RewardFunction = Callable[[np.ndarray, np.ndarray], np.ndarray]
# pylint: enable=invalid-name
def reinforce_score(reward: tf.Tensor,
baseline: tf.Tensor,
decoded: tf.Tensor,
logits: tf.Tensor) -> tf.Tensor:
"""Cost function whose derivative is the REINFORCE equation.
This implements the primitive function to the central equation of the
REINFORCE algorithm that estimates the gradients of the loss with respect
to decoder logits.
It uses the fact that the second term of the product (the difference of the
word distribution and one hot vector of the decoded word) is a derivative
of negative log likelihood of the decoded word. The reward function and the
    baseline are, however, treated as constants, so they influence the derivative
only multiplicatively.
"""
# shape (1, batch, 1)
reward_diff = tf.expand_dims(reward - baseline, 0)
# runtime probabilities, shape (time, batch, vocab)
decoded_neg_likelihood = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=decoded, logits=logits)
# REINFORCE gradient, shape (time, batch, vocab)
score = tf.stop_gradient(reward_diff) * decoded_neg_likelihood
return score
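# In equation form (a sketch of the code above): score_t = stop_gradient(r - b) * (-log p(y_t)),
# so d(score_t)/d(logits_t) = (r - b) * (softmax(logits_t) - onehot(y_t)), which is the
# REINFORCE gradient with baseline b; stop_gradient keeps r and b out of the differentiation.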
def self_critical_objective(decoder: Decoder,
reward_function: RewardFunction,
weight: float = None) -> Objective:
"""Self-critical objective.
Args:
decoder: A recurrent decoder.
reward_function: A reward function computing score in Python.
weight: Mixing weight for a trainer.
Returns:
Objective object to be used in generic trainer.
"""
check_argument_types()
# decoded, shape (time, batch)
train_decoded = tf.argmax(decoder.train_logits, axis=2)
runtime_decoded = tf.argmax(decoder.runtime_logits, axis=2)
reference = decoder.train_inputs
# rewards, shape (batch)
train_reward = tf.py_func(
reward_function, [reference, train_decoded], tf.float32)
runtime_reward = tf.py_func(
reward_function, [reference, runtime_decoded], tf.float32)
tf.summary.scalar(
"train_{}/{}".format(decoder.data_id, reward_function.__name__),
tf.reduce_mean(runtime_reward),
collections=["summary_train"])
# REINFORCE score: shape (time, batch, vocab)
score_by_word = reinforce_score(
runtime_reward, train_reward, runtime_decoded, decoder.runtime_logits)
float_mask = tf.to_float(decoder.runtime_mask)
masked_score_by_word = score_by_word * float_mask
# sum the matrix up (dot product of rows, sum over time, and over batch)
# pylint: disable=invalid-unary-operand-type
loss = -tf.reduce_sum(masked_score_by_word) / tf.reduce_sum(float_mask)
# pylint: enable=invalid-unary-operand-type
tf.summary.scalar(
"train_{}/self_critical_cost".format(decoder.data_id),
loss,
collections=["summary_train"])
return Objective(
name="{}_self_critical".format(decoder.name),
decoder=decoder,
loss=loss,
gradients=None,
weight=weight)
def sentence_bleu(references: np.ndarray,
hypotheses: np.ndarray) -> np.ndarray:
"""Compute index-based sentence-level BLEU score.
    Computes sentence-level BLEU on the indices output by the decoder, i.e.
    whatever the decoder uses as a unit is treated as a token in the BLEU
    computation, ignoring the fact that the tokens may be sub-word units.
"""
bleu_scores = []
for ref, hyp in zip(np.transpose(references),
np.transpose(hypotheses)):
matched_counts = []
hyp_n_grams_counts = []
for n in range(1, 5):
matched, total, _ = _count_matching_n_grams(ref, hyp, n)
if n > 1:
matched += 1
total += 1
matched_counts.append(matched)
hyp_n_grams_counts.append(total)
if hyp_n_grams_counts[0] == 0:
bleu_scores.append(0.)
else:
precision = (
np.prod(matched_counts) / np.prod(hyp_n_grams_counts)) ** .25
ref_len = sum(1 for _ in
takewhile(lambda i: i != END_TOKEN_INDEX, ref))
brevity_penalty = np.min([
1., np.exp(1 - ref_len / hyp_n_grams_counts[0])])
bleu_scores.append(brevity_penalty * precision)
assert all(0 <= s <= 1 for s in bleu_scores)
return np.array(bleu_scores, dtype=np.float32)
def sentence_gleu(references: np.ndarray,
hypotheses: np.ndarray) -> np.ndarray:
"""Compute index-based GLEU score.
GLEU score is a sentence-level metric used in Google's Neural MT as a
reward in reinforcement learning (https://arxiv.org/abs/1609.08144).
It is a minimum of precision and recall on 1- to 4-grams.
It operates over the indices emitted by the decoder which are not
necessarily tokens (could be characters or subword units).
"""
gleu_scores = []
for ref, hyp in zip(np.transpose(references),
np.transpose(hypotheses)):
matched_counts = []
hyp_n_grams_counts = []
ref_n_grams_counts = []
for n in range(1, 5):
matched, total_hyp, total_ref = _count_matching_n_grams(
ref, hyp, n)
matched_counts.append(matched)
hyp_n_grams_counts.append(total_hyp)
ref_n_grams_counts.append(total_ref)
precision = np.sum(matched_counts) / np.sum(hyp_n_grams_counts)
recall = np.sum(matched_counts) / np.sum(ref_n_grams_counts)
assert 0. <= precision <= 1.0
assert 0. <= recall <= 1.0
gleu_scores.append(min(precision, recall))
return np.array(gleu_scores, dtype=np.float32)
def _count_matching_n_grams(ref: np.ndarray,
hyp: np.ndarray,
n: int) -> Tuple[int, int, int]:
ref_counts = Counter() # type: Counter[str]
total_ref_n_grams = 0
for n_gram in _get_n_grams(ref, n):
ref_counts[str(n_gram)] += 1
total_ref_n_grams += 1
matched_n_grams = 0
total_hyp_n_grams = 0
hyp_n_grams = _get_n_grams(hyp, n)
for n_gram in hyp_n_grams:
n_gram_s = str(n_gram)
if ref_counts[n_gram_s] > 0:
matched_n_grams += 1
ref_counts[n_gram_s] -= 1
total_hyp_n_grams += 1
assert matched_n_grams <= total_hyp_n_grams
assert matched_n_grams <= total_ref_n_grams
return matched_n_grams, total_hyp_n_grams, total_ref_n_grams
def _get_n_grams(indices: np.ndarray, order: int) -> Iterable[np.ndarray]:
all_n_grams = [indices[i:i + order]
for i in range(len(indices) - order + 1)]
return takewhile(lambda g: g[-1] != END_TOKEN_INDEX, all_n_grams)
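# Illustrative behaviour (writing EOS for END_TOKEN_INDEX): for indices [5, 6, 7, EOS, EOS]
# and order 2 the sliding windows are [5, 6], [6, 7], [7, EOS], [EOS, EOS]; takewhile stops
# at the first window that ends in EOS, so only [5, 6] and [6, 7] are counted.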
|
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import time
import logging
import datetime
import hashlib
import binascii
import functools
import weakref
from six import string_types, text_type
import dateutil.parser
from dateutil.tz import tzlocal, tzutc
import botocore
from botocore.exceptions import InvalidExpressionError, ConfigNotFound
from botocore.exceptions import InvalidDNSNameError, ClientError
from botocore.exceptions import MetadataRetrievalError
from botocore.compat import json, quote, zip_longest, urlsplit, urlunsplit
from botocore.vendored import requests
from botocore.compat import OrderedDict
logger = logging.getLogger(__name__)
DEFAULT_METADATA_SERVICE_TIMEOUT = 1
METADATA_SECURITY_CREDENTIALS_URL = (
'http://169.254.169.254/latest/meta-data/iam/security-credentials/'
)
# These are chars that do not need to be urlencoded.
# Based on rfc2986, section 2.3
SAFE_CHARS = '-._~'
LABEL_RE = re.compile('[a-z0-9][a-z0-9\-]*[a-z0-9]')
RESTRICTED_REGIONS = [
'us-gov-west-1',
'fips-us-gov-west-1',
]
RETRYABLE_HTTP_ERRORS = (requests.Timeout, requests.ConnectionError)
S3_ACCELERATE_WHITELIST = ['dualstack']
class _RetriesExceededError(Exception):
"""Internal exception used when the number of retries are exceeded."""
pass
def get_service_module_name(service_model):
"""Returns the module name for a service
This is the value used in both the documentation and client class name
"""
name = service_model.metadata.get(
'serviceAbbreviation',
service_model.metadata.get(
'serviceFullName', service_model.service_name))
name = name.replace('Amazon', '')
name = name.replace('AWS', '')
name = re.sub('\W+', '', name)
return name
def normalize_url_path(path):
if not path:
return '/'
return remove_dot_segments(path)
def remove_dot_segments(url):
# RFC 3986, section 5.2.4 "Remove Dot Segments"
# Also, AWS services require consecutive slashes to be removed,
# so that's done here as well
if not url:
return ''
input_url = url.split('/')
output_list = []
for x in input_url:
if x and x != '.':
if x == '..':
if output_list:
output_list.pop()
else:
output_list.append(x)
if url[0] == '/':
first = '/'
else:
first = ''
if url[-1] == '/' and output_list:
last = '/'
else:
last = ''
return first + '/'.join(output_list) + last
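# Illustrative behaviour: dot segments are resolved and consecutive slashes collapse.
#   >>> remove_dot_segments('/foo/./bar/../baz/')
#   '/foo/baz/'
#   >>> remove_dot_segments('/a//b')
#   '/a/b'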
def validate_jmespath_for_set(expression):
# Validates a limited jmespath expression to determine if we can set a
# value based on it. Only works with dotted paths.
if not expression or expression == '.':
raise InvalidExpressionError(expression=expression)
for invalid in ['[', ']', '*']:
if invalid in expression:
raise InvalidExpressionError(expression=expression)
def set_value_from_jmespath(source, expression, value, is_first=True):
# This takes a (limited) jmespath-like expression & can set a value based
# on it.
# Limitations:
# * Only handles dotted lookups
# * No offsets/wildcards/slices/etc.
if is_first:
validate_jmespath_for_set(expression)
bits = expression.split('.', 1)
current_key, remainder = bits[0], bits[1] if len(bits) > 1 else ''
if not current_key:
raise InvalidExpressionError(expression=expression)
if remainder:
if current_key not in source:
# We've got something in the expression that's not present in the
# source (new key). If there's any more bits, we'll set the key
# with an empty dictionary.
source[current_key] = {}
return set_value_from_jmespath(
source[current_key],
remainder,
value,
is_first=False
)
# If we're down to a single key, set it.
source[current_key] = value
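# Illustrative usage: intermediate dictionaries are created as needed along the dotted path.
#   >>> d = {'a': {'b': 1}}
#   >>> set_value_from_jmespath(d, 'a.c.d', 2)
#   >>> d == {'a': {'b': 1, 'c': {'d': 2}}}
#   True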
class InstanceMetadataFetcher(object):
def __init__(self, timeout=DEFAULT_METADATA_SERVICE_TIMEOUT,
num_attempts=1, url=METADATA_SECURITY_CREDENTIALS_URL):
self._timeout = timeout
self._num_attempts = num_attempts
self._url = url
def _get_request(self, url, timeout, num_attempts=1):
for i in range(num_attempts):
try:
response = requests.get(url, timeout=timeout)
except RETRYABLE_HTTP_ERRORS as e:
logger.debug("Caught exception while trying to retrieve "
"credentials: %s", e, exc_info=True)
else:
if response.status_code == 200:
return response
raise _RetriesExceededError()
def retrieve_iam_role_credentials(self):
data = {}
url = self._url
timeout = self._timeout
num_attempts = self._num_attempts
try:
r = self._get_request(url, timeout, num_attempts)
if r.content:
fields = r.content.decode('utf-8').split('\n')
for field in fields:
if field.endswith('/'):
data[field[0:-1]] = self.retrieve_iam_role_credentials(
url + field, timeout, num_attempts)
else:
val = self._get_request(
url + field,
timeout=timeout,
num_attempts=num_attempts).content.decode('utf-8')
if val[0] == '{':
val = json.loads(val)
data[field] = val
else:
logger.debug("Metadata service returned non 200 status code "
"of %s for url: %s, content body: %s",
r.status_code, url, r.content)
except _RetriesExceededError:
logger.debug("Max number of attempts exceeded (%s) when "
"attempting to retrieve data from metadata service.",
num_attempts)
# We sort for stable ordering. In practice, this should only consist
# of one role, but may need revisiting if this expands in the future.
final_data = {}
for role_name in sorted(data):
final_data = {
'role_name': role_name,
'access_key': data[role_name]['AccessKeyId'],
'secret_key': data[role_name]['SecretAccessKey'],
'token': data[role_name]['Token'],
'expiry_time': data[role_name]['Expiration'],
}
return final_data
def merge_dicts(dict1, dict2, append_lists=False):
"""Given two dict, merge the second dict into the first.
The dicts can have arbitrary nesting.
:param append_lists: If true, instead of clobbering a list with the new
value, append all of the new values onto the original list.
"""
for key in dict2:
if isinstance(dict2[key], dict):
if key in dict1 and key in dict2:
merge_dicts(dict1[key], dict2[key])
else:
dict1[key] = dict2[key]
# If the value is a list and the ``append_lists`` flag is set,
# append the new values onto the original list
elif isinstance(dict2[key], list) and append_lists:
# The value in dict1 must be a list in order to append new
# values onto it.
if key in dict1 and isinstance(dict1[key], list):
dict1[key].extend(dict2[key])
else:
dict1[key] = dict2[key]
else:
# At scalar types, we iterate and merge the
# current dict that we're on.
dict1[key] = dict2[key]
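# Illustrative usage: nested dicts are merged key by key, and top-level lists are appended
# rather than clobbered when append_lists=True.
#   >>> d1 = {'a': {'x': 1}, 'tags': ['t1']}
#   >>> merge_dicts(d1, {'a': {'y': 2}, 'tags': ['t2']}, append_lists=True)
#   >>> d1 == {'a': {'x': 1, 'y': 2}, 'tags': ['t1', 't2']}
#   True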
def parse_key_val_file(filename, _open=open):
try:
with _open(filename) as f:
contents = f.read()
return parse_key_val_file_contents(contents)
except OSError:
raise ConfigNotFound(path=filename)
def parse_key_val_file_contents(contents):
# This was originally extracted from the EC2 credential provider, which was
# fairly lenient in its parsing. We only try to parse key/val pairs if
# there's a '=' in the line.
final = {}
for line in contents.splitlines():
if '=' not in line:
continue
key, val = line.split('=', 1)
key = key.strip()
val = val.strip()
final[key] = val
return final
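# Illustrative sketch of parse_key_val_file_contents: lines without '=' are skipped and
# whitespace around keys and values is stripped. The sample contents are placeholders.
def _parse_key_val_file_contents_example():
    contents = "AWSAccessKeyId = foo\nthis line has no equals sign\nAWSSecretKey=bar\n"
    return parse_key_val_file_contents(contents)
    # -> {'AWSAccessKeyId': 'foo', 'AWSSecretKey': 'bar'}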
def percent_encode_sequence(mapping, safe=SAFE_CHARS):
"""Urlencode a dict or list into a string.
This is similar to urllib.urlencode except that:
* It uses quote, and not quote_plus
* It has a default list of safe chars that don't need
to be encoded, which matches what AWS services expect.
If any value in the input ``mapping`` is a list type,
    then each list element will be serialized. This is equivalent
    to ``urlencode``'s ``doseq=True`` argument.
This function should be preferred over the stdlib
``urlencode()`` function.
:param mapping: Either a dict to urlencode or a list of
``(key, value)`` pairs.
"""
encoded_pairs = []
if hasattr(mapping, 'items'):
pairs = mapping.items()
else:
pairs = mapping
for key, value in pairs:
if isinstance(value, list):
for element in value:
encoded_pairs.append('%s=%s' % (percent_encode(key),
percent_encode(element)))
else:
encoded_pairs.append('%s=%s' % (percent_encode(key),
percent_encode(value)))
return '&'.join(encoded_pairs)
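# Illustrative sketch of percent_encode_sequence: list values expand into repeated key=value
# pairs (the ``doseq=True`` behaviour described above), and because quote() is used instead
# of quote_plus(), spaces become %20 rather than '+'. The parameters are placeholders.
def _percent_encode_sequence_example():
    params = [('marker', ['a b', 'c']), ('prefix', 'x')]
    return percent_encode_sequence(params)
    # -> 'marker=a%20b&marker=c&prefix=x'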
def percent_encode(input_str, safe=SAFE_CHARS):
"""Urlencodes a string.
Whereas percent_encode_sequence handles taking a dict/sequence and
producing a percent encoded string, this function deals only with
taking a string (not a dict/sequence) and percent encoding it.
"""
if not isinstance(input_str, string_types):
input_str = text_type(input_str)
return quote(text_type(input_str).encode('utf-8'), safe=safe)
def parse_timestamp(value):
"""Parse a timestamp into a datetime object.
Supported formats:
* iso8601
* rfc822
* epoch (value is an integer)
This will return a ``datetime.datetime`` object.
"""
if isinstance(value, (int, float)):
# Possibly an epoch time.
return datetime.datetime.fromtimestamp(value, tzlocal())
else:
try:
return datetime.datetime.fromtimestamp(float(value), tzlocal())
except (TypeError, ValueError):
pass
try:
return dateutil.parser.parse(value)
except (TypeError, ValueError) as e:
raise ValueError('Invalid timestamp "%s": %s' % (value, e))
def parse_to_aware_datetime(value):
"""Converted the passed in value to a datetime object with tzinfo.
This function can be used to normalize all timestamp inputs. This
function accepts a number of different types of inputs, but
will always return a datetime.datetime object with time zone
information.
The input param ``value`` can be one of several types:
* A datetime object (both naive and aware)
* An integer representing the epoch time (can also be a string
of the integer, i.e '0', instead of 0). The epoch time is
considered to be UTC.
* An iso8601 formatted timestamp. This does not need to be
a complete timestamp, it can contain just the date portion
without the time component.
The returned value will be a datetime object that will have tzinfo.
If no timezone info was provided in the input value, then UTC is
assumed, not local time.
"""
# This is a general purpose method that handles several cases of
# converting the provided value to a string timestamp suitable to be
# serialized to an http request. It can handle:
# 1) A datetime.datetime object.
if isinstance(value, datetime.datetime):
datetime_obj = value
else:
# 2) A string object that's formatted as a timestamp.
# We document this as being an iso8601 timestamp, although
# parse_timestamp is a bit more flexible.
datetime_obj = parse_timestamp(value)
if datetime_obj.tzinfo is None:
        # A case could be made that if no time zone is provided,
# we should use the local time. However, to restore backwards
# compat, the previous behavior was to assume UTC, which is
# what we're going to do here.
datetime_obj = datetime_obj.replace(tzinfo=tzutc())
else:
datetime_obj = datetime_obj.astimezone(tzutc())
return datetime_obj
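# Illustrative sketch of parse_to_aware_datetime: each supported input form is normalized to
# a timezone-aware datetime, with naive inputs assumed to be UTC as documented above.
def _parse_to_aware_datetime_example():
    from_epoch = parse_to_aware_datetime(0)  # 1970-01-01 00:00:00+00:00
    from_string = parse_to_aware_datetime('2015-01-01T12:00:00Z')
    from_naive = parse_to_aware_datetime(datetime.datetime(2015, 1, 1))
    # All three results carry tzinfo and are normalized to UTC.
    return from_epoch, from_string, from_naive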
def datetime2timestamp(dt, default_timezone=None):
"""Calculate the timestamp based on the given datetime instance.
:type dt: datetime
:param dt: A datetime object to be converted into timestamp
:type default_timezone: tzinfo
    :param default_timezone: Timezone to assume when ``dt`` is a naive
        datetime; if None, UTC (``tzutc()``) is assumed.
:returns: The timestamp
"""
epoch = datetime.datetime(1970, 1, 1)
if dt.tzinfo is None:
if default_timezone is None:
default_timezone = tzutc()
dt = dt.replace(tzinfo=default_timezone)
d = dt.replace(tzinfo=None) - dt.utcoffset() - epoch
if hasattr(d, "total_seconds"):
return d.total_seconds() # Works in Python 2.7+
return (d.microseconds + (d.seconds + d.days * 24 * 3600) * 10**6) / 10**6
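# Illustrative sketch of datetime2timestamp: a naive datetime is treated as UTC when no
# default_timezone is given, so the Unix epoch maps to 0.0 and one day later to 86400.0.
def _datetime2timestamp_example():
    assert datetime2timestamp(datetime.datetime(1970, 1, 1)) == 0.0
    return datetime2timestamp(datetime.datetime(1970, 1, 2))  # 86400.0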
def calculate_sha256(body, as_hex=False):
"""Calculate a sha256 checksum.
This method will calculate the sha256 checksum of a file like
object. Note that this method will iterate through the entire
file contents. The caller is responsible for ensuring the proper
starting position of the file and ``seek()``'ing the file back
to its starting location if other consumers need to read from
the file like object.
:param body: Any file like object. The file must be opened
in binary mode such that a ``.read()`` call returns bytes.
:param as_hex: If True, then the hex digest is returned.
If False, then the digest (as binary bytes) is returned.
:returns: The sha256 checksum
"""
checksum = hashlib.sha256()
for chunk in iter(lambda: body.read(1024 * 1024), b''):
checksum.update(chunk)
if as_hex:
return checksum.hexdigest()
else:
return checksum.digest()
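# Illustrative sketch of calculate_sha256: any binary file-like object works, so an
# in-memory BytesIO is enough to check the helper against a well-known digest.
def _calculate_sha256_example():
    import io
    body = io.BytesIO(b'hello world')
    return calculate_sha256(body, as_hex=True)
    # -> 'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9'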
def calculate_tree_hash(body):
"""Calculate a tree hash checksum.
For more information see:
http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html
:param body: Any file like object. This has the same constraints as
the ``body`` param in calculate_sha256
:rtype: str
:returns: The hex version of the calculated tree hash
"""
chunks = []
required_chunk_size = 1024 * 1024
sha256 = hashlib.sha256
for chunk in iter(lambda: body.read(required_chunk_size), b''):
chunks.append(sha256(chunk).digest())
if not chunks:
return sha256(b'').hexdigest()
while len(chunks) > 1:
new_chunks = []
for first, second in _in_pairs(chunks):
if second is not None:
new_chunks.append(sha256(first + second).digest())
else:
# We're at the end of the list and there's no pair left.
new_chunks.append(first)
chunks = new_chunks
return binascii.hexlify(chunks[0]).decode('ascii')
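# Illustrative sketch of calculate_tree_hash: for a body that fits in a single 1 MB chunk
# the tree hash collapses to a plain sha256 of the contents, which makes the pairwise
# combining loop above easy to sanity-check.
def _calculate_tree_hash_example():
    import io
    assert (calculate_tree_hash(io.BytesIO(b'hello world')) ==
            calculate_sha256(io.BytesIO(b'hello world'), as_hex=True))
    return calculate_tree_hash(io.BytesIO(b''))  # sha256 hexdigest of b''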
def _in_pairs(iterable):
# Creates iterator that iterates over the list in pairs:
# for a, b in _in_pairs([0, 1, 2, 3, 4]):
# print(a, b)
#
# will print:
# 0, 1
# 2, 3
# 4, None
shared_iter = iter(iterable)
# Note that zip_longest is a compat import that uses
# the itertools izip_longest. This creates an iterator,
# this call below does _not_ immediately create the list
# of pairs.
return zip_longest(shared_iter, shared_iter)
class CachedProperty(object):
"""A read only property that caches the initially computed value.
This descriptor will only call the provided ``fget`` function once.
Subsequent access to this property will return the cached value.
"""
def __init__(self, fget):
self._fget = fget
def __get__(self, obj, cls):
if obj is None:
return self
else:
computed_value = self._fget(obj)
obj.__dict__[self._fget.__name__] = computed_value
return computed_value
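# Illustrative sketch of CachedProperty: because the descriptor only defines __get__, the
# value stored in the instance __dict__ shadows it on later lookups, so the wrapped
# function runs exactly once per instance. The class below is a placeholder example.
class _CachedPropertyExample(object):
    def __init__(self):
        self.call_count = 0
    @CachedProperty
    def value(self):
        self.call_count += 1
        return self.call_count  # first access computes 1; later reads reuse the cached 1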
class ArgumentGenerator(object):
"""Generate sample input based on a shape model.
This class contains a ``generate_skeleton`` method that will take
an input/output shape (created from ``botocore.model``) and generate
a sample dictionary corresponding to the input/output shape.
    The specific values used are placeholder values. For strings either an
empty string or the member name can be used, for numbers 0 or 0.0 is used.
The intended usage of this class is to generate the *shape* of the input
structure.
This can be useful for operations that have complex input shapes.
This allows a user to just fill in the necessary data instead of
worrying about the specific structure of the input arguments.
Example usage::
s = botocore.session.get_session()
ddb = s.get_service_model('dynamodb')
arg_gen = ArgumentGenerator()
sample_input = arg_gen.generate_skeleton(
ddb.operation_model('CreateTable').input_shape)
print("Sample input for dynamodb.CreateTable: %s" % sample_input)
"""
def __init__(self, use_member_names=False):
self._use_member_names = use_member_names
def generate_skeleton(self, shape):
"""Generate a sample input.
:type shape: ``botocore.model.Shape``
:param shape: The input shape.
:return: The generated skeleton input corresponding to the
provided input shape.
"""
stack = []
return self._generate_skeleton(shape, stack)
def _generate_skeleton(self, shape, stack, name=''):
stack.append(shape.name)
try:
if shape.type_name == 'structure':
return self._generate_type_structure(shape, stack)
elif shape.type_name == 'list':
return self._generate_type_list(shape, stack)
elif shape.type_name == 'map':
return self._generate_type_map(shape, stack)
elif shape.type_name == 'string':
if self._use_member_names:
return name
return ''
elif shape.type_name in ['integer', 'long']:
return 0
elif shape.type_name == 'float':
return 0.0
elif shape.type_name == 'boolean':
return True
finally:
stack.pop()
def _generate_type_structure(self, shape, stack):
if stack.count(shape.name) > 1:
return {}
skeleton = OrderedDict()
for member_name, member_shape in shape.members.items():
skeleton[member_name] = self._generate_skeleton(
member_shape, stack, name=member_name)
return skeleton
def _generate_type_list(self, shape, stack):
        # For list elements we've arbitrarily decided to
        # return a single element for the skeleton list.
return [
self._generate_skeleton(shape.member, stack),
]
def _generate_type_map(self, shape, stack):
key_shape = shape.key
value_shape = shape.value
assert key_shape.type_name == 'string'
return OrderedDict([
('KeyName', self._generate_skeleton(value_shape, stack)),
])
def is_valid_endpoint_url(endpoint_url):
"""Verify the endpoint_url is valid.
:type endpoint_url: string
:param endpoint_url: An endpoint_url. Must have at least a scheme
and a hostname.
:return: True if the endpoint url is valid. False otherwise.
"""
parts = urlsplit(endpoint_url)
hostname = parts.hostname
if hostname is None:
return False
if len(hostname) > 255:
return False
if hostname[-1] == ".":
hostname = hostname[:-1]
allowed = re.compile(
"^((?!-)[A-Z\d-]{1,63}(?<!-)\.)*((?!-)[A-Z\d-]{1,63}(?<!-))$",
re.IGNORECASE)
return allowed.match(hostname)
def check_dns_name(bucket_name):
"""
Check to see if the ``bucket_name`` complies with the
restricted DNS naming conventions necessary to allow
access via virtual-hosting style.
Even though "." characters are perfectly valid in this DNS
naming scheme, we are going to punt on any name containing a
"." character because these will cause SSL cert validation
problems if we try to use virtual-hosting style addressing.
"""
if '.' in bucket_name:
return False
n = len(bucket_name)
if n < 3 or n > 63:
# Wrong length
return False
if n == 1:
if not bucket_name.isalnum():
return False
match = LABEL_RE.match(bucket_name)
if match is None or match.end() != len(bucket_name):
return False
return True
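# Illustrative sketch of check_dns_name: names containing dots or outside the 3-63 character
# range are rejected; the positive case assumes the lowercase LABEL_RE defined earlier in
# this module. The bucket names are placeholders.
def _check_dns_name_example():
    assert check_dns_name('my-bucket-123')   # DNS compatible, virtual hosting is safe
    assert not check_dns_name('my.bucket')   # dots would break SSL cert validation
    assert not check_dns_name('ab')          # too short (fewer than 3 characters)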
def fix_s3_host(request, signature_version, region_name,
default_endpoint_url='s3.amazonaws.com', **kwargs):
"""
This handler looks at S3 requests just before they are signed.
If there is a bucket name on the path (true for everything except
ListAllBuckets) it checks to see if that bucket name conforms to
the DNS naming conventions. If it does, it alters the request to
use ``virtual hosting`` style addressing rather than ``path-style``
addressing. This allows us to avoid 301 redirects for all
bucket names that can be CNAME'd.
"""
# By default we do not use virtual hosted style addressing when
# signed with signature version 4.
if signature_version is not botocore.UNSIGNED and \
's3v4' in signature_version:
return
elif not _allowed_region(region_name):
return
try:
switch_to_virtual_host_style(
request, signature_version, default_endpoint_url)
except InvalidDNSNameError as e:
bucket_name = e.kwargs['bucket_name']
logger.debug('Not changing URI, bucket is not DNS compatible: %s',
bucket_name)
def switch_to_virtual_host_style(request, signature_version,
default_endpoint_url=None, **kwargs):
"""
This is a handler to force virtual host style s3 addressing no matter
the signature version (which is taken in consideration for the default
case). If the bucket is not DNS compatible an InvalidDNSName is thrown.
    :param request: An AWSRequest object that is about to be sent.
:param signature_version: The signature version to sign with
:param default_endpoint_url: The endpoint to use when switching to a
virtual style. If None is supplied, the virtual host will be
constructed from the url of the request.
"""
if request.auth_path is not None:
# The auth_path has already been applied (this may be a
# retried request). We don't need to perform this
# customization again.
return
elif _is_get_bucket_location_request(request):
# For the GetBucketLocation response, we should not be using
# the virtual host style addressing so we can avoid any sigv4
# issues.
logger.debug("Request is GetBucketLocation operation, not checking "
"for DNS compatibility.")
return
parts = urlsplit(request.url)
request.auth_path = parts.path
path_parts = parts.path.split('/')
    # Retrieve the endpoint that we will be prepending the bucket name to.
if default_endpoint_url is None:
default_endpoint_url = parts.netloc
if len(path_parts) > 1:
bucket_name = path_parts[1]
if not bucket_name:
# If the bucket name is empty we should not be checking for
# dns compatibility.
return
logger.debug('Checking for DNS compatible bucket for: %s',
request.url)
if check_dns_name(bucket_name):
# If the operation is on a bucket, the auth_path must be
# terminated with a '/' character.
if len(path_parts) == 2:
if request.auth_path[-1] != '/':
request.auth_path += '/'
path_parts.remove(bucket_name)
# At the very least the path must be a '/', such as with the
# CreateBucket operation when DNS style is being used. If this
# is not used you will get an empty path which is incorrect.
path = '/'.join(path_parts) or '/'
global_endpoint = default_endpoint_url
host = bucket_name + '.' + global_endpoint
new_tuple = (parts.scheme, host, path,
parts.query, '')
new_uri = urlunsplit(new_tuple)
request.url = new_uri
logger.debug('URI updated to: %s', new_uri)
else:
raise InvalidDNSNameError(bucket_name=bucket_name)
def _is_get_bucket_location_request(request):
return request.url.endswith('?location')
def _allowed_region(region_name):
return region_name not in RESTRICTED_REGIONS
def instance_cache(func):
"""Method decorator for caching method calls to a single instance.
**This is not a general purpose caching decorator.**
In order to use this, you *must* provide an ``_instance_cache``
attribute on the instance.
    This decorator is used to cache method calls. The cache is only
    scoped to a single instance, so multiple instances each maintain
    their own cache.
"""
func_name = func.__name__
@functools.wraps(func)
def _cache_guard(self, *args, **kwargs):
cache_key = (func_name, args)
if kwargs:
kwarg_items = tuple(sorted(kwargs.items()))
cache_key = (func_name, args, kwarg_items)
result = self._instance_cache.get(cache_key)
if result is not None:
return result
result = func(self, *args, **kwargs)
self._instance_cache[cache_key] = result
return result
return _cache_guard
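# Illustrative sketch of instance_cache: the decorated method caches per instance, keyed by
# method name plus arguments, using the ``_instance_cache`` dict that the instance must
# supply. The class below is a placeholder example.
class _InstanceCacheExample(object):
    def __init__(self):
        self._instance_cache = {}
        self.call_count = 0
    @instance_cache
    def describe(self, name):
        self.call_count += 1
        return 'described-%s' % name  # repeated calls with the same name hit the cache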
def switch_host_s3_accelerate(request, operation_name, **kwargs):
"""Switches the current s3 endpoint with an S3 Accelerate endpoint"""
# Note that when registered the switching of the s3 host happens
# before it gets changed to virtual. So we are not concerned with ensuring
# that the bucket name is translated to the virtual style here and we
# can hard code the Accelerate endpoint.
parts = urlsplit(request.url).netloc.split('.')
parts = [p for p in parts if p in S3_ACCELERATE_WHITELIST]
endpoint = 'https://s3-accelerate.'
if len(parts) > 0:
endpoint += '.'.join(parts) + '.'
endpoint += 'amazonaws.com'
if operation_name in ['ListBuckets', 'CreateBucket', 'DeleteBucket']:
return
_switch_hosts(request, endpoint, use_new_scheme=False)
def switch_host_with_param(request, param_name):
"""Switches the host using a parameter value from a JSON request body"""
request_json = json.loads(request.data.decode('utf-8'))
if request_json.get(param_name):
new_endpoint = request_json[param_name]
_switch_hosts(request, new_endpoint)
def _switch_hosts(request, new_endpoint, use_new_scheme=True):
final_endpoint = _get_new_endpoint(
request.url, new_endpoint, use_new_scheme)
request.url = final_endpoint
def _get_new_endpoint(original_endpoint, new_endpoint, use_new_scheme=True):
new_endpoint_components = urlsplit(new_endpoint)
original_endpoint_components = urlsplit(original_endpoint)
scheme = original_endpoint_components.scheme
if use_new_scheme:
scheme = new_endpoint_components.scheme
final_endpoint_components = (
scheme,
new_endpoint_components.netloc,
original_endpoint_components.path,
original_endpoint_components.query,
''
)
final_endpoint = urlunsplit(final_endpoint_components)
logger.debug('Updating URI from %s to %s' % (
original_endpoint, final_endpoint))
return final_endpoint
class S3RegionRedirector(object):
def __init__(self, endpoint_bridge, client, cache=None):
self._endpoint_resolver = endpoint_bridge
self._cache = cache
if self._cache is None:
self._cache = {}
# This needs to be a weak ref in order to prevent memory leaks on
# python 2.6
self._client = weakref.proxy(client)
def register(self, event_emitter=None):
emitter = event_emitter or self._client.meta.events
emitter.register('needs-retry.s3', self.redirect_from_error)
emitter.register('before-call.s3', self.set_request_url)
emitter.register('before-parameter-build.s3',
self.redirect_from_cache)
def redirect_from_error(self, request_dict, response, operation, **kwargs):
"""
An S3 request sent to the wrong region will return an error that
contains the endpoint the request should be sent to. This handler
will add the redirect information to the signing context and then
redirect the request.
"""
if response is None:
            # This could be None if there was a ConnectionError or other
# transport error.
return
error = response[1].get('Error', {})
error_code = error.get('Code')
if error_code == '301':
# A raw 301 error might be returned for several reasons, but we
# only want to try to redirect it if it's a HeadObject or
# HeadBucket because all other operations will return
# PermanentRedirect if region is incorrect.
if operation.name not in ['HeadObject', 'HeadBucket']:
return
elif error_code != 'PermanentRedirect':
return
bucket = request_dict['context']['signing']['bucket']
client_region = request_dict['context'].get('client_region')
new_region = self.get_bucket_region(bucket, response)
if new_region is None:
logger.debug(
"S3 client configured for region %s but the bucket %s is not "
"in that region and the proper region could not be "
"automatically determined." % (client_region, bucket))
return
logger.debug(
"S3 client configured for region %s but the bucket %s is in region"
" %s; Please configure the proper region to avoid multiple "
"unnecessary redirects and signing attempts." % (
client_region, bucket, new_region))
endpoint = self._endpoint_resolver.resolve('s3', new_region)
endpoint = endpoint['endpoint_url']
signing_context = {
'region': new_region,
'bucket': bucket,
'endpoint': endpoint
}
request_dict['context']['signing'] = signing_context
self._cache[bucket] = signing_context
self.set_request_url(request_dict, request_dict['context'])
# Return 0 so it doesn't wait to retry
return 0
def get_bucket_region(self, bucket, response):
"""
There are multiple potential sources for the new region to redirect to,
but they aren't all universally available for use. This will try to
        find the region from response elements, but will fall back to calling
HEAD on the bucket if all else fails.
:param bucket: The bucket to find the region for. This is necessary if
the region is not available in the error response.
:param response: A response representing a service request that failed
due to incorrect region configuration.
"""
# First try to source the region from the headers.
service_response = response[1]
response_headers = service_response['ResponseMetadata']['HTTPHeaders']
if 'x-amz-bucket-region' in response_headers:
return response_headers['x-amz-bucket-region']
# Next, check the error body
region = service_response.get('Error', {}).get('Region', None)
if region is not None:
return region
# Finally, HEAD the bucket. No other choice sadly.
try:
response = self._client.head_bucket(Bucket=bucket)
headers = response['ResponseMetadata']['HTTPHeaders']
except ClientError as e:
headers = e.response['ResponseMetadata']['HTTPHeaders']
region = headers.get('x-amz-bucket-region', None)
return region
def set_request_url(self, params, context, **kwargs):
endpoint = context.get('signing', {}).get('endpoint', None)
if endpoint is not None:
params['url'] = _get_new_endpoint(params['url'], endpoint, False)
def redirect_from_cache(self, params, context, **kwargs):
"""
This handler retrieves a given bucket's signing context from the cache
and adds it into the request context.
"""
bucket = params.get('Bucket')
signing_context = self._cache.get(bucket)
if signing_context is not None:
context['signing'] = signing_context
else:
context['signing'] = {'bucket': bucket}
class ContainerMetadataFetcher(object):
TIMEOUT_SECONDS = 2
RETRY_ATTEMPTS = 3
SLEEP_TIME = 1
IP_ADDRESS = '169.254.170.2'
def __init__(self, session=None, sleep=time.sleep):
if session is None:
session = requests.Session()
self._session = session
self._sleep = sleep
def retrieve_uri(self, relative_uri):
"""Retrieve JSON metadata from ECS metadata.
:type relative_uri: str
:param relative_uri: A relative URI, e.g "/foo/bar?id=123"
:return: The parsed JSON response.
"""
full_url = self._full_url(relative_uri)
headers = {'Accept': 'application/json'}
attempts = 0
while True:
try:
return self._get_response(full_url, headers, self.TIMEOUT_SECONDS)
except MetadataRetrievalError as e:
logger.debug("Received error when attempting to retrieve "
"ECS metadata: %s", e, exc_info=True)
self._sleep(self.SLEEP_TIME)
attempts += 1
if attempts >= self.RETRY_ATTEMPTS:
raise
def _get_response(self, full_url, headers, timeout):
try:
response = self._session.get(full_url, headers=headers,
timeout=timeout)
if response.status_code != 200:
raise MetadataRetrievalError(
error_msg="Received non 200 response (%s) from ECS metadata: %s"
% (response.status_code, response.text))
try:
return json.loads(response.text)
except ValueError:
raise MetadataRetrievalError(
error_msg=("Unable to parse JSON returned from "
"ECS metadata: %s" % response.text))
except RETRYABLE_HTTP_ERRORS as e:
error_msg = ("Received error when attempting to retrieve "
"ECS metadata: %s" % e)
raise MetadataRetrievalError(error_msg=error_msg)
def _full_url(self, relative_uri):
return 'http://%s%s' % (self.IP_ADDRESS, relative_uri)
|
|
# Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""Define bytecode interpreter that supports iteration on bytecode"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
import sys
import dis
from dis import opmap
from collections import defaultdict
from opcode import HAVE_ARGUMENT, cmp_op
from .instructions import Instruction
CALL_FUNCTIONS = {
opmap["CALL_FUNCTION"],
opmap["CALL_FUNCTION_KW"],
}
if sys.version_info < (3, 6):
CALL_FUNCTIONS.add(opmap["CALL_FUNCTION_VAR"])
CALL_FUNCTIONS.add(opmap["CALL_FUNCTION_VAR_KW"])
PRINT_ITEMS = set()
if "PRINT_ITEM" in opmap:
PRINT_ITEMS.add(opmap["PRINT_ITEM"])
PRINT_ITEMS.add(opmap["PRINT_ITEM_TO"])
PRINT_NEW_LINES = set()
if "PRINT_NEWLINE" in opmap:
PRINT_NEW_LINES.add(opmap["PRINT_NEWLINE"])
PRINT_NEW_LINES.add(opmap["PRINT_NEWLINE_TO"])
SETUP_WITH = {opmap["SETUP_WITH"], }
WITH_CLEANUP = {opmap.get("WITH_CLEANUP") or opmap.get("WITH_CLEANUP_START"), }
SETUP_ASYNC_WITH = set()
if "SETUP_ASYNC_WITH" in opmap:
SETUP_ASYNC_WITH.add(opmap["SETUP_ASYNC_WITH"])
IMPORTS = {opmap["IMPORT_NAME"], opmap["IMPORT_FROM"]}
IMPORT_NAMES = {opmap["IMPORT_NAME"],}
FOR_ITERS = {opmap["FOR_ITER"],}
GET_ITERS = {opmap["GET_ITER"],}
def cord(value):
"""Convert (str or int) to ord"""
if isinstance(value, str):
return ord(value)
return value
class ListAccessor(object): # pylint: disable=too-few-public-methods
"""List Proxy. Return value on x[i] and tuple on x(i)"""
def __init__(self, values, repr_is_val=True):
self.values = values
self.repr_is_val = repr_is_val
def __getitem__(self, index):
if self.values is not None:
return self.values[index]
return index
def __call__(self, index):
argval = self[index]
if self.repr_is_val and self.values is not None:
argrepr = argval
else:
argrepr = repr(argval)
return argval, argrepr
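# Illustrative sketch of ListAccessor: indexing returns the raw value, calling returns a
# (value, repr) pair, and without a backing list the index itself is echoed back.
def _list_accessor_example():
    names = ListAccessor(["x", "y"])
    consts = ListAccessor([1, "s"], repr_is_val=False)
    return names[0], names(1), consts(1)
    # -> ("x", ("y", "y"), ("s", "'s'"))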
class Interpreter(object): # pylint: disable=too-many-instance-attributes
"""Bytecode iterator"""
def __init__(self, co_code, varnames=None, names=None, constants=None, # pylint: disable=too-many-arguments
cells=None, linestarts=None, line_offset=0):
self.lasti = 0
self.opi = 0
self._extended_arg = 0
self._co_code = co_code
self.varnames = ListAccessor(varnames)
self.names = ListAccessor(names)
self.consts = ListAccessor(constants, repr_is_val=False)
self.cells = ListAccessor(cells)
self.linestarts = linestarts
self.line_offset = line_offset
self._size = len(co_code)
self.opcode = None
self.oparg = 0
self._stop = False
self._map = {}
self._extra = set()
self._missing = set()
self._supported = set()
if not hasattr(self, "_known_missing"):
self._known_missing = set()
self._create_map()
def __iter__(self):
"""Restart iterator"""
self._stop = False
return self
def __call__(self, lasti=0, extended_arg=0):
self.lasti = lasti
self._extended_arg = extended_arg
def next(self):
"""Python 2 iterator"""
if self._stop:
raise StopIteration
opcode = self._next_op()
self._map[opcode]()
return opcode
def __next__(self):
"""Python 3 iterator"""
return self.next()
def _next_op(self):
"""Get next operation"""
self._set_opcode()
if self.opcode >= HAVE_ARGUMENT:
self._have_argument()
if self.lasti >= self._size:
self._stop = True
return self.opcode
def _set_opcode(self):
"""Get op from code"""
self.oparg = None
self.opcode = cord(self._co_code[self.lasti])
self.opi = self.lasti
self.lasti += 1
def _have_argument(self):
"""Read argument if op has argument"""
cod = self._co_code
i = self.lasti
self.oparg = cord(cod[i]) + cord(cod[i + 1]) * 256 + self._extended_arg
self._extended_arg = 0
self.lasti += 2
def _create_map(self):
"""Create map of functions"""
condition = lambda x, obj: (
x[0] != "_" and hasattr(obj, "__call__") and
obj.__doc__ is not None and "opcode" in obj.__doc__)
to_opcode = lambda x: x.upper().replace("__", "+")
self._map = defaultdict(lambda: self.nop)
self._extra = set()
self._missing = set()
self._supported = set()
for name in dir(self):
method = getattr(self, name)
if condition(name, method):
opcode = to_opcode(name)
if opcode not in opmap:
self._extra.add(opcode)
else:
self._map[opmap[opcode]] = method
self._supported.add(opcode)
self._missing = (
set(opmap.keys()) - self._supported - self._known_missing)
@property
def extra_opcode(self):
"""Return opcode implemented by this class
but not supported by Python
"""
return self._extra
@property
def missing_opcode(self):
"""Return opcode supported by Python
but not implemented by this class"""
return self._missing
def nop(self):
"""NOP opcode"""
pass
class InstructionInterpreter(Interpreter):
"""Mix Python3 dis._get_instructions_bytes with Python2 dis.disassemble"""
def __init__(self, *args, **kwargs):
super(InstructionInterpreter, self).__init__(*args, **kwargs)
self._labels = dis.findlabels(self._co_code)
self.starts_line = None
self.is_jump_target = False
self.argval = None
self.argrepr = None
self.current_line = -1
def _set_opcode(self):
super(InstructionInterpreter, self)._set_opcode()
if self.linestarts is not None:
self.starts_line = self.linestarts.get(self.opi, None)
if self.starts_line is not None:
self.starts_line += self.line_offset
self.current_line = self.starts_line
self.is_jump_target = self.opi in self._labels
def _have_argument(self):
super(InstructionInterpreter, self)._have_argument()
opcode = self.opcode
arg = argval = self.oparg
argrepr = ""
if opcode in dis.hasconst:
argval, argrepr = self.consts(arg)
elif opcode in dis.hasname:
argval, argrepr = self.names(arg)
elif opcode in dis.hasjrel:
argval = self.lasti + arg
argrepr = "to " + repr(argval)
elif opcode in dis.haslocal:
argval, argrepr = self.varnames(arg)
elif opcode in dis.hascompare:
argval = cmp_op[arg]
argrepr = argval
elif opcode in dis.hasfree:
argval, argrepr = self.cells(arg)
elif opcode in CALL_FUNCTIONS:
argrepr = "%d positional, %d keyword pair" % (
cord(self._co_code[self.lasti - 2]),
cord(self._co_code[self.lasti - 1]))
self.argval, self.argrepr = argval, argrepr
def next(self):
super(InstructionInterpreter, self).next()
return Instruction(
            dis.opname[self.opcode], self.opcode, self.oparg, self.argval,
self.argrepr, self.opi, self.starts_line, self.is_jump_target,
self.current_line)
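# Illustrative usage sketch: iterating over the bytecode of a code object with the two-byte
# argument encoding that _have_argument implements (i.e. CPython < 3.6 bytecode).
# dis.findlinestarts supplies the offset -> line mapping the interpreter expects.
def _example_iterate_bytecode(code_obj):
    interpreter = InstructionInterpreter(
        code_obj.co_code,
        varnames=code_obj.co_varnames,
        names=code_obj.co_names,
        constants=code_obj.co_consts,
        cells=code_obj.co_cellvars + code_obj.co_freevars,
        linestarts=dict(dis.findlinestarts(code_obj)),
    )
    return [instruction for instruction in interpreter]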
|
|
"""Endpoint to generate footer HTML."""
import logging
import re
from functools import lru_cache
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.template import loader as template_loader
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_jsonp.renderers import JSONPRenderer
from readthedocs.api.v2.mixins import CachedResponseMixin
from readthedocs.api.v2.permissions import IsAuthorizedToViewVersion
from readthedocs.builds.constants import LATEST, TAG
from readthedocs.builds.models import Version
from readthedocs.core.utils.extend import SettingsOverrideObject
from readthedocs.projects.constants import MKDOCS, SPHINX_HTMLDIR
from readthedocs.projects.models import Project
from readthedocs.projects.version_handling import (
highest_version,
parse_version_failsafe,
)
log = logging.getLogger(__name__)
def get_version_compare_data(project, base_version=None):
"""
Retrieve metadata about the highest version available for this project.
:param base_version: We assert whether or not the base_version is also the
highest version in the resulting "is_highest" value.
"""
if (
not project.show_version_warning or
(base_version and base_version.is_external)
):
return {'is_highest': False}
versions_qs = (
Version.internal.public(project=project)
.filter(built=True, active=True)
)
    # Only consider tagged versions if the project has at least one tag
if versions_qs.filter(type=TAG).exists():
versions_qs = versions_qs.filter(type=TAG)
# Optimization
versions_qs = versions_qs.select_related('project')
highest_version_obj, highest_version_comparable = highest_version(
versions_qs,
)
ret_val = {
'project': str(highest_version_obj),
'version': str(highest_version_comparable),
'is_highest': True,
}
if highest_version_obj:
        # Never link to the dashboard;
        # users reading the docs may not have access to the dashboard.
ret_val['url'] = highest_version_obj.get_absolute_url()
ret_val['slug'] = highest_version_obj.slug
if base_version and base_version.slug != LATEST:
try:
base_version_comparable = parse_version_failsafe(
base_version.verbose_name,
)
if base_version_comparable:
                # This is the only place where is_highest can be computed. All
                # error cases will be set to True, for non-standard versions.
ret_val['is_highest'] = (
base_version_comparable >= highest_version_comparable
)
else:
ret_val['is_highest'] = True
except (Version.DoesNotExist, TypeError):
ret_val['is_highest'] = True
return ret_val
class BaseFooterHTML(CachedResponseMixin, APIView):
"""
Render and return footer markup.
Query parameters:
- project
- version
- page: Sphinx's page name (name of the source file),
used to build the "edit on" links.
- theme: Used to decide how to integrate the flyout menu.
- docroot: Path where all the source documents are.
Used to build the ``edit_on`` URL.
- source_suffix: Suffix from the source document.
Used to build the ``edit_on`` URL.
.. note::
The methods `_get_project` and `_get_version`
are called many times, so a basic cache is implemented.
"""
http_method_names = ['get']
permission_classes = [IsAuthorizedToViewVersion]
renderer_classes = [JSONRenderer, JSONPRenderer]
project_cache_tag = 'rtd-footer'
@lru_cache(maxsize=1)
def _get_project(self):
project_slug = self.request.GET.get('project', None)
project = get_object_or_404(Project, slug=project_slug)
return project
@lru_cache(maxsize=1)
def _get_version(self):
version_slug = self.request.GET.get('version', None)
# Hack in a fix for missing version slug deploy
# that went out a while back
if version_slug == '':
version_slug = LATEST
project = self._get_project()
version = get_object_or_404(
project.versions.all(),
slug__iexact=version_slug,
)
return version
def _get_active_versions_sorted(self):
"""Get all versions that the user has access, sorted."""
project = self._get_project()
versions = project.ordered_active_versions(
user=self.request.user,
include_hidden=False,
)
return versions
def _get_context(self):
theme = self.request.GET.get('theme', False)
docroot = self.request.GET.get('docroot', '')
source_suffix = self.request.GET.get('source_suffix', '.rst')
new_theme = (theme == 'sphinx_rtd_theme')
project = self._get_project()
main_project = project.main_language_project or project
version = self._get_version()
page_slug = self.request.GET.get('page', '')
path = ''
if page_slug and page_slug != 'index':
if version.documentation_type in {SPHINX_HTMLDIR, MKDOCS}:
path = re.sub('/index$', '', page_slug) + '/'
else:
path = page_slug + '.html'
context = {
'project': project,
'version': version,
'path': path,
'downloads': version.get_downloads(pretty=True),
'current_version': version.verbose_name,
'versions': self._get_active_versions_sorted(),
'main_project': main_project,
'translations': main_project.translations.all(),
'current_language': project.language,
'new_theme': new_theme,
'settings': settings,
'github_edit_url': version.get_github_url(
docroot,
page_slug,
source_suffix,
'edit',
),
'github_view_url': version.get_github_url(
docroot,
page_slug,
source_suffix,
'view',
),
'gitlab_edit_url': version.get_gitlab_url(
docroot,
page_slug,
source_suffix,
'edit',
),
'gitlab_view_url': version.get_gitlab_url(
docroot,
page_slug,
source_suffix,
'view',
),
'bitbucket_url': version.get_bitbucket_url(
docroot,
page_slug,
source_suffix,
),
}
return context
def get(self, request, format=None):
project = self._get_project()
version = self._get_version()
version_compare_data = get_version_compare_data(
project,
version,
)
context = self._get_context()
html = template_loader.get_template('restapi/footer.html').render(
context,
request,
)
show_version_warning = (
project.show_version_warning and
not version.is_external
)
resp_data = {
'html': html,
'show_version_warning': show_version_warning,
'version_active': version.active,
'version_compare': version_compare_data,
'version_supported': version.supported,
}
return Response(resp_data)
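# Illustrative sketch of the query parameters documented on BaseFooterHTML, exercised
# through Django's test client. The slugs are placeholders and the '/api/v2/footer_html/'
# path is assumed from this API's URL configuration.
def _example_footer_request(client):
    return client.get(
        '/api/v2/footer_html/',
        {
            'project': 'my-project',
            'version': 'latest',
            'page': 'index',
            'theme': 'sphinx_rtd_theme',
            'docroot': '/docs/',
            'source_suffix': '.rst',
        },
    )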
class FooterHTML(SettingsOverrideObject):
_default_class = BaseFooterHTML
|
|
# -*- coding: utf-8 -*-
"""
continuity.services.pt
~~~~~~~~~~~~~~~~~~~~~~
Pivotal Tracker API.
:copyright: 2015 by Jonathan Zempel.
:license: BSD, see LICENSE for more details.
"""
from .commons import IDObject, RemoteService, ServiceException
from .utils import cached_property, datetime_property
from requests import RequestException
from urlparse import urljoin
class Comment(IDObject):
"""Pivotal Tracker comment object.
"""
FIELDS = ":default,person"
@property
def author(self):
"""Comment author accessor.
"""
member = self.data.get("person")
return Member(member) if member else None
@datetime_property
def created(self):
"""Comment created accessor.
"""
return self.data.get("created_at")
@property
def text(self):
"""Comment text accessor.
"""
return self.data.get("text")
class Member(IDObject):
"""Pivotal Tracker member object.
:param member: Member data.
"""
def __init__(self, member):
super(Member, self).__init__(member)
if "person" in self.data:
person = self.data["person"]
self.data.update(person)
del self.data["person"]
def __str__(self):
"""Get a string representation of this Member.
"""
return self.name
@property
def email(self):
"""Person email accessor.
"""
return self.data.get("email")
@property
def initials(self):
"""Person initials accessor.
"""
return self.data.get("initials")
@property
def name(self):
"""Person name accessor.
"""
return self.data.get("name")
@property
def role(self):
"""Member role accessor.
"""
return self.data.get("role")
class Project(IDObject):
"""Pivotal Tracker project object.
"""
FIELDS = ":default,memberships"
@cached_property
def members(self):
"""Project membership accessor.
"""
ret_val = []
memberships = self.data.get("memberships")
for membership in memberships:
ret_val.append(Member(membership))
return ret_val
@property
def name(self):
"""Project name accessor.
"""
return self.data.get("name")
class Story(IDObject):
"""Pivotal Tracker story object.
"""
FIELDS = ":default,owners,requested_by"
STATE_UNSCHEDULED = "unscheduled"
STATE_UNSTARTED = "unstarted"
STATE_STARTED = "started"
STATE_FINISHED = "finished"
STATE_DELIVERED = "delivered"
STATE_ACCEPTED = "accepted"
STATE_REJECTED = "rejected"
TYPE_BUG = "bug"
TYPE_CHORE = "chore"
TYPE_FEATURE = "feature"
TYPE_RELEASE = "release"
@datetime_property
def created(self):
"""Story created accessor.
"""
return self.data.get("created_at")
@property
def description(self):
"""Story description accessor.
"""
return self.data.get("description")
@property
def estimate(self):
"""Story estimate accessor.
"""
value = self.data.get("estimate")
return int(value) if value else None
@property
def name(self):
"""Story name accessor.
"""
return self.data.get("name")
@property
def owners(self):
"""Story owners accessor.
"""
ret_val = []
owners = self.data.get("owners", [])
for owner in owners:
ret_val.append(Member(owner))
return ret_val
@property
def requester(self):
"""Story requester accessor.
"""
member = self.data.get("requested_by")
return Member(member) if member else None
@property
def state(self):
"""Story state accessor.
"""
return self.data.get("current_state")
@property
def type(self):
"""Story type accessor.
"""
return self.data.get("story_type")
@datetime_property
def updated(self):
"""Story updated accessor.
"""
return self.data.get("updated_at")
@property
def url(self):
"""Story URL accessor.
"""
return self.data.get("url")
class Iteration(IDObject):
"""Pivotal Tracker iteration object.
"""
FIELDS = ":default,stories({0})".format(Story.FIELDS)
@datetime_property
def finished(self):
"""Iteration finished accessor.
"""
return self.data.get("finish")
@property
def number(self):
"""Iteration number accessor.
"""
value = self.data.get("number")
return int(value) if value else None
@datetime_property
def started(self):
"""Iteration started accessor.
"""
return self.data.get("start")
@property
def stories(self):
"""Iteration stories accessor.
"""
ret_val = []
stories = self.data.get("stories")
for story in stories:
story = Story(story)
ret_val.append(story)
return ret_val
class Task(IDObject):
"""Pivotal Tracker task object.
"""
@datetime_property
def created(self):
"""Task created accessor.
"""
return self.data.get("created_at")
@property
def description(self):
"""Task description accessor.
"""
return self.data.get("description")
@property
def is_checked(self):
"""Determine if this task is checked.
"""
return self.data.get("complete")
@property
def number(self):
"""Task number accessor.
"""
value = self.data.get("position")
return int(value) if value else None
class PivotalTrackerException(ServiceException):
"""Base Pivotal Tracker exception.
"""
class PivotalTrackerService(RemoteService):
"""Pivotal Tracker service.
:param token: The API token to use.
"""
URI = "https://www.pivotaltracker.com/services/v5/"
def __init__(self, token):
super(PivotalTrackerService, self).__init__(PivotalTrackerService.URI)
self.token = token
def _request(self, method, resource, **kwargs):
"""Send a Pivotal Tracker request.
:param method: The HTTP method.
:param resource: The URI resource.
:param kwargs: Request keyword-arguments.
"""
headers = kwargs.get("headers", {})
headers["X-TrackerToken"] = self.token
if method.lower() in ("post", "put"):
headers["Content-Type"] = "application/json"
kwargs["headers"] = headers
try:
ret_val = super(PivotalTrackerService, self)._request(method,
resource, **kwargs)
        except RequestException as e:
raise PivotalTrackerException(e)
return ret_val
def get_backlog(self, project, limit=None):
"""Get a list of stories in the backlog.
:param project: The project to use.
:param limit: Limit the number of iterations to get.
"""
ret_val = []
resource = "projects/{0:d}/iterations".format(project.id)
params = {"fields": Iteration.FIELDS, "scope": "current_backlog"}
if limit:
params["limit"] = limit
iterations = self._request("get", resource, params=params)
if not iterations:
params["scope"] = "backlog"
iterations = self._request("get", resource, params=params)
for iteration in iterations:
iteration = Iteration(iteration)
ret_val.extend(iteration.stories)
return ret_val
def get_comments(self, project, story):
"""Get the comments for the given story.
:param project: The project to use.
:param story: The story to use.
"""
ret_val = []
resource = "projects/{0:d}/stories/{1:d}/comments".format(project.id,
story.id)
params = {"fields": Comment.FIELDS}
comments = self._request("get", resource, params=params)
for comment in comments:
ret_val.append(Comment(comment))
return ret_val
def get_project(self, id):
"""Get a project for the given ID.
:param id: The ID of the project to get.
"""
for project in self.projects:
if project.id == int(id):
ret_val = project
break
else:
ret_val = None
return ret_val
def get_story(self, project, filter):
"""Get the next story for the given filter.
:param project: The project to use.
:param filter: The PT API filter. See
`https://www.pivotaltracker.com/help/faq#howcanasearchberefined`
for details.
"""
resource = "projects/{0:d}/stories".format(project.id)
params = {
"fields": Story.FIELDS,
"filter": "type:feature,chore,bug {0}".format(filter),
"limit": 1
}
stories = self._request("get", resource, params=params)
if len(stories) == 1:
ret_val = Story(stories[0])
else:
ret_val = None
return ret_val
def get_tasks(self, project, story):
"""Get the tasks for the given story.
:param project: The project to use.
:param story: The story to use.
"""
ret_val = []
resource = "projects/{0:d}/stories/{1:d}/tasks".format(project.id,
story.id)
tasks = self._request("get", resource)
for task in tasks:
ret_val.append(Task(task))
return ret_val
@staticmethod
def get_token(user, password):
"""Get an active token for the given user.
:param user: The user to get a token for.
:param password: The user password.
"""
url = urljoin(PivotalTrackerService.URI, "me")
auth = (user, password)
response = PivotalTrackerService.get_response("get", url, auth=auth)
try:
response.raise_for_status()
data = response.json()
ret_val = data["api_token"]
except RequestException:
ret_val = None
return ret_val
@cached_property
def projects(self):
"""Get a list of projects.
"""
ret_val = []
params = {"fields": Project.FIELDS}
projects = self._request("get", "projects", params=params)
for project in projects:
ret_val.append(Project(project))
return ret_val
def set_story(self, project, story, state, owner=None):
"""Set the state of the story for the given ID.
:param project: The project to use.
:param story: The story to update.
:param state: The updated story state: ``'unscheduled'``,
``'unstarted'``, ``'started'``, ``'finished'``, ``'delivered'``,
``'accepted'``, or ``'rejected'``.
:param owner: Default `None`. Optional story owner.
"""
resource = "projects/{0:d}/stories/{1:d}".format(project.id, story.id)
data = {"current_state": state, "fields": Story.FIELDS}
if owner:
data["owner_ids"] = [owner.id]
story = self._request("put", resource, data=data)
return Story(story)
def set_task(self, project, story, task, checked):
"""Set the completion of the given task.
:param project: The project to use.
:param story: The story the task is a part of.
:param task: The task to update.
        :param checked: ``True`` to check the task as completed, otherwise
``False``.
"""
resource = "projects/{0:d}/stories/{1:d}/tasks/{2:d}".format(
project.id, story.id, task.id)
data = {"complete": checked}
task = self._request("put", resource, data=data)
return Task(task)
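# Illustrative usage sketch: how the classes above are typically wired together -- exchange
# credentials for a token, pick a project, then fetch the next unstarted story. The
# user/password/project_id arguments are placeholders.
def _example_next_story(user, password, project_id):
    token = PivotalTrackerService.get_token(user, password)
    service = PivotalTrackerService(token)
    project = service.get_project(project_id)
    return service.get_story(project, "state:unstarted")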
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import afi_safi
class afi_safis(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Address family specific configuration
"""
__slots__ = ("_path_helper", "_extmethods", "__afi_safi")
_yang_name = "afi-safis"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__afi_safi = YANGDynClass(
base=YANGListType(
"afi_safi_name",
afi_safi.afi_safi,
yang_name="afi-safi",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="afi-safi-name",
extensions=None,
),
is_container="list",
yang_name="afi-safi",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"afi-safis",
]
def _get_afi_safi(self):
"""
Getter method for afi_safi, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi (list)
YANG Description: AFI,SAFI configuration available for the
neighbour or group
"""
return self.__afi_safi
def _set_afi_safi(self, v, load=False):
"""
Setter method for afi_safi, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_afi_safi is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_afi_safi() directly.
YANG Description: AFI,SAFI configuration available for the
neighbour or group
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
"afi_safi_name",
afi_safi.afi_safi,
yang_name="afi-safi",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="afi-safi-name",
extensions=None,
),
is_container="list",
yang_name="afi-safi",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """afi_safi must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType("afi_safi_name",afi_safi.afi_safi, yang_name="afi-safi", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='afi-safi-name', extensions=None), is_container='list', yang_name="afi-safi", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
}
)
self.__afi_safi = t
if hasattr(self, "_set"):
self._set()
def _unset_afi_safi(self):
self.__afi_safi = YANGDynClass(
base=YANGListType(
"afi_safi_name",
afi_safi.afi_safi,
yang_name="afi-safi",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="afi-safi-name",
extensions=None,
),
is_container="list",
yang_name="afi-safi",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
afi_safi = __builtin__.property(_get_afi_safi, _set_afi_safi)
_pyangbind_elements = OrderedDict([("afi_safi", afi_safi)])
from . import afi_safi
class afi_safis(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Address family specific configuration
"""
__slots__ = ("_path_helper", "_extmethods", "__afi_safi")
_yang_name = "afi-safis"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__afi_safi = YANGDynClass(
base=YANGListType(
"afi_safi_name",
afi_safi.afi_safi,
yang_name="afi-safi",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="afi-safi-name",
extensions=None,
),
is_container="list",
yang_name="afi-safi",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"afi-safis",
]
def _get_afi_safi(self):
"""
Getter method for afi_safi, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi (list)
YANG Description: AFI,SAFI configuration available for the
neighbour or group
"""
return self.__afi_safi
def _set_afi_safi(self, v, load=False):
"""
Setter method for afi_safi, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_afi_safi is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_afi_safi() directly.
YANG Description: AFI,SAFI configuration available for the
neighbour or group
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
"afi_safi_name",
afi_safi.afi_safi,
yang_name="afi-safi",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="afi-safi-name",
extensions=None,
),
is_container="list",
yang_name="afi-safi",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """afi_safi must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType("afi_safi_name",afi_safi.afi_safi, yang_name="afi-safi", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='afi-safi-name', extensions=None), is_container='list', yang_name="afi-safi", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
}
)
self.__afi_safi = t
if hasattr(self, "_set"):
self._set()
def _unset_afi_safi(self):
self.__afi_safi = YANGDynClass(
base=YANGListType(
"afi_safi_name",
afi_safi.afi_safi,
yang_name="afi-safi",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="afi-safi-name",
extensions=None,
),
is_container="list",
yang_name="afi-safi",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=True,
)
afi_safi = __builtin__.property(_get_afi_safi, _set_afi_safi)
_pyangbind_elements = OrderedDict([("afi_safi", afi_safi)])
|
|
"""
missing types & inference
"""
import numpy as np
from pandas._libs import lib, missing as libmissing
from pandas._libs.tslib import NaT, iNaT
from .generic import (ABCMultiIndex, ABCSeries,
ABCIndexClass, ABCGeneric,
ABCExtensionArray)
from .common import (is_string_dtype, is_datetimelike,
is_datetimelike_v_numeric, is_float_dtype,
is_datetime64_dtype, is_datetime64tz_dtype,
is_timedelta64_dtype, is_interval_dtype,
is_period_dtype,
is_complex_dtype,
is_string_like_dtype, is_bool_dtype,
is_integer_dtype, is_dtype_equal,
is_extension_array_dtype,
needs_i8_conversion, _ensure_object,
pandas_dtype,
is_scalar,
is_object_dtype,
is_integer,
_TD_DTYPE,
_NS_DTYPE)
from .inference import is_list_like
isposinf_scalar = libmissing.isposinf_scalar
isneginf_scalar = libmissing.isneginf_scalar
def isna(obj):
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
notna : boolean inverse of pandas.isna.
Series.isna : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.isna('dog')
False
>>> pd.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.isna(array)
array([[False, True, False],
[False, False, True]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.isna(index)
array([False, False, True, False])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.isna(df)
0 1 2
0 False False False
1 False True False
>>> pd.isna(df[1])
0 False
1 True
Name: 1, dtype: bool
"""
return _isna(obj)
isnull = isna
def _isna_new(obj):
if is_scalar(obj):
return libmissing.checknull(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass,
ABCExtensionArray)):
return _isna_ndarraylike(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isna(func=isna))
elif isinstance(obj, list):
return _isna_ndarraylike(np.asarray(obj, dtype=object))
elif hasattr(obj, '__array__'):
return _isna_ndarraylike(np.asarray(obj))
else:
return obj is None
def _isna_old(obj):
"""Detect missing values. Treat None, NaN, INF, -INF as null.
Parameters
----------
obj: ndarray or object value
Returns
-------
boolean ndarray or boolean
"""
if is_scalar(obj):
return libmissing.checknull_old(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
return _isna_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isna(func=_isna_old))
elif isinstance(obj, list):
return _isna_ndarraylike_old(np.asarray(obj, dtype=object))
elif hasattr(obj, '__array__'):
return _isna_ndarraylike_old(np.asarray(obj))
else:
return obj is None
_isna = _isna_new
def _use_inf_as_na(key):
"""Option change callback for na/inf behaviour
Choose which replacement for numpy.isnan / -numpy.isfinite is used.
Parameters
----------
key : str
The option key; its boolean value selects the behaviour:
True means treat None, NaN, INF, -INF as null (old way),
False means None and NaN are null, but INF, -INF are not null
(new way).
Notes
-----
This approach to setting global module values is discussed and
approved here:
* http://stackoverflow.com/questions/4859217/
programmatically-creating-variables-in-python/4859312#4859312
"""
from pandas.core.config import get_option
flag = get_option(key)
if flag:
globals()['_isna'] = _isna_old
else:
globals()['_isna'] = _isna_new
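# Illustrative note (added sketch, not part of the original module; assumes
# numpy is imported as np, as above): the two implementations differ only in
# how they treat infinities,
#
# _isna_new(np.inf) # -> False (INF is not considered missing)
# _isna_old(np.inf) # -> True (INF is treated as missing)
#
# and the option callback above swaps the module-level `_isna` that
# `isna`/`notna` dispatch to.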
def _isna_ndarraylike(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
if is_extension_array_dtype(obj):
if isinstance(obj, (ABCIndexClass, ABCSeries)):
values = obj._values
else:
values = obj
result = values.isna()
elif is_interval_dtype(values):
# TODO(IntervalArray): remove this if block
from pandas import IntervalIndex
result = IntervalIndex(obj).isna()
elif is_string_dtype(dtype):
# Working around NumPy ticket 1542
shape = values.shape
if is_string_like_dtype(dtype):
# object array of strings
result = np.zeros(values.shape, dtype=bool)
else:
# object array of non-strings
result = np.empty(shape, dtype=bool)
vec = libmissing.isnaobj(values.ravel())
result[...] = vec.reshape(shape)
elif needs_i8_conversion(obj):
# this is the NaT pattern
result = values.view('i8') == iNaT
else:
result = np.isnan(values)
# box
if isinstance(obj, ABCSeries):
from pandas import Series
result = Series(result, index=obj.index, name=obj.name, copy=False)
return result
def _isna_ndarraylike_old(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
if is_string_dtype(dtype):
# Working around NumPy ticket 1542
shape = values.shape
if is_string_like_dtype(dtype):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = libmissing.isnaobj_old(values.ravel())
result[:] = vec.reshape(shape)
elif is_datetime64_dtype(dtype):
# this is the NaT pattern
result = values.view('i8') == iNaT
else:
result = ~np.isfinite(values)
# box
if isinstance(obj, ABCSeries):
from pandas import Series
result = Series(result, index=obj.index, name=obj.name, copy=False)
return result
def notna(obj):
"""
Detect non-missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are valid (not missing, which is ``NaN`` in numeric
arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : array-like or object value
Object to check for *not* null or *non*-missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is valid.
See Also
--------
isna : boolean inverse of pandas.notna.
Series.notna : Detect valid values in a Series.
DataFrame.notna : Detect valid values in a DataFrame.
Index.notna : Detect valid values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.notna('dog')
True
>>> pd.notna(np.nan)
False
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.notna(array)
array([[ True, False, True],
[ True, True, False]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.notna(index)
array([ True, True, False, True])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.notna(df)
0 1 2
0 True True True
1 True False True
>>> pd.notna(df[1])
0 True
1 False
Name: 1, dtype: bool
"""
res = isna(obj)
if is_scalar(res):
return not res
return ~res
notnull = notna
def is_null_datelike_scalar(other):
""" test whether the object is a null datelike, e.g. Nat
but guard against passing a non-scalar """
if other is NaT or other is None:
return True
elif is_scalar(other):
# a timedelta
if hasattr(other, 'dtype'):
return other.view('i8') == iNaT
elif is_integer(other) and other == iNaT:
return True
return isna(other)
return False
def _isna_compat(arr, fill_value=np.nan):
"""
Parameters
----------
arr: a numpy array
fill_value: fill value, default to np.nan
Returns
-------
True if we can fill using this fill_value
"""
dtype = arr.dtype
if isna(fill_value):
return not (is_bool_dtype(dtype) or
is_integer_dtype(dtype))
return True
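# Illustrative behaviour of _isna_compat (added sketch, not part of the
# original module):
#
# _isna_compat(np.array([1, 2, 3]), np.nan) # -> False, int arrays cannot hold NaN
# _isna_compat(np.array([1.0, 2.0]), np.nan) # -> True, float arrays can be NaN-filled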
def array_equivalent(left, right, strict_nan=False):
"""
True if two arrays, left and right, have equal non-NaN elements, and NaNs
in corresponding locations. False otherwise. It is assumed that left and
right are NumPy arrays of the same dtype. The behavior of this function
(particularly with respect to NaNs) is not defined if the dtypes are
different.
Parameters
----------
left, right : ndarrays
strict_nan : bool, default False
If True, consider NaN and None to be different.
Returns
-------
b : bool
Returns True if the arrays are equivalent.
Examples
--------
>>> array_equivalent(
... np.array([1, 2, np.nan]),
... np.array([1, 2, np.nan]))
True
>>> array_equivalent(
... np.array([1, np.nan, 2]),
... np.array([1, 2, np.nan]))
False
"""
left, right = np.asarray(left), np.asarray(right)
# shape compat
if left.shape != right.shape:
return False
# Object arrays can contain None, NaN and NaT.
# string dtypes must come through this path for NumPy 1.7.1 compat
if is_string_dtype(left) or is_string_dtype(right):
if not strict_nan:
# isna considers NaN and None to be equivalent.
return lib.array_equivalent_object(
_ensure_object(left.ravel()), _ensure_object(right.ravel()))
for left_value, right_value in zip(left, right):
if left_value is NaT and right_value is not NaT:
return False
elif isinstance(left_value, float) and np.isnan(left_value):
if (not isinstance(right_value, float) or
not np.isnan(right_value)):
return False
else:
if left_value != right_value:
return False
return True
# NaNs can occur in float and complex arrays.
if is_float_dtype(left) or is_complex_dtype(left):
# empty
if not (np.prod(left.shape) and np.prod(right.shape)):
return True
return ((left == right) | (isna(left) & isna(right))).all()
# numpy will not allow this type of datetimelike vs integer comparison
elif is_datetimelike_v_numeric(left, right):
return False
# M8/m8
elif needs_i8_conversion(left) and needs_i8_conversion(right):
if not is_dtype_equal(left.dtype, right.dtype):
return False
left = left.view('i8')
right = right.view('i8')
# if we have structured dtypes, compare first
if (left.dtype.type is np.void or
right.dtype.type is np.void):
if left.dtype != right.dtype:
return False
return np.array_equal(left, right)
def _infer_fill_value(val):
"""
infer the fill value for the nan/NaT from the provided
scalar/ndarray/list-like. If we are a NaT, return the correct dtyped
element to provide proper block construction
"""
if not is_list_like(val):
val = [val]
val = np.array(val, copy=False)
if is_datetimelike(val):
return np.array('NaT', dtype=val.dtype)
elif is_object_dtype(val.dtype):
dtype = lib.infer_dtype(_ensure_object(val))
if dtype in ['datetime', 'datetime64']:
return np.array('NaT', dtype=_NS_DTYPE)
elif dtype in ['timedelta', 'timedelta64']:
return np.array('NaT', dtype=_TD_DTYPE)
return np.nan
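# Illustrative behaviour of _infer_fill_value (added sketch, not part of the
# original module; assumes pandas is imported as pd):
#
# _infer_fill_value(0) # -> nan (plain numeric input)
# _infer_fill_value(pd.Timestamp('2018-01-01')) # -> np.array('NaT', dtype='datetime64[ns]')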
def _maybe_fill(arr, fill_value=np.nan):
"""
if we have a compatible fill_value and arr dtype, then fill
"""
if _isna_compat(arr, fill_value):
arr.fill(fill_value)
return arr
def na_value_for_dtype(dtype, compat=True):
"""
Return a dtype compat na value
Parameters
----------
dtype : string / dtype
compat : boolean, default True
If True, return a value the dtype can actually hold (e.g. 0 for
integer dtypes) rather than NaN.
Returns
-------
np.dtype or a pandas dtype
"""
dtype = pandas_dtype(dtype)
if is_extension_array_dtype(dtype):
return dtype.na_value
if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or
is_timedelta64_dtype(dtype) or is_period_dtype(dtype)):
return NaT
elif is_float_dtype(dtype):
return np.nan
elif is_integer_dtype(dtype):
if compat:
return 0
return np.nan
elif is_bool_dtype(dtype):
return False
return np.nan
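# Illustrative results of na_value_for_dtype (added sketch, not part of the
# original module):
#
# na_value_for_dtype(np.dtype('float64')) # -> nan
# na_value_for_dtype(np.dtype('int64')) # -> 0 (a value the dtype can hold)
# na_value_for_dtype(np.dtype('int64'), compat=False) # -> nan
# na_value_for_dtype(np.dtype('datetime64[ns]')) # -> NaT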
def remove_na_arraylike(arr):
"""
Return array-like containing only true/non-NaN values, possibly empty.
"""
if is_extension_array_dtype(arr):
return arr[notna(arr)]
else:
return arr[notna(lib.values_from_object(arr))]
|
|
from __future__ import with_statement
from optparse import make_option
import re
import shutil
import os
from django.core.management.base import CommandError
from django.template import Context
from django.template.loader import get_template
from django.conf import settings
# external
import inflection
from subcommand.management.base import BaseVerboseCommand
from generate_scaffold.management.transactions import (
FilesystemTransaction,
FileModification,
FileCreation,
Filelike,
DirectoryCreation
)
from generate_scaffold.utils.cacheclear import (
reload_django_appcache,
clean_pyc_in_dir
)
# ...
from .base import (
BaseSubCommand
# AppSubCommand,
# LabelSubCommand,
# NoArgsSubCommand,
# TemplateSubCommand
)
from ..utils import strext
fields_ptn = re.compile("^-")
class FileDestroy(object):
def __init__(self, transaction, filename):
self.transaction = transaction
self.filename = filename
self.backup_path = None
def execute(self):
self.backup_path = self.transaction.generate_path()
if os.path.exists(self.filename):
shutil.copy2(self.filename, self.backup_path)
# self.transaction.msg("backup", self.filename)
else:
self.transaction.msg("notfound", self.filename)
def rollback(self):
if not self.transaction.is_dry_run:
shutil.copy2(self.backup_path, self.filename)
self.transaction.msg("revert", self.filename)
def commit(self):
if os.path.exists(self.filename):
self.transaction.msg("destroy", self.filename)
os.remove(self.filename)
os.remove(self.backup_path)
class DirectoryDestroy(object):
def __init__(self, transaction, dirname):
self.transaction = transaction
self.dirname = dirname
def execute(self):
self.backup_path = self.transaction.generate_path()
if os.path.exists(self.dirname):
os.mkdir(self.backup_path)
# self.transaction.msg("backup", self.dirname)
else:
self.transaction.msg("notfound", self.dirname)
def rollback(self):
if not self.transaction.is_dry_run:
shutil.copy2(self.backup_path, self.dirname)
self.transaction.msg("revert", self.dirname)
def commit(self):
if os.path.exists(self.dirname):
try:
os.rmdir(self.dirname)
self.transaction.msg("destroy", self.dirname)
except OSError:
self.transaction.msg("notempty", self.dirname)
os.rmdir(self.backup_path)
class FilesystemTransactionWrapper(FilesystemTransaction):
def __init__(self, is_dry_run=False, delegate=None, destroy=False):
super(FilesystemTransactionWrapper, self).__init__(is_dry_run, delegate)
self.destroy = destroy
def rollback(self):
for entry in self.log[::-1]:
entry.rollback()
def commit(self):
for entry in self.log[::-1] if self.destroy else self.log:
entry.commit()
def open(self, filename, mode):
if self.destroy:
modification = FileDestroy(self, filename)
elif os.path.exists(filename):
modification = FileModification(self, filename)
else:
modification = FileCreation(self, filename)
modification.execute()
self.log.append(modification)
if self.is_dry_run or self.destroy:
return Filelike()
else:
return open(filename, mode)
def mkdir(self, dirname):
if self.destroy:
modification = DirectoryDestroy(self, dirname)
modification.execute()
self.log.append(modification)
elif os.path.exists(dirname):
self.msg("exists", dirname)
else:
modification = DirectoryCreation(self, dirname)
modification.execute()
self.log.append(modification)
def transaction_wrapper(self, dry_run=False, destroy=False):
# TODO: implement decorator.
return FilesystemTransactionWrapper(dry_run, self, destroy)
class GenerateMixin(object):
def __init__(self, *arg, **kwargs):
super(GenerateMixin, self).__init__(*arg, **kwargs)
self.app_name = ""
self.app_dir = ""
self.app_module = None
self.class_name = ""
self.fields = []
self.verbose = 1
self.dry_run = False
self.nodes = []
self.destroy = False
self.inflect = inflection
def template(self, template, src, **options):
self.nodes.append({"src": src, "template": template, "options": options})
def empty_directory(self, src, **options):
self.nodes.append({"src": src, "template": False, "options": options})
def create_file(self, src, **options):
self.nodes.append({"src": src, "template": "dummy", "options": options})
def empty_package(self, src, **options):
self.empty_directory(src)
self.create_file(os.path.join(src, "__init__.py"))
def run(self, dry_run=False):
with transaction_wrapper(self, dry_run, self.destroy) as transaction:
for n in self.nodes:
src = n.get("src")
template = n.get("template")
options = n.get("options")
if template:
with transaction.open(src, "w+") as f:
data = self.render_template(template, **options)
f.seek(0)
f.write(options.get("write", "" if self.destroy else data))
self.log(f.read())
else:
transaction.mkdir(src)
reload_django_appcache()
clean_pyc_in_dir(self.app_dir)
def _dictstrmap(self, func, dic):
# apply func to every str value of dic in place and return the dict,
# so callers can use the mapped result directly
assert isinstance(dic, dict)
dict_ = dict([(k, func(v)) for k, v in dic.items()
if isinstance(v, str)])
dic.update(dict_)
return dic
def render_template(self, template, **options):
c = {"package": self.package,
"basecommand": self.basecommand,
"usercommand": self.usercommand,
"class_name": self.class_name,
"app_name": self.app_name,
"app_class": self.app_name,
"app_dir": self.app_dir,
"fields": map(lambda field: strext(field), self.fields),
"template": template,
"options": self._dictstrmap(strext, options)
}
self._dictstrmap(strext, c)
try:
return get_template("{0}/{1}".format(self.package, template)).render(Context(c))
except Exception:
return ""
def handle(self, *args, **options):
try:
app_name = args[0]
except IndexError:
raise CommandError("You must provide an app_name.")
if app_name not in settings.INSTALLED_APPS:
raise CommandError(
"{1}. App with label {0} could not be found. " \
"Are you sure your INSTALLED_APPS setting is correct?".format(
app_name, self.usercommand))
try:
app_module = __import__(app_name)
except ImportError:
raise CommandError(
"Could not import app with name: {0}".format(app_name))
self.app_name = app_name
self.app_module = app_module
self.app_dir = app_module.__path__[0]
self.class_name = args[1] if len(args) > 1 else ""
self.fields = [arg for arg in args[2:] if not fields_ptn.match(arg)] if len(args) > 2 else []
self.package_dir = os.path.abspath(os.path.dirname(__import__(__package__).__file__))
self.destroy = self.basecommand == "destroy"
# handle
self._handle_generate(*args, **options)
exit()
class GenerateSubCommand(GenerateMixin, BaseSubCommand):
def _handle_generate(self, *args, **options):
self.handle_generate(*args, **options)
class GenerateCommand(GenerateMixin, BaseVerboseCommand):
help = ("generate template command generator.")
option_list = BaseVerboseCommand.option_list + (
make_option('--destroy', action='store_true', dest='destroy', default=False,
help='Destroy flag.'
),
)
def _handle_generate(self, *args, **options):
try:
self.subcommand_name = args[1]
except IndexError:
raise CommandError("You must provide an subcommand name.")
self.handle_generate(*args, **options)
|
|
import codecs
from collections import defaultdict
import os
import re
import time
RPM_AVAILABLE = False
try:
import rpm
import rpmUtils.miscutils
RPM_AVAILABLE = True
except ImportError:
pass
import exception
def spec_fn(spec_dir='.'):
specs = [f for f in os.listdir(spec_dir) \
if os.path.isfile(f) and f.endswith('.spec')]
if not specs:
raise exception.SpecFileNotFound()
if len(specs) != 1:
raise exception.MultipleSpecFilesFound()
return specs[0]
def version_parts(version):
"""
Split a version into numeric and non-numeric (milestone) parts if possible.
"""
m = re.match('(\d[\d.]*)(?:\.(.+))?$', version)
if m:
return m.groups()
return version, None
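# Illustrative splits performed by version_parts (added sketch, not part of
# the original module):
#
# version_parts('2013.2') # -> ('2013.2', None)
# version_parts('1.2.3.rc1') # -> ('1.2.3', 'rc1')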
def release_parts(release):
"""
Split a release string into numeric, milestone and macro parts.
"""
m = re.match('([\d.]*)([^%{}]*)(.*)$', release)
if m:
return m.group(1), m.group(2), m.group(3)
return '', '', release
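# Illustrative splits performed by release_parts (added sketch, not part of
# the original module):
#
# release_parts('3%{?dist}') # -> ('3', '', '%{?dist}')
# release_parts('0.1.b2%{?dist}') # -> ('0.1.', 'b2', '%{?dist}')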
def has_macros(s):
return s.find('%{') != -1
def nvrcmp(nvr1, nvr2):
if not RPM_AVAILABLE:
raise exception.RpmModuleNotAvailable()
t1 = rpmUtils.miscutils.stringToVersion(nvr1)
t2 = rpmUtils.miscutils.stringToVersion(nvr2)
return rpm.labelCompare(t1, t2)
def vcmp(v1, v2):
if not RPM_AVAILABLE:
raise exception.RpmModuleNotAvailable()
t1 = ('0', v1, '')
t2 = ('0', v2, '')
return rpm.labelCompare(t1, t2)
def nvr2version(nvr):
if not RPM_AVAILABLE:
raise exception.RpmModuleNotAvailable()
_, v, _, _, _ = rpmUtils.miscutils.splitFilename(nvr)
return v
class Spec(object):
"""
Lazy .spec file parser and editor.
"""
RE_PATCH = r'(?:^|\n)(Patch\d+:)'
RE_AFTER_SOURCES = r'((?:^|\n)Source\d+:[^\n]*\n\n?)'
RE_AFTER_PATCHES_BASE = (
r'((?:^|\n)(?:#[ \t]*\n)*#\s*patches_base\s*=[^\n]*\n(?:#[ '
r'\t]*\n)*)\n*')
def __init__(self, fn=None, txt=None):
self._fn = fn
self._txt = txt
self._rpmspec = None
@property
def fn(self):
if not self._fn:
self._fn = spec_fn()
return self._fn
@property
def txt(self):
if not self._txt:
self._txt = codecs.open(self.fn, 'r', encoding='utf-8').read()
return self._txt
@property
def rpmspec(self):
if not self._rpmspec:
if not RPM_AVAILABLE:
raise exception.RpmModuleNotAvailable()
rpm.addMacro('_sourcedir',
os.path.dirname(os.path.realpath(self.fn)))
try:
self._rpmspec = rpm.spec(self.fn)
except ValueError as e:
raise exception.SpecFileParseError(spec_fn=self.fn,
error=e.args[0])
return self._rpmspec
def expand_macro(self, macro):
rs = self.rpmspec
return rpm.expandMacro(macro)
def get_tag(self, tag, expand_macros=False):
m = re.search('^%s:\s+(\S.*)$' % tag, self.txt, re.M)
if not m:
raise exception.SpecFileParseError(spec_fn=self.fn,
error="%s tag not found" % tag)
tag = m.group(1).rstrip()
if expand_macros and has_macros(tag):
# don't parse using rpm unless required
tag = self.expand_macro(tag)
return tag
def set_tag(self, tag, value):
self._txt, n = re.subn(r'^(%s:\s+).*$' % re.escape(tag),
r'\g<1>%s' % value, self.txt, flags=re.M)
return n > 0
def get_patches_base(self, expand_macros=False):
"""Return a tuple (version, number_of_commits) that are parsed
from the patches_base in the specfile.
"""
match = re.search(r'(?<=patches_base=)[\w.+?%{}]+', self.txt)
if not match:
return None, 0
patches_base = match.group()
if expand_macros and has_macros(patches_base):
# don't parse using rpm unless required
patches_base = self.expand_macro(patches_base)
patches_base_ref, _, n_commits = patches_base.partition('+')
try:
n_commits = int(n_commits)
except ValueError as e:
n_commits = 0
return patches_base_ref, n_commits
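# Illustrative parsing done by get_patches_base (added sketch, not part of the
# original module): for a spec containing the comment
#
# # patches_base=1.2.3+5
#
# it returns ('1.2.3', 5); without the '+N' suffix it returns ('1.2.3', 0),
# and (None, 0) when no patches_base comment is present.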
def get_patches_ignore_regex(self):
"""Returns a string representing a regex for filtering out patches
This string is parsed from a comment in the specfile that contains the
word filter-out followed by an equal sign.
For example a comment as such:
# patches_ignore=(regex)
would mean this method returns the string '(regex)'
Only a very limited subset of characters are accepted so no fancy stuff
like matching groups etc.
"""
match = re.search(r'# *patches_ignore=([\w *.+?[\]{,}\-_]+)', self.txt)
if not match:
return None
regex_string = match.group(1)
try:
return re.compile(regex_string)
except re.error:
return None
def _create_new_patches_base(self, base):
self._txt, n = re.subn(
self.RE_PATCH,
r'\n#\n# patches_base=%s\n#\n\g<1>' % base,
self.txt, count=1, flags=re.M)
if n != 1:
self._txt, n = re.subn(
self.RE_AFTER_SOURCES,
r'\g<1>#\n# patches_base=%s\n#\n\n' % base,
self.txt, count=1, flags=re.M)
if n != 1:
raise exception.SpecFileParseError(
spec_fn=self.fn,
error="Unable to create new #patches_base entry.")
def set_patches_base(self, base):
v, _ = self.get_patches_base()
if base:
if v is None:
self._create_new_patches_base(base)
else:
self._txt, n = re.subn(
r'(#\s*patches_base\s*=\s*)[\w.+]*',
r'\g<1>%s' % base, self.txt, flags=re.M)
if n != 1:
raise exception.SpecFileParseError(
spec_fn=self.fn,
error="Unable to set new #patches_base")
else:
if v is not None:
self._txt = re.sub(
r'(?:\n#)+\s*patches_base\s*=[^\n]*\n(?:#\n)*',
'', self.txt, flags=re.M)
def set_patches_base_version(self, version, ignore_macros=True):
if not version:
version = ''
old_pb, n_commits = self.get_patches_base()
if (ignore_macros and old_pb and has_macros(old_pb)):
return False
if n_commits > 0:
version += ("+%s" % n_commits)
self.set_patches_base(version)
return True
def get_n_patches(self):
return len(re.findall(r'^Patch[0-9]+:', self.txt, re.M))
def get_n_excluded_patches(self):
"""
Gets number of excluded patches from patches_base:
#patches_base=1.0.0+THIS_NUMBER
"""
_, n_commits = self.get_patches_base()
return n_commits
def get_patch_fns(self):
fns = []
for m in re.finditer(r'^\s*Patch\d+:\s*(\S+)\s*$', self.txt, flags=re.M):
fns.append(m.group(1))
return fns
def wipe_patches(self):
self._txt = re.sub(r'\n+(?:(?:Patch|.patch)\d+[^\n]*)', '', self.txt)
def buildarch_sanity_check(self):
bm = re.search('^BuildArch:', self.txt, flags=re.M)
if not bm:
return
bi = bm.start()
sm = re.search('^Source\d+:', self.txt, flags=re.M)
if sm:
si = sm.start()
if bi < si:
raise exception.BuildArchSanityCheckFailed()
pm = re.search('^Patch\d+:', self.txt, flags=re.M)
if pm:
pi = pm.start()
if bi < pi:
raise exception.BuildArchSanityCheckFailed()
def sanity_check(self):
if self.patches_apply_method() == 'git-am':
self.buildarch_sanity_check()
def patches_apply_method(self):
if '\ngit am %{patches}' in self.txt:
return 'git-am'
if '\n%autosetup' in self.txt:
return 'autosetup'
return 'rpm'
def set_commit_ref_macro(self, ref):
self._txt = re.sub(
r'^\%global commit \w+',
'%%global commit %s' % ref, self.txt, flags=re.M)
def set_new_patches(self, fns):
self.wipe_patches()
if not fns:
return
apply_method = self.patches_apply_method()
ps = ''
pa = ''
for i, pfn in enumerate(fns, start=1):
ps += "Patch%04d: %s\n" % (i, pfn)
if apply_method == 'rpm':
pa += "%%patch%04d -p1\n" % i
## PatchXXX: lines after Source0 / #patches_base=
self._txt, n = re.subn(
self.RE_AFTER_PATCHES_BASE,
r'\g<1>%s\n' % ps, self.txt, count=1)
if n != 1:
# keep only the last SourceN: match (if any); m stays None when there is none
m = None
for m in re.finditer(self.RE_AFTER_SOURCES, self.txt):
pass
if not m:
raise exception.SpecFileParseError(
spec_fn=self.fn,
error="Failed to append PatchXXXX: lines")
i = m.end()
startnl, endnl = '', ''
if self._txt[i-2] != '\n':
startnl += '\n'
if self._txt[i] != '\n':
endnl += '\n'
self._txt = self._txt[:i] + startnl + ps + endnl + self._txt[i:]
## %patchXXX -p1 lines after "%setup" if needed
if apply_method == 'rpm':
self._txt, n = re.subn(
r'((?:^|\n)%setup[^\n]*\n)\s*',
r'\g<1>\n%s\n' % pa, self.txt)
if n == 0:
raise exception.SpecFileParseError(
spec_fn=self.fn,
error="Failed to append %patchXXXX lines after %setup")
def get_release_parts(self):
release = self.get_tag('Release')
return release_parts(release)
def recognized_release(self):
_, _, rest = self.get_release_parts()
if rest == '' or re.match('%{\??dist}', rest):
return True
return False
def set_release(self, new_release, milestone=None, postfix=None):
recognized_format = True
release = new_release
if milestone:
release += '.%s' % milestone
if postfix is None:
_, _, postfix = self.get_release_parts()
release += postfix
return self.set_tag('Release', release)
def bump_release(self, milestone=None):
numbers, milestone, postfix = self.get_release_parts()
numlist = numbers.split('.')
i = -1
if numbers[-1] == '.':
i = -2
numlist[i] = str(int(numlist[i]) + 1)
release = ".".join(numlist)
return self.set_release(release, milestone=milestone, postfix=postfix)
def new_changelog_entry(self, user, email, changes=[]):
changes_str = "\n".join(map(lambda x: "- %s" % x, changes)) + "\n"
date = time.strftime('%a %b %d %Y')
version = self.get_tag('Version', expand_macros=True)
try:
epoch = self.get_tag('Epoch')
version = '%s:%s' % (epoch, version)
except exception.SpecFileParseError:
pass
release = self.get_tag('Release', expand_macros=True)
# Assume release ends with %{?dist}
release, _, _ = release.rpartition('.')
# TODO: detect if there is '-' in changelog entries and use it if so
head = "* %s %s <%s> %s-%s" % (date, user, email, version, release)
entry = "%s\n%s\n" % (head, changes_str)
self._txt = re.sub(r'(%changelog\n)', r'\g<1>%s' % entry, self.txt)
def save(self):
if not self.txt:
# no changes
return
if not self.fn:
raise exception.InvalidAction(
"Can't save .spec file without its file name specified.")
f = codecs.open(self.fn, 'w', encoding='utf-8')
f.write(self.txt)
f.close()
self._rpmspec = None
def get_source_urls(self):
# arcane rpm constants, now in python!
sources = filter(lambda x: x[2] == 1, self.rpmspec.sources)
if len(sources) == 0:
error = "No sources found"
raise exception.SpecFileParseError(spec_fn=self.fn, error=error)
# OpenStack packages seem to always use only one tarball
sources0 = filter(lambda x: x[1] == 0, sources)
if len(sources0) == 0:
error = "Source0 not found"
raise exception.SpecFileParseError(spec_fn=self.fn, error=error)
source_url = sources0[0][0]
return [source_url]
def get_source_fns(self):
return map(os.path.basename, self.get_source_urls())
def get_last_changelog_entry(self, strip=False):
_, changelog = self.txt.split("%changelog\n")
changelog = changelog.strip()
entries = re.split(r'\n\n+', changelog)
entry = entries[0]
lines = entry.split("\n")
if strip:
lines = map(lambda x: x.lstrip(" -*\t"), lines)
return lines[0], lines[1:]
def get_requires(self, versions_as_string=False, remove_epoch=True):
reqs = defaultdict(set)
for pkg in self.rpmspec.packages:
pkg_reqs = pkg.header.dsFromHeader('requirename')
for req in pkg_reqs:
m = re.match(r'R (\S+)\s+([=<>!]+)\s*(\S+)', req.DNEVR())
if m:
name, eq, ver = m.groups()
if eq == '=':
eq = '=='
if remove_epoch:
_, sep, rest = ver.partition(':')
if sep:
ver = rest
reqs[name].add(eq + ver)
else:
name = req.N()
reqs[name]
if versions_as_string:
for name in reqs:
reqs[name] = ','.join(reqs[name])
return reqs
|
|
from matplotlib import rc
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import numpy as np
plot_params = {'margin': {'left': 0.13,
'right': 0.05,
'top': 0.07,
'bottom': 0.15},
'keepAxis': ['left', 'bottom'],
'markersize': 3,
'dimensions': {'width': 345},
'fontsize': 10.0,
'ratio': 1.6813}
def format(subplot=111, style='thesis'):
global plot_params, plt
rc('font', **{'family': 'serif', 'serif': ['Palatino']})
if style.lower() == 'ieee':
plot_params['margin']['bottom'] += 0.02
plot_params['margin']['left'] += 0.02
plot_params['markersize'] /= 3.0
plot_params['dimensions']['width'] = 255.0
plot_params['fontsize'] = 9.0
params = {'backend': 'ps',
'axes.labelsize': plot_params['fontsize'],
'font.size':plot_params['fontsize'],
'legend.fontsize': plot_params['fontsize'] - 2,
'axes.linewidth': 0.5,
'xtick.labelsize': plot_params['fontsize'],
'ytick.labelsize': plot_params['fontsize'],
'text.latex.preamble': '\\usepackage{siunitx}',
# 'axes.formatter.limits': '-3, 3',
'text.usetex': False,
'text.latex.unicode': True,
'lines.markersize': plot_params['markersize']}
if subplot == 111:
g_scale_left = plot_params['margin']['left']
g_scale_right = 1.0 - plot_params['margin']['right']
g_scale_top = 1.0 - plot_params['margin']['top']
g_scale_bottom = plot_params['margin']['bottom']
width_pt = plot_params['dimensions']['width']
inch_per_pt = 1.0 / 72.27
ratio = plot_params['ratio']
width_total = width_pt * inch_per_pt
width_graph = width_total * (g_scale_right - g_scale_left)
height_graph = width_graph / ratio
height_total = height_graph / (g_scale_top - g_scale_bottom)
dimensions = [width_total, height_total]
params['figure.figsize'] = dimensions
plt.rcParams.update(params)
fig = plt.figure()
# turn off ticks where there is no spine
fig.gca().xaxis.set_ticks_position('bottom')
fig.gca().yaxis.set_ticks_position('left')
# for loc, spine in plt.gca().spines.iteritems(): #For python 2
for loc, spine in list(plt.gca().spines.items()): # For python 3
if loc in plot_params['keepAxis']:
spine.set_position(('outward', 0)) # keep the spine at the axes edge (offset 0 points)
elif loc in ['right', 'top']:
spine.set_color('none') # don't draw spine
if subplot == 111:
fig.subplots_adjust(left=g_scale_left,
bottom=g_scale_bottom,
right=g_scale_right,
top=g_scale_top,
wspace=0.2,
hspace=0.2)
return fig.add_subplot(subplot)
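# Worked example of the figure-size arithmetic above (added sketch, not part
# of the original module), using the default 'thesis' parameters:
# width_total = 345 pt / 72.27 ~= 4.77 in
# width_graph = 4.77 * (0.95 - 0.13) ~= 3.91 in
# height_graph = 3.91 / 1.6813 ~= 2.33 in
# height_total = 2.33 / (0.93 - 0.15) ~= 2.99 in
# so format() produces a figure of roughly 4.77 x 2.99 inches.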
def get_heatPair(num=5):
return [generate_colours(list(np.linspace(12, 44, num)), 90, 100),
generate_colours(list(np.linspace(224, 192, num)), 90, 100)]
def get_blues(num):
return generate_colours(list(np.linspace(225, 210, num)),
list(np.linspace(100, 100, num)),
list(np.linspace(55, 98, num)))
def get_yellows(num):
return generate_colours(list(np.linspace(34, 44, num)),
list(np.linspace(90, 90, num)),
list(np.linspace(90, 100, num)))
def get_purples(num):
return generate_colours(list(np.linspace(263, 283, num)),
list(np.linspace(90, 90, num)),
list(np.linspace(90, 100, num)))
def get_reds(num):
return generate_colours(list(np.linspace(353, 360, num)),
list(np.linspace(90, 100, num)),
list(np.linspace(75, 100, num)))
def format_labels(axis, ys, debug=False):
"""
Takes a matplotlib axis and formats the labels according to the values
contained in it. This function returns the quantifier, e.g. 'm' or '\mu',
to indicate any scaling that was applied to the labels
"""
if debug:
print("formatting axis " + str(axis))
# print ys
miny = abs(min(ys))
maxy = abs(max(ys))
if miny > maxy:
miny, maxy = maxy, miny
if debug:
print('miny = ' + str(miny))
print('maxy = ' + str(maxy))
quantifiers = ['n', '\mu ', 'm', '', 'k', 'M', 'G']
quantifiers.reverse()
magnitudes = [1e9, 1e6, 1e3, 1, 1e-3, 1e-6, 1e-9]
# Find the lowest quantifier
for mag_low_index, mag_low_value in enumerate(magnitudes):
if miny >= mag_low_value:
break
if debug:
print('lowest magnitude = ' + str(magnitudes[mag_low_index]))
# Find the maximum quantifier
for mag_high_index, mag_high_value in enumerate(magnitudes):
if maxy >= mag_high_value:
break
# Detect if min was zero, if it was just use the max for both
if miny == 0:
mag_low_index = mag_high_index
if debug:
print('lowest magnitude adjusted to '
+ str(magnitudes[mag_low_index])
+ ' because of being equal to 0')
if debug:
print('largest magnitude = ' + str(magnitudes[mag_high_index]))
mag_mid_index = int(((mag_high_index - mag_low_index) / 2.0) + mag_low_index)
multiplyer = 1 / magnitudes[mag_mid_index]
if debug:
print('selected magnitude = ' + str(multiplyer))
quantifier = quantifiers[mag_mid_index]
if debug:
print('selected quantifier = ' + quantifier)
decimalPlaces = 2
if debug:
print('max number on y after scaling = ', (maxy * multiplyer))
if maxy * multiplyer > 10:
decimalPlaces = 1
if maxy * multiplyer > 100:
decimalPlaces = 0
formattingString = '%0.' + str(decimalPlaces) + 'f'
if debug:
print('format string = ' + formattingString)
print(multiplyer)
axis.set_major_formatter(FuncFormatter(lambda x, pos: formattingString % (x * multiplyer)))
return quantifier
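# Illustrative behaviour of format_labels (added sketch, not part of the
# original module): for y-values ranging from 0.0005 to 0.002 the midpoint
# magnitude is 1e-3, so tick labels are multiplied by 1000, shown with two
# decimal places, and the returned quantifier is 'm' (milli).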
def get_standardColours(num, sets=1, palette='new'):
"""
Returns the standard colour arrays for use with creating the graphs
"""
offset = 360.0 / sets
colours = []
if palette == 'new':
saturation = 90
value = 100
for i in range(sets):
hues = list(np.linspace(34 + i * offset,
44 + i * offset,
num))
saturation = list(np.linspace(90, 100, num))
value = list(np.linspace(90, 100, num))
colours.append(generate_colours(hues, saturation, value))
elif palette == 'old':
saturation = 100
value = list(np.linspace(50, 100, num))
hue = 34
for i in range(sets):
colours.append(generate_colours(hue + i * offset,
saturation,
value))
return colours
def generate_colours(hue, saturation, value):
"""
Hue is 360 degrees
Saturation is percent
Value is percent
Any of the fields can be arrays
"""
import colorsys
num = 1
if type(hue) == list:
if len(hue) > num:
num = len(hue)
if type(saturation) == list:
if len(saturation) > num:
num = len(saturation)
if type(value) == list:
if len(value) > num:
num = len(value)
tuples = []
for i in range(num):
if type(hue) == list:
h = hue[i] / 360.0
else:
h = hue / 360.0
if type(saturation) == list:
s = saturation[i] / 100.0
else:
s = saturation / 100.0
if type(value) == list:
v = value[i] / 100.0
else:
v = value / 100.0
tuples.append((h, s, v))
return list(map(lambda x: colorsys.hsv_to_rgb(*x), tuples))
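# Illustrative use of generate_colours (added sketch, not part of the original
# module): at full saturation and value, hues of 0, 120 and 240 degrees map to
# (approximately) pure red, green and blue,
#
# generate_colours([0, 120, 240], 100, 100)
# # -> roughly [(1, 0, 0), (0, 1, 0), (0, 0, 1)], up to floating-point error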
|
|
#!/usr/bin/env python
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import sys
import os
import time
import argparse
import logging
import re
import shutil
import math
import random
from collections import Counter
import threading
import Queue
try:
import digits
except ImportError:
# Add path for DIGITS package
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import digits.config
digits.config.load_config()
from digits import utils, log
import numpy as np
import PIL.Image
import lmdb
import h5py
from cStringIO import StringIO
# must call digits.config.load_config() before caffe to set the path
import caffe.io
try:
import caffe_pb2
except ImportError:
# See issue #32
from caffe.proto import caffe_pb2
logger = logging.getLogger('digits.tools.create_db')
class Error(Exception):
pass
class BadInputFileError(Error):
"""Input file is empty"""
pass
class ParseLineError(Error):
"""Failed to parse a line in the input file"""
pass
class LoadError(Error):
"""Failed to load image[s]"""
pass
class WriteError(Error):
"""Failed to write image[s]"""
pass
class Hdf5DatasetExtendError(Error):
"""Failed to extend an hdf5 dataset"""
pass
class DbWriter(object):
"""
Abstract class for writing to databases
"""
def __init__(self, output_dir, image_height, image_width, image_channels):
self._dir = output_dir
os.makedirs(output_dir)
self._image_height = image_height
self._image_width = image_width
self._image_channels = image_channels
self._count = 0
def write_batch(self, batch):
raise NotImplementedError
def count(self):
return self._count
class LmdbWriter(DbWriter):
# TODO
pass
class Hdf5Writer(DbWriter):
"""
A class for writing to HDF5 files
"""
LIST_FILENAME = 'list.txt'
DTYPE = 'float32'
def __init__(self, **kwargs):
"""
Keyword arguments:
compression -- the type of dataset compression
dset_limit -- the dataset size limit
"""
self._compression = kwargs.pop('compression', None)
self._dset_limit = kwargs.pop('dset_limit', None)
super(Hdf5Writer, self).__init__(**kwargs)
self._db = None
if self._dset_limit is not None:
self._max_count = self._dset_limit / (
self._image_height * self._image_width * self._image_channels)
else:
self._max_count = None
def write_batch(self, batch):
# convert batch to numpy arrays
if batch[0][0].ndim == 2:
# add channel axis for grayscale images
data_batch = np.array([i[0][...,np.newaxis] for i in batch])
else:
data_batch = np.array([i[0] for i in batch])
# Transpose to (channels, height, width)
data_batch = data_batch.transpose((0,3,1,2))
label_batch = np.array([i[1] for i in batch])
# first batch
if self._db is None:
self._create_new_file(len(batch))
self._db['data'][:] = data_batch
self._db['label'][:] = label_batch
self._count += len(batch)
return
current_count = self._db['data'].len()
# will fit in current dataset
if current_count + len(batch) <= self._max_count:
self._db['data'].resize(current_count+len(batch),axis=0)
self._db['label'].resize(current_count+len(batch),axis=0)
self._db['data'][-len(batch):] = data_batch
self._db['label'][-len(batch):] = label_batch
self._count += len(batch)
return
# calculate how many will fit in current dataset
split = self._max_count - current_count
if split > 0:
# put what we can into the current dataset
self._db['data'].resize(self._max_count,axis=0)
self._db['label'].resize(self._max_count,axis=0)
self._db['data'][-split:] = data_batch[:split]
self._db['label'][-split:] = label_batch[:split]
self._count += split
self._create_new_file(len(batch) - split)
self._db['data'][:] = data_batch[split:]
self._db['label'][:] = label_batch[split:]
self._count += len(batch) - split
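# Worked example of the splitting logic above (added sketch, not part of the
# original tool): with _max_count=250, a current dataset holding 200 images
# and a batch of 100, split = 50, so 50 images top up the current HDF5 file
# and the remaining 50 start a new one.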
def _create_new_file(self, initial_count):
assert self._max_count is None or initial_count <= self._max_count, \
'Your batch size is too large for your dataset limit - %d vs %d' % \
(initial_count, self._max_count)
# close the old file
if self._db is not None:
self._db.close()
mode = 'a'
else:
mode = 'w'
# get the filename
filename = self._new_filename()
logger.info('Creating HDF5 database at "%s" ...' %
os.path.join(*filename.split(os.sep)[-2:]))
# update the list
with open(self._list_filename(), mode) as outfile:
outfile.write('%s\n' % filename)
# create the new file
self._db = h5py.File(os.path.join(self._dir, filename), 'w')
# initialize the datasets
self._db.create_dataset('data',
(initial_count,self._image_channels,
self._image_height,self._image_width),
maxshape=(self._max_count,self._image_channels,
self._image_height,self._image_width),
chunks=True, compression=self._compression, dtype=self.DTYPE)
self._db.create_dataset('label',
(initial_count,),
maxshape=(self._max_count,),
chunks=True, compression=self._compression, dtype=self.DTYPE)
def _list_filename(self):
return os.path.join(self._dir, self.LIST_FILENAME)
def _new_filename(self):
return '%s.h5' % self.count()
def create_db(input_file, output_dir,
image_width, image_height, image_channels,
backend,
resize_mode = None,
image_folder = None,
shuffle = True,
mean_files = None,
**kwargs):
"""
Create a database of images from a list of image paths
Raises exceptions on errors
Arguments:
input_file -- a textfile containing labelled image paths
output_dir -- the location to store the created database
image_width -- image resize width
image_height -- image resize height
image_channels -- image channels
backend -- the DB format (lmdb/hdf5)
Keyword arguments:
resize_mode -- passed to utils.image.resize_image()
shuffle -- if True, shuffle the images in the list before creating the database
mean_files -- a list of mean files to save
"""
### Validate arguments
if not os.path.exists(input_file):
raise ValueError('input_file does not exist')
if os.path.exists(output_dir):
logger.warning('removing existing database')
if os.path.isdir(output_dir):
shutil.rmtree(output_dir, ignore_errors=True)
else:
os.remove(output_dir)
if image_width <= 0:
raise ValueError('invalid image width')
if image_height <= 0:
raise ValueError('invalid image height')
if image_channels not in [1,3]:
raise ValueError('invalid number of channels')
if resize_mode not in [None, 'crop', 'squash', 'fill', 'half_crop']:
raise ValueError('invalid resize_mode')
if image_folder is not None and not os.path.exists(image_folder):
raise ValueError('image_folder does not exist')
if mean_files:
for mean_file in mean_files:
if os.path.exists(mean_file):
logger.warning('overwriting existing mean file "%s"!' % mean_file)
else:
dirname = os.path.dirname(mean_file)
if not dirname:
dirname = '.'
if not os.path.exists(dirname):
raise ValueError('Cannot save mean file at "%s"' % mean_file)
compute_mean = bool(mean_files)
### Load lines from input_file into a load_queue
load_queue = Queue.Queue()
image_count = _fill_load_queue(input_file, load_queue, shuffle)
# Start some load threads
batch_size = _calculate_batch_size(image_count,
bool(backend=='hdf5'), kwargs.get('hdf5_dset_limit'),
image_channels, image_height, image_width)
num_threads = _calculate_num_threads(batch_size, shuffle)
write_queue = Queue.Queue(2*batch_size)
summary_queue = Queue.Queue()
for i in xrange(num_threads):
p = threading.Thread(target=_load_thread,
args=(load_queue, write_queue, summary_queue,
image_width, image_height, image_channels,
resize_mode, image_folder, compute_mean)
)
p.daemon = True
p.start()
start = time.time()
if backend == 'lmdb':
_create_lmdb(image_count, write_queue, batch_size, output_dir,
summary_queue, num_threads,
mean_files, **kwargs)
elif backend == 'hdf5':
_create_hdf5(image_count, write_queue, batch_size, output_dir,
image_width, image_height, image_channels,
summary_queue, num_threads,
mean_files, **kwargs)
else:
raise ValueError('invalid backend')
logger.info('Database created after %d seconds.' % (time.time() - start))
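# Illustrative call (added sketch, not part of the original tool; the file and
# directory names are hypothetical):
#
# create_db('train.txt', 'train_db', 256, 256, 3, 'lmdb',
# resize_mode='squash', shuffle=True)
#
# reads "path label" lines from train.txt, resizes every image to 256x256x3
# and writes the result into an LMDB database under train_db/.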
def _create_lmdb(image_count, write_queue, batch_size, output_dir,
summary_queue, num_threads,
mean_files = None,
encoding = None,
lmdb_map_size = None,
**kwargs):
"""
Create an LMDB
Keyword arguments:
encoding -- image encoding format
lmdb_map_size -- the initial LMDB map size
"""
wait_time = time.time()
threads_done = 0
images_loaded = 0
images_written = 0
image_sum = None
batch = []
compute_mean = bool(mean_files)
db = lmdb.open(output_dir,
map_size=lmdb_map_size,
map_async=True,
max_dbs=0)
while (threads_done < num_threads) or not write_queue.empty():
# Send update every 2 seconds
if time.time() - wait_time > 2:
logger.debug('Processed %d/%d' % (images_written, image_count))
wait_time = time.time()
processed_something = False
if not summary_queue.empty():
result_count, result_sum = summary_queue.get()
images_loaded += result_count
# Update total_image_sum
if compute_mean and result_count > 0 and result_sum is not None:
if image_sum is None:
image_sum = result_sum
else:
image_sum += result_sum
threads_done += 1
processed_something = True
if not write_queue.empty():
image, label = write_queue.get()
datum = _array_to_datum(image, label, encoding)
batch.append(datum)
if len(batch) == batch_size:
_write_batch_lmdb(db, batch, images_written)
images_written += len(batch)
batch = []
processed_something = True
if not processed_something:
time.sleep(0.2)
if len(batch) > 0:
_write_batch_lmdb(db, batch, images_written)
images_written += len(batch)
if images_loaded == 0:
raise LoadError('no images loaded from input file')
logger.debug('%s images loaded' % images_loaded)
if images_written == 0:
raise WriteError('no images written to database')
logger.info('%s images written to database' % images_written)
if compute_mean:
_save_means(image_sum, images_written, mean_files)
db.close()
def _create_hdf5(image_count, write_queue, batch_size, output_dir,
image_width, image_height, image_channels,
summary_queue, num_threads,
mean_files = None,
compression = None,
hdf5_dset_limit = None,
**kwargs):
"""
Create an HDF5 file
Keyword arguments:
compression -- dataset compression format
"""
wait_time = time.time()
threads_done = 0
images_loaded = 0
images_written = 0
image_sum = None
batch = []
compute_mean = bool(mean_files)
writer = Hdf5Writer(
output_dir = output_dir,
image_height = image_height,
image_width = image_width,
image_channels = image_channels,
dset_limit = hdf5_dset_limit,
compression = compression,
)
while (threads_done < num_threads) or not write_queue.empty():
# Send update every 2 seconds
if time.time() - wait_time > 2:
logger.debug('Processed %d/%d' % (images_written, image_count))
wait_time = time.time()
processed_something = False
if not summary_queue.empty():
result_count, result_sum = summary_queue.get()
images_loaded += result_count
# Update total_image_sum
if compute_mean and result_count > 0 and result_sum is not None:
if image_sum is None:
image_sum = result_sum
else:
image_sum += result_sum
threads_done += 1
processed_something = True
if not write_queue.empty():
batch.append(write_queue.get())
if len(batch) == batch_size:
writer.write_batch(batch)
images_written += len(batch)
batch = []
processed_something = True
if not processed_something:
time.sleep(0.2)
if len(batch) > 0:
writer.write_batch(batch)
images_written += len(batch)
assert images_written == writer.count()
if images_loaded == 0:
raise LoadError('no images loaded from input file')
logger.debug('%s images loaded' % images_loaded)
if images_written == 0:
raise WriteError('no images written to database')
logger.info('%s images written to database' % images_written)
if compute_mean:
_save_means(image_sum, images_written, mean_files)
def _fill_load_queue(filename, queue, shuffle):
"""
Fill the queue with data from the input file
Print the category distribution
Returns the number of lines added to the queue
NOTE: This can be slow on a large input file, but we need the total image
count in order to report the progress, so we might as well read it all
"""
total_lines = 0
valid_lines = 0
distribution = Counter()
with open(filename) as infile:
if shuffle:
lines = infile.readlines() # less memory efficient
random.shuffle(lines)
for line in lines:
total_lines += 1
try:
result = _parse_line(line, distribution)
valid_lines += 1
queue.put(result)
except ParseLineError:
pass
else:
for line in infile: # more memory efficient
total_lines += 1
try:
result = _parse_line(line, distribution)
valid_lines += 1
queue.put(result)
except ParseLineError:
pass
logger.debug('%s total lines in file' % total_lines)
if valid_lines == 0:
raise BadInputFileError('No valid lines in input file')
logger.info('%s valid lines in file' % valid_lines)
for key in sorted(distribution):
logger.debug('Category %s has %d images.' % (key, distribution[key]))
return valid_lines
def _parse_line(line, distribution):
"""
Parse a line in the input file into (path, label)
"""
line = line.strip()
if not line:
raise ParseLineError
# Expect format - [/]path/to/file.jpg 123
match = re.match(r'(.+)\s+(\d+)\s*$', line)
if match is None:
raise ParseLineError
path = match.group(1)
label = int(match.group(2))
distribution[label] += 1
return path, label
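# Illustrative parse (added sketch, not part of the original tool; the path is
# hypothetical):
#
# _parse_line('images/cat_001.jpg 3', Counter())
# # -> ('images/cat_001.jpg', 3), and the count for label 3 is incremented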
def _calculate_batch_size(image_count, is_hdf5=False, hdf5_dset_limit=None,
image_channels=None, image_height=None, image_width=None):
"""
Calculates an appropriate batch size for creating this database
"""
if is_hdf5 and hdf5_dset_limit is not None:
return min(100, image_count, hdf5_dset_limit/(image_channels*image_height*image_width))
else:
return min(100, image_count)
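# Worked example (added sketch, not part of the original tool): with the HDF5
# backend, hdf5_dset_limit=2**31 and 256x256x3 images, the per-dataset cap is
# 2**31 / (3*256*256) ~= 10922 images, so the batch size is still capped at 100.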
def _calculate_num_threads(batch_size, shuffle):
"""
Calculates an appropriate number of threads for creating this database
"""
if shuffle:
return min(10, int(round(math.sqrt(batch_size))))
else:
#XXX This is the only way to preserve order for now
# This obviously hurts performance considerably
return 1
def _load_thread(load_queue, write_queue, summary_queue,
image_width, image_height, image_channels,
resize_mode, image_folder, compute_mean):
"""
Consumes items in load_queue
Produces items to write_queue
Stores cumulative results in summary_queue
"""
images_added = 0
if compute_mean:
image_sum = _initial_image_sum(image_width, image_height, image_channels)
else:
image_sum = None
while not load_queue.empty():
try:
path, label = load_queue.get(True, 0.05)
except Queue.Empty:
continue
# prepend path with image_folder, if appropriate
if not utils.is_url(path) and image_folder and not os.path.isabs(path):
path = os.path.join(image_folder, path)
try:
image = utils.image.load_image(path)
except utils.errors.LoadImageError as e:
logger.warning('[%s] %s: %s' % (path, type(e).__name__, e) )
continue
image = utils.image.resize_image(image,
image_height, image_width,
channels = image_channels,
resize_mode = resize_mode,
)
if compute_mean:
image_sum += image
write_queue.put((image, label))
images_added += 1
summary_queue.put((images_added, image_sum))
def _initial_image_sum(width, height, channels):
"""
Returns an array of zeros that will be used to store the accumulated sum of images
"""
if channels == 1:
return np.zeros((height, width), np.float64)
else:
return np.zeros((height, width, channels), np.float64)
def _array_to_datum(image, label, encoding):
"""
Create a caffe Datum from a numpy.ndarray
"""
if not encoding:
# Transform to caffe's format requirements
if image.ndim == 3:
# Transpose to (channels, height, width)
image = image.transpose((2,0,1))
if image.shape[0] == 3:
# channel swap
# XXX see issue #59
image = image[[2,1,0],...]
elif image.ndim == 2:
# Add a channels axis
image = image[np.newaxis,:,:]
else:
raise Exception('Image has unrecognized shape: "%s"' % image.shape)
datum = caffe.io.array_to_datum(image, label)
else:
datum = caffe_pb2.Datum()
if image.ndim == 3:
datum.channels = image.shape[2]
else:
datum.channels = 1
datum.height = image.shape[0]
datum.width = image.shape[1]
datum.label = label
s = StringIO()
if encoding == 'png':
PIL.Image.fromarray(image).save(s, format='PNG')
elif encoding == 'jpg':
PIL.Image.fromarray(image).save(s, format='JPEG', quality=90)
else:
raise ValueError('Invalid encoding type')
datum.data = s.getvalue()
datum.encoded = True
return datum
def _write_batch_lmdb(db, batch, image_count):
"""
Write a batch to an LMDB database
"""
try:
with db.begin(write=True) as lmdb_txn:
for i, datum in enumerate(batch):
key = '%08d_%d' % (image_count + i, datum.label)
lmdb_txn.put(key, datum.SerializeToString())
except lmdb.MapFullError:
# double the map_size
curr_limit = db.info()['map_size']
new_limit = curr_limit*2
logger.debug('Doubling LMDB map size to %sMB ...' % (new_limit>>20,))
try:
db.set_mapsize(new_limit) # double it
except AttributeError as e:
version = tuple(int(x) for x in lmdb.__version__.split('.'))
if version < (0,87):
raise Error('py-lmdb is out of date (%s vs 0.87)' % lmdb.__version__)
else:
raise e
# try again
_write_batch_lmdb(db, batch, image_count)
def _save_means(image_sum, image_count, mean_files):
"""
Save mean[s] to file
"""
mean = np.around(image_sum / image_count).astype(np.uint8)
for mean_file in mean_files:
if mean_file.lower().endswith('.npy'):
np.save(mean_file, mean)
elif mean_file.lower().endswith('.binaryproto'):
data = mean
# Transform to caffe's format requirements
if data.ndim == 3:
# Transpose to (channels, height, width)
data = data.transpose((2,0,1))
if data.shape[0] == 3:
# channel swap
# XXX see issue #59
data = data[[2,1,0],...]
elif mean.ndim == 2:
# Add a channels axis
data = data[np.newaxis,:,:]
blob = caffe_pb2.BlobProto()
blob.num = 1
blob.channels, blob.height, blob.width = data.shape
blob.data.extend(data.astype(float).flat)
with open(mean_file, 'wb') as outfile:
outfile.write(blob.SerializeToString())
elif mean_file.lower().endswith(('.jpg', '.jpeg', '.png')):
image = PIL.Image.fromarray(mean)
image.save(mean_file)
else:
logger.warning('Unrecognized file extension for mean file: "%s"' % mean_file)
continue
logger.info('Mean saved at "%s"' % mean_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create-Db tool - DIGITS')
### Positional arguments
parser.add_argument('input_file',
help='An input file of labeled images')
parser.add_argument('output_dir',
help='Path to the output database')
parser.add_argument('width',
type=int,
help='width of resized images'
)
parser.add_argument('height',
type=int,
help='height of resized images'
)
### Optional arguments
parser.add_argument('-c', '--channels',
type=int,
default=3,
help='channels of resized images (1 for grayscale, 3 for color [default])'
)
parser.add_argument('-r', '--resize_mode',
help='resize mode for images (must be "crop", "squash" [default], "fill" or "half_crop")'
)
parser.add_argument('-m', '--mean_file', action='append',
help="location to output the image mean (doesn't save mean if not specified)")
parser.add_argument('-f', '--image_folder',
help='folder containing the images (if the paths in input_file are not absolute)')
parser.add_argument('-s', '--shuffle',
action='store_true',
help='Shuffle images before saving'
)
parser.add_argument('-e', '--encoding',
help = 'Image encoding format (jpg/png)'
)
parser.add_argument('-C', '--compression',
help = 'Database compression format (gzip)'
)
parser.add_argument('-b', '--backend',
default='lmdb',
help = 'The database backend - lmdb[default] or hdf5')
parser.add_argument('--lmdb_map_size',
type=int,
help = 'The initial map size for LMDB (in MB)')
parser.add_argument('--hdf5_dset_limit',
type=int,
help = 'The size limit for HDF5 datasets')
args = vars(parser.parse_args())
if args['lmdb_map_size']:
# convert from MB to B
args['lmdb_map_size'] <<= 20
try:
create_db(args['input_file'], args['output_dir'],
args['width'], args['height'], args['channels'],
args['backend'],
resize_mode = args['resize_mode'],
image_folder = args['image_folder'],
shuffle = args['shuffle'],
mean_files = args['mean_file'],
encoding = args['encoding'],
compression = args['compression'],
lmdb_map_size = args['lmdb_map_size'],
hdf5_dset_limit = args['hdf5_dset_limit'],
)
except Exception as e:
logger.error('%s: %s' % (type(e).__name__, e.message))
raise
|
|
# Copyright 2019, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for building TFF optimizers from flags."""
import collections
import inspect
from typing import Any, Dict, List, Optional
from absl import flags
from absl import logging
import tensorflow_federated as tff
# List of optimizers currently supported.
_OPTIMIZER_BUILDERS = collections.OrderedDict(
adagrad=tff.learning.optimizers.build_adagrad,
adam=tff.learning.optimizers.build_adam,
rmsprop=tff.learning.optimizers.build_rmsprop,
sgd=tff.learning.optimizers.build_sgdm,
yogi=tff.learning.optimizers.build_yogi)
def define_optimizer_flags(prefix: str) -> None:
"""Defines flags with `prefix` to configure an optimizer.
This method is intended to be paired with `create_optimizer_from_flags` using
the same `prefix`, to allow Python binaries to construct TFF
optimizers parameterized by command-line flags.
This creates two new flags:
* `--<prefix>_optimizer=<optimizer name>`
* `--<prefix>_learning_rate`
as well as a suite of flags for each optimizer:
* `--<prefix>_<optimizer name>_<constructor_argument>`
For example, given the prefix "client" this will create flags (non-exhaustive
list):
* `--client_optimizer`
* `--client_learning_rate`
* `--client_sgd_momentum`
* `--client_adam_beta_1`
* `--client_adam_beta_2`
* `--client_adam_epsilon`
Then calls to `create_optimizer_from_flags('client')` will construct an
optimizer of the type named in `--client_optimizer`, parameterized by the
flags prefixed with the matching optimizer name. For example, if
`--client_optimizer=sgd`, `--client_sgd_*` flags will be used.
IMPORTANT: For flags to be correctly parsed from the commandline, this method
must be called before `absl.app.run(main)`, and is recommended to be called
next to other flag definitions at the top of a py_binary.
Args:
prefix: A string (possibly empty) indicating which optimizer is being
configured.
"""
# Create top-level, non-optimizer specific flags for picking the optimizer
# type and the learning rate.
flags.DEFINE_enum(
name='{!s}_optimizer'.format(prefix),
default=None,
enum_values=list(_OPTIMIZER_BUILDERS.keys()),
help='The type of optimizer to construct for `{!s}`'.format(prefix))
logging.info('Defined new flag: [%s]', '{!s}_optimizer'.format(prefix))
flags.DEFINE_float(
name='{!s}_learning_rate'.format(prefix),
default=None,
help='Base learning rate for optimizer `{!s}`'.format(prefix))
logging.info('Defined new flag: [%s]', '{!s}_learning_rate'.format(prefix))
for optimizer_name, optimizer_builder in _OPTIMIZER_BUILDERS.items():
# Pull out the constructor parameters except for `self`.
constructor_signature = inspect.signature(optimizer_builder)
constructor_params = list(constructor_signature.parameters.values())[1:]
def prefixed(basename, optimizer_name=optimizer_name):
if prefix:
return '{!s}_{!s}_{!s}'.format(prefix, optimizer_name, basename)
else:
return '{!s}_{!s}'.format(optimizer_name, basename)
def is_param_of_type(param, typ):
return (param.default is None and param.annotation == Optional[typ] or
isinstance(param.default, typ))
for param in constructor_params:
if param.name in ['kwargs', 'args', 'learning_rate']:
continue
if is_param_of_type(param, bool):
define_flag_fn = flags.DEFINE_bool
elif is_param_of_type(param, float):
define_flag_fn = flags.DEFINE_float
elif is_param_of_type(param, int):
define_flag_fn = flags.DEFINE_integer
elif is_param_of_type(param, str):
define_flag_fn = flags.DEFINE_string
elif is_param_of_type(param, List[str]):
define_flag_fn = flags.DEFINE_multi_string
else:
raise NotImplementedError('Cannot define flag [{!s}] '
'for parameter [{!s}] of type [{!s}] '
'(default value type [{!s}]) '
'on optimizer [{!s}]'.format(
prefixed(param.name),
param.name, param.annotation,
type(param.default), optimizer_name))
define_flag_fn(
name=prefixed(param.name),
default=param.default,
help='{!s} argument for the {!s} optimizer.'.format(
param.name, optimizer_name))
logging.info('Defined new flag: [%s]', prefixed(param.name))
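# A minimal usage sketch for the helpers in this module. The prefixes and the
# `main` body below are illustrative and not part of this module:
#
#   define_optimizer_flags('client')
#   define_optimizer_flags('server')
#
#   def main(argv):
#     client_optimizer = create_optimizer_from_flags('client')
#     server_optimizer = create_optimizer_from_flags('server')
#     ...
#
#   if __name__ == '__main__':
#     from absl import app
#     app.run(main)  # flags are parsed here, after they have been defined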
def remove_unused_flags(prefix: str,
hparam_dict: Dict[str, Any]) -> collections.OrderedDict:
"""Removes unused optimizer flags with a given prefix.
This method is intended to be used with `define_optimizer_flags`, and is used
to remove elements of hparam_dict associated with unused optimizer flags.
For example, given the prefix "client", define_optimizer_flags will create
flags including:
* `--client_optimizer`
* `--client_learning_rate`
* `--client_sgd_momentum`
* `--client_adam_beta_1`
* `--client_adam_beta_2`
* `--client_adam_epsilon`
and other such flags.
However, for purposes of recording hyperparameters, we would like to only keep
those that correspond to the optimizer selected in the flag
--client_optimizer. This method is intended to remove the unused flags.
For example, if `--client_optimizer=sgd` was set, then calling this method
with the prefix `client` will remove all pairs in hparam_dict except those
associated with the flags:
* `--client_optimizer`
* `--client_learning_rate`
* `--client_sgd_momentum`
Args:
prefix: The prefix used to define optimizer flags, such as via
`optimizer_utils.define_optimizer_flags(prefix)`. Standard examples
include `prefix=client` and `prefix=server`.
hparam_dict: A dictionary of (string, value) pairs corresponding to
experiment hyperparameters.
Returns:
An ordered dictionary of (string, value) pairs from hparam_dict that omits
any pairs where string = "<prefix>_<optimizer>*" but <optimizer> is not the
one set via the flag --<prefix>_optimizer=...
"""
def prefixed(basename):
return '{}_{}'.format(prefix, basename) if prefix else basename
if prefixed('optimizer') not in hparam_dict.keys():
raise ValueError('The flag {!s} was not defined.'.format(
prefixed('optimizer')))
optimizer_name = hparam_dict[prefixed('optimizer')]
if not optimizer_name:
raise ValueError('The flag {!s} was not set. Unable to determine the '
'relevant optimizer.'.format(prefixed('optimizer')))
unused_optimizer_flag_prefixes = [
prefixed(k) for k in _OPTIMIZER_BUILDERS.keys() if k != optimizer_name
]
def _is_used_flag(flag_name):
# We filter by whether the flag contains an unused optimizer prefix.
# This retains any flag not of the form <prefix>_<optimizer>_*.
for unused_flag_prefix in unused_optimizer_flag_prefixes:
if flag_name.startswith(f'{unused_flag_prefix}_'):
return False
return True
used_flags = collections.OrderedDict()
for (flag_name, flag_value) in hparam_dict.items():
if _is_used_flag(flag_name):
used_flags[flag_name] = flag_value
return used_flags
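# For example, if `--client_optimizer=sgd` was selected, a (hypothetical)
# hparam_dict is reduced to the sgd-related entries only:
#
#   hparams = collections.OrderedDict(
#       client_optimizer='sgd',
#       client_learning_rate=0.1,
#       client_sgd_momentum=0.9,
#       client_adam_beta_1=0.9)
#   remove_unused_flags('client', hparams)
#   # -> OrderedDict([('client_optimizer', 'sgd'),
#   #                 ('client_learning_rate', 0.1),
#   #                 ('client_sgd_momentum', 0.9)])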
def create_optimizer_from_flags(
prefix: str) -> tff.learning.optimizers.Optimizer:
"""Returns an optimizer based on prefixed flags.
  This method is intended to be paired with `define_optimizer_flags` using the
  same `prefix`, to allow Python binaries to construct TensorFlow optimizers
  parameterized by commandline flags.
This method expects at least two flags to have been defined and set:
* `--<prefix>_optimizer=<optimizer name>`
* `--<prefix>_learning_rate`
  as well as a suite of flags for each optimizer:
* `--<prefix>_<optimizer name>_<constructor_argument>`
For example, if `prefix='client'` this method first reads the flags:
* `--client_optimizer`
* `--client_learning_rate`
If the optimizer flag is `'sgd'`, then an SGD-based optimizer is constructed
using the values in the flags prefixed with `--client_sgd_`.
Args:
prefix: The same string prefix passed to `define_optimizer_flags`.
Returns:
A `tff.learning.optimizers.Optimizer`.
"""
def prefixed(basename):
return '{}_{}'.format(prefix, basename) if prefix else basename
optimizer_flag_name = prefixed('optimizer')
if flags.FLAGS[optimizer_flag_name] is None:
raise ValueError('Must specify flag --{!s}'.format(optimizer_flag_name))
optimizer_name = flags.FLAGS[optimizer_flag_name].value
optimizer_builder = _OPTIMIZER_BUILDERS.get(optimizer_name)
if optimizer_builder is None:
logging.error('Unknown optimizer [%s], known optimizers are [%s].',
optimizer_name, list(_OPTIMIZER_BUILDERS.keys()))
raise ValueError('`{!s}` is not a valid optimizer for flag --{!s}, must be '
'one of {!s}. See error log for details.'.format(
optimizer_name, optimizer_flag_name,
list(_OPTIMIZER_BUILDERS.keys())))
def _has_user_value(flag):
"""Check if a commandline flag has a user set value."""
return flag.present or flag.value != flag.default
# Validate that the optimizers that weren't picked don't have flag values set.
  # Settings that won't be used likely indicate an expectation gap between
  # the user and the system, so we should notify the user.
unused_flag_prefixes = [
prefixed(k) for k in _OPTIMIZER_BUILDERS.keys() if k != optimizer_name
]
mistakenly_set_flags = []
for flag_name in flags.FLAGS:
if not _has_user_value(flags.FLAGS[flag_name]):
# Flag was not set by the user, skip it.
continue
# Otherwise the flag has a value set by the user.
for unused_prefix in unused_flag_prefixes:
if flag_name.startswith(f'{unused_prefix}_'):
mistakenly_set_flags.append(flag_name)
break
if mistakenly_set_flags:
raise ValueError('Commandline flags for optimizers other than [{!s}] '
'(value of --{!s}) are set. These would be ignored, '
'were the flags set by mistake? Flags: {!s}'.format(
optimizer_name, optimizer_flag_name,
mistakenly_set_flags))
lr_flag_name = prefixed('learning_rate')
lr_flag = flags.FLAGS[lr_flag_name]
kwargs = {}
if _has_user_value(lr_flag):
kwargs['learning_rate'] = lr_flag.value
else:
raise ValueError(
'Learning rate for {!s} must be set by the flag --{!s} .'.format(
prefix, lr_flag_name))
flag_prefix = prefixed(optimizer_name)
prefix_len = len(flag_prefix) + 1
for flag_name in flags.FLAGS:
if not flag_name.startswith(f'{flag_prefix}_'):
continue
arg_name = flag_name[prefix_len:]
kwargs[arg_name] = flags.FLAGS[flag_name].value
return optimizer_builder(**kwargs)
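# End-to-end sketch (the command line below is illustrative; the flag names
# follow the pattern created by `define_optimizer_flags`):
#
#   # python trainer.py --client_optimizer=sgd \
#   #     --client_learning_rate=0.05 --client_sgd_momentum=0.9
#   optimizer = create_optimizer_from_flags('client')
#   # Roughly equivalent to:
#   #   tff.learning.optimizers.build_sgdm(learning_rate=0.05, momentum=0.9)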
|
|
# pylint: disable=unused-argument
import importlib
import logging
import pytest
import sys
import warnings
from concurrent.futures import ThreadPoolExecutor
from io import StringIO
from itertools import permutations
from unittest import mock
import mlflow
from mlflow.tracking import MlflowClient
from mlflow.utils import gorilla
from mlflow.utils.autologging_utils import (
safe_patch,
get_autologging_config,
autologging_is_disabled,
)
from tests.autologging.fixtures import test_mode_off
from tests.autologging.fixtures import reset_stderr # pylint: disable=unused-import
pytestmark = pytest.mark.large
AUTOLOGGING_INTEGRATIONS_TO_TEST = {
mlflow.sklearn: "sklearn",
mlflow.keras: "keras",
mlflow.xgboost: "xgboost",
mlflow.lightgbm: "lightgbm",
mlflow.pytorch: "torch",
mlflow.gluon: "mxnet.gluon",
mlflow.fastai: "fastai",
mlflow.statsmodels: "statsmodels",
mlflow.spark: "pyspark",
mlflow.pyspark.ml: "pyspark",
}
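# The integrations toggled in these tests share a common configuration store;
# a typical interaction, using sklearn as an (illustrative) example, is:
#
#   mlflow.sklearn.autolog(disable=True)
#   autologging_is_disabled("sklearn")                   # -> True
#   get_autologging_config("sklearn", "disable", False)  # -> True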
@pytest.fixture(autouse=True, scope="module")
def import_integration_libraries():
for library_module in AUTOLOGGING_INTEGRATIONS_TO_TEST.values():
importlib.import_module(library_module)
@pytest.fixture(autouse=True)
def disable_autologging_at_test_end():
    # The yield statement ensures that the code below is executed as teardown code.
    # This avoids an active autologging session bleeding out of this test suite.
yield
for integration in AUTOLOGGING_INTEGRATIONS_TO_TEST:
integration.autolog(disable=True)
@pytest.fixture()
def setup_sklearn_model():
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
X, y = load_iris(return_X_y=True)
model = LogisticRegression()
return X, y, model
@pytest.mark.parametrize("integration", AUTOLOGGING_INTEGRATIONS_TO_TEST.keys())
def test_autologging_integrations_expose_configs_and_support_disablement(integration):
for integration in AUTOLOGGING_INTEGRATIONS_TO_TEST:
integration.autolog(disable=False)
integration_name = integration.autolog.integration_name
assert not autologging_is_disabled(integration_name)
assert not get_autologging_config(integration_name, "disable", True)
integration.autolog(disable=True)
assert autologging_is_disabled(integration_name)
assert get_autologging_config(integration_name, "disable", False)
@pytest.mark.parametrize("integration", AUTOLOGGING_INTEGRATIONS_TO_TEST.keys())
def test_autologging_integrations_use_safe_patch_for_monkey_patching(integration):
for integration in AUTOLOGGING_INTEGRATIONS_TO_TEST:
with mock.patch(
"mlflow.utils.gorilla.apply", wraps=gorilla.apply
) as gorilla_mock, mock.patch(
integration.__name__ + ".safe_patch", wraps=safe_patch
) as safe_patch_mock:
# In `mlflow.xgboost.autolog()` and `mlflow.lightgbm.autolog()`,
# we enable autologging for XGBoost and LightGBM sklearn models
# using `mlflow.sklearn._autolog()`. So besides `safe_patch` calls in
# `autolog()`, we need to count additional `safe_patch` calls
            # made by the sklearn autologging routine as well.
if integration.__name__ in ["mlflow.xgboost", "mlflow.lightgbm"]:
with mock.patch(
"mlflow.sklearn.safe_patch", wraps=safe_patch
) as sklearn_safe_patch_mock:
integration.autolog(disable=False)
safe_patch_call_count = (
safe_patch_mock.call_count + sklearn_safe_patch_mock.call_count
)
else:
integration.autolog(disable=False)
safe_patch_call_count = safe_patch_mock.call_count
assert safe_patch_call_count > 0
# `safe_patch` leverages `gorilla.apply` in its implementation. Accordingly, we expect
# that the total number of `gorilla.apply` calls to be equivalent to the number of
# `safe_patch` calls. This verifies that autologging integrations are leveraging
# `safe_patch`, rather than calling `gorilla.apply` directly (which does not provide
# exception safety properties)
assert safe_patch_call_count == gorilla_mock.call_count
def test_autolog_respects_exclusive_flag(setup_sklearn_model):
x, y, model = setup_sklearn_model
mlflow.sklearn.autolog(exclusive=True)
run = mlflow.start_run()
model.fit(x, y)
mlflow.end_run()
run_data = MlflowClient().get_run(run.info.run_id).data
metrics, params, tags = run_data.metrics, run_data.params, run_data.tags
assert not metrics
assert not params
assert all("mlflow." in key for key in tags)
mlflow.sklearn.autolog(exclusive=False)
run = mlflow.start_run()
model.fit(x, y)
mlflow.end_run()
run_data = MlflowClient().get_run(run.info.run_id).data
metrics, params = run_data.metrics, run_data.params
assert metrics
assert params
def test_autolog_respects_disable_flag(setup_sklearn_model):
x, y, model = setup_sklearn_model
mlflow.sklearn.autolog(disable=True, exclusive=False)
run = mlflow.start_run()
model.fit(x, y)
mlflow.end_run()
run_data = MlflowClient().get_run(run.info.run_id).data
metrics, params, tags = run_data.metrics, run_data.params, run_data.tags
assert not metrics
assert not params
assert all("mlflow." in key for key in tags)
mlflow.sklearn.autolog(disable=False, exclusive=False)
run = mlflow.start_run()
model.fit(x, y)
mlflow.end_run()
run_data = MlflowClient().get_run(run.info.run_id).data
metrics, params = run_data.metrics, run_data.params
assert metrics
assert params
def test_autolog_reverts_patched_code_when_disabled():
# use `KMeans` because it implements `fit`, `fit_transform`, and `fit_predict`.
from sklearn.cluster import KMeans
# Before any patching.
model = KMeans()
original_fit = model.fit
original_fit_transform = model.fit_transform
original_fit_predict = model.fit_predict
# After patching.
mlflow.sklearn.autolog(disable=False)
patched_fit = model.fit
patched_fit_transform = model.fit_transform
patched_fit_predict = model.fit_predict
assert patched_fit != original_fit
assert patched_fit_transform != original_fit_transform
assert patched_fit_predict != original_fit_predict
# After revert of patching.
mlflow.sklearn.autolog(disable=True)
reverted_fit = model.fit
reverted_fit_transform = model.fit_transform
reverted_fit_predict = model.fit_predict
assert reverted_fit == original_fit
assert reverted_fit_transform == original_fit_transform
assert reverted_fit_predict == original_fit_predict
assert reverted_fit != patched_fit
assert reverted_fit_transform != patched_fit_transform
assert reverted_fit_predict != patched_fit_predict
def test_autolog_respects_disable_flag_across_import_orders():
def test():
from sklearn import svm, datasets
iris = datasets.load_iris()
svc = svm.SVC(C=2.0, degree=5, kernel="rbf")
run = mlflow.start_run()
svc.fit(iris.data, iris.target)
mlflow.end_run()
run_data = MlflowClient().get_run(run.info.run_id).data
metrics, params, tags = run_data.metrics, run_data.params, run_data.tags
assert not metrics
assert not params
assert all("mlflow." in key for key in tags)
def import_sklearn():
import sklearn # pylint: disable=unused-import
def disable_autolog():
mlflow.sklearn.autolog(disable=True)
def mlflow_autolog():
mlflow.autolog()
import_list = [import_sklearn, disable_autolog, mlflow_autolog]
for func_order_list in permutations(import_list):
for fun in func_order_list:
fun()
test()
@pytest.mark.usefixtures(test_mode_off.__name__)
def test_autolog_respects_silent_mode(tmpdir):
# Use file-based experiment storage for this test. Otherwise, concurrent experiment creation in
# multithreaded contexts may fail for other storage backends (e.g. SQLAlchemy)
mlflow.set_tracking_uri(str(tmpdir))
mlflow.set_experiment("test_experiment")
og_showwarning = warnings.showwarning
stream = StringIO()
sys.stderr = stream
logger = logging.getLogger(mlflow.__name__)
from sklearn import datasets
iris = datasets.load_iris()
def train_model():
import sklearn.utils
from sklearn import svm
from sklearn.model_selection import GridSearchCV
parameters = {"kernel": ("linear", "rbf"), "C": [1, 10]}
svc = svm.SVC()
with sklearn.utils.parallel_backend(backend="threading"):
clf = GridSearchCV(svc, parameters)
clf.fit(iris.data, iris.target)
return True
# Call general and framework-specific autologging APIs to cover a
# larger surface area for testing purposes
mlflow.autolog(silent=True)
mlflow.sklearn.autolog(silent=True, log_input_examples=True)
executions = []
with ThreadPoolExecutor(max_workers=50) as executor:
for _ in range(2):
e = executor.submit(train_model)
executions.append(e)
assert all([e.result() is True for e in executions])
assert not stream.getvalue()
# Verify that `warnings.showwarning` was restored to its original value after training
# and that MLflow event logs are enabled
assert warnings.showwarning == og_showwarning
logger.info("verify that event logs are enabled")
assert "verify that event logs are enabled" in stream.getvalue()
stream.truncate(0)
mlflow.sklearn.autolog(silent=False, log_input_examples=True)
executions = []
with ThreadPoolExecutor(max_workers=50) as executor:
for _ in range(100):
e = executor.submit(train_model)
executions.append(e)
assert all([e.result() is True for e in executions])
assert stream.getvalue()
# Verify that `warnings.showwarning` was restored to its original value after training
# and that MLflow event logs are enabled
assert warnings.showwarning == og_showwarning
logger.info("verify that event logs are enabled")
assert "verify that event logs are enabled" in stream.getvalue()
# TODO: Investigate why this test occasionally leaks a run, which causes the
# `clean_up_leaked_runs` fixture in `tests/conftest.py` to fail.
while mlflow.active_run():
mlflow.end_run()
def test_autolog_globally_configured_flag_set_correctly():
from mlflow.utils.autologging_utils import AUTOLOGGING_INTEGRATIONS
AUTOLOGGING_INTEGRATIONS.clear()
import sklearn # pylint: disable=unused-import,unused-variable
import pyspark # pylint: disable=unused-import,unused-variable
import pyspark.ml # pylint: disable=unused-import,unused-variable
integrations_to_test = ["sklearn", "spark", "pyspark.ml"]
mlflow.autolog()
for integration_name in integrations_to_test:
assert AUTOLOGGING_INTEGRATIONS[integration_name]["globally_configured"]
mlflow.sklearn.autolog()
mlflow.spark.autolog()
mlflow.pyspark.ml.autolog()
for integration_name in integrations_to_test:
assert "globally_configured" not in AUTOLOGGING_INTEGRATIONS[integration_name]
|
|
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# based on
# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py
"""Implements iptables rules using linux utilities."""
import collections
import contextlib
import difflib
import os
import re
import sys
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from neutron._i18n import _, _LE, _LW
from neutron.agent.common import config
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import utils as linux_utils
from neutron.common import exceptions as n_exc
from neutron.common import utils
LOG = logging.getLogger(__name__)
config.register_iptables_opts(cfg.CONF)
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
"""Grab the name of the binary we're running in."""
return os.path.basename(sys.argv[0])[:16].replace(' ', '_')
binary_name = get_binary_name()
# The length of a chain name must be less than or equal to 11 characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_CHAIN_LEN_WRAP = 11
MAX_CHAIN_LEN_NOWRAP = 28
# Number of iptables rules to print before and after a rule that causes a
# failure during iptables-restore
IPTABLES_ERROR_LINES_OF_CONTEXT = 5
def comment_rule(rule, comment):
if not cfg.CONF.AGENT.comment_iptables_rules or not comment:
return rule
# iptables-save outputs the comment before the jump so we need to match
# that order so _find_last_entry works
comment = '-m comment --comment "%s"' % comment
if rule.startswith('-j'):
# this is a jump only rule so we just put the comment first
return '%s %s' % (comment, rule)
try:
jpos = rule.index(' -j ')
return ' '.join((rule[:jpos], comment, rule[jpos + 1:]))
except ValueError:
return '%s %s' % (rule, comment)
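# Quick illustration (assuming the comment_iptables_rules option is enabled;
# the rules and comments are hypothetical):
#   comment_rule('-j ACCEPT', 'top-level accept')
#     -> '-m comment --comment "top-level accept" -j ACCEPT'
#   comment_rule('-p tcp --dport 22 -j ACCEPT', 'ssh')
#     -> '-p tcp --dport 22 -m comment --comment "ssh" -j ACCEPT'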
def get_chain_name(chain_name, wrap=True):
if wrap:
return chain_name[:MAX_CHAIN_LEN_WRAP]
else:
return chain_name[:MAX_CHAIN_LEN_NOWRAP]
class IptablesRule(object):
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False,
binary_name=binary_name, tag=None, comment=None):
self.chain = get_chain_name(chain, wrap)
self.rule = rule
self.wrap = wrap
self.top = top
self.wrap_name = binary_name[:16]
self.tag = tag
self.comment = comment
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __str__(self):
if self.wrap:
chain = '%s-%s' % (self.wrap_name, self.chain)
else:
chain = self.chain
return comment_rule('-A %s %s' % (chain, self.rule), self.comment)
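# Illustration of how a wrapped rule renders (the binary name is hypothetical):
#   rule = IptablesRule('INPUT', '-s 10.0.0.0/8 -j DROP',
#                       binary_name='neutron-l3-agent')
#   str(rule)  # -> '-A neutron-l3-agent-INPUT -s 10.0.0.0/8 -j DROP'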
class IptablesTable(object):
"""An iptables table."""
def __init__(self, binary_name=binary_name):
self.rules = []
self.remove_rules = []
self.chains = set()
self.unwrapped_chains = set()
self.remove_chains = set()
self.wrap_name = binary_name[:16]
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
        it, so different components of Neutron can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if neutron-openvswitch-agent creates a chain named 'OUTPUT',
it'll actually end up being named 'neutron-openvswi-OUTPUT'.
"""
name = get_chain_name(name, wrap)
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
def _select_chain_set(self, wrap):
if wrap:
return self.chains
else:
return self.unwrapped_chains
def remove_chain(self, name, wrap=True):
"""Remove named chain.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
LOG.debug('Attempted to remove chain %s which does not exist',
name)
return
chain_set.remove(name)
if not wrap:
# non-wrapped chains and rules need to be dealt with specially,
# so we keep a list of them to be iterated over in apply()
self.remove_chains.add(name)
# first, add rules to remove that have a matching chain name
self.remove_rules += [str(r) for r in self.rules
if r.chain == name]
# next, remove rules from list that have a matching chain name
self.rules = [r for r in self.rules if r.chain != name]
if not wrap:
jump_snippet = '-j %s' % name
# next, add rules to remove that have a matching jump chain
self.remove_rules += [str(r) for r in self.rules
if jump_snippet in r.rule]
else:
jump_snippet = '-j %s-%s' % (self.wrap_name, name)
# finally, remove rules from list that have a matching jump chain
self.rules = [r for r in self.rules
if jump_snippet not in r.rule]
def add_rule(self, chain, rule, wrap=True, top=False, tag=None,
comment=None):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
chain = get_chain_name(chain, wrap)
if wrap and chain not in self.chains:
raise LookupError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name,
tag, comment))
def _wrap_target_chain(self, s, wrap):
if s.startswith('$'):
s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap)))
return s
def remove_rule(self, chain, rule, wrap=True, top=False, comment=None):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
chain = get_chain_name(chain, wrap)
try:
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.remove(IptablesRule(chain, rule, wrap, top,
self.wrap_name,
comment=comment))
if not wrap:
self.remove_rules.append(str(IptablesRule(chain, rule, wrap,
top, self.wrap_name,
comment=comment)))
except ValueError:
LOG.warning(_LW('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def _get_chain_rules(self, chain, wrap):
chain = get_chain_name(chain, wrap)
return [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chained_rules = self._get_chain_rules(chain, wrap)
for rule in chained_rules:
self.rules.remove(rule)
def clear_rules_by_tag(self, tag):
if not tag:
return
rules = [rule for rule in self.rules if rule.tag == tag]
for rule in rules:
self.rules.remove(rule)
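# Usage sketch for a table in isolation (names below are illustrative; in
# practice tables are created and applied through IptablesManager):
#   table = IptablesTable(binary_name='neutron-l3-agent')
#   table.add_chain('web')
#   table.add_chain('logging')
#   # '$' marks a reference to another wrapped chain:
#   table.add_rule('web', '-j $logging')
#   str(table.rules[-1])
#   # -> '-A neutron-l3-agent-web -j neutron-l3-agent-logging'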
class IptablesManager(object):
"""Wrapper for iptables.
See IptablesTable for some usage docs
A number of chains are set up to begin with.
First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT.
Its name is not wrapped, so it's shared between the various neutron
workers. It's intended for rules that need to live at the top of the
FORWARD and OUTPUT chains. It's in both the ipv4 and ipv6 set of tables.
For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from neutron-filter-top.
For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
    wrapped in the same way as the built-in filter chains. Additionally,
there's a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, _execute=None, state_less=False, use_ipv6=False,
namespace=None, binary_name=binary_name):
if _execute:
self.execute = _execute
else:
self.execute = linux_utils.execute
self.use_ipv6 = use_ipv6
self.namespace = namespace
self.iptables_apply_deferred = False
self.wrap_name = binary_name[:16]
self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)}
self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)}
# Add a neutron-filter-top chain. It's intended to be shared
# among the various neutron components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('neutron-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('neutron-filter-top', '-j $local',
wrap=False)
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
if not state_less:
self.ipv4.update(
{'mangle': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update(
{'mangle': ['PREROUTING', 'INPUT', 'FORWARD', 'OUTPUT',
'POSTROUTING']})
self.ipv6.update(
{'mangle': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[6].update(
{'mangle': ['PREROUTING', 'INPUT', 'FORWARD', 'OUTPUT',
'POSTROUTING']})
self.ipv4.update(
{'nat': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update({'nat': ['PREROUTING',
'OUTPUT', 'POSTROUTING']})
self.ipv4.update({'raw': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update({'raw': ['PREROUTING', 'OUTPUT']})
self.ipv6.update({'raw': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[6].update({'raw': ['PREROUTING', 'OUTPUT']})
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in six.iteritems(builtin_chains[ip_version]):
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' %
(chain), wrap=False)
if not state_less:
# Add a neutron-postrouting-bottom chain. It's intended to be
# shared among the various neutron components. We set it as the
# last chain of POSTROUTING chain.
self.ipv4['nat'].add_chain('neutron-postrouting-bottom',
wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING',
'-j neutron-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared neutron-postrouting-bottom
# chain so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('neutron-postrouting-bottom',
'-j $snat', wrap=False,
comment=ic.SNAT_OUT)
# And then we add a float-snat chain and jump to first thing in
# the snat chain.
self.ipv4['nat'].add_chain('float-snat')
self.ipv4['nat'].add_rule('snat', '-j $float-snat')
# Add a mark chain to mangle PREROUTING chain. It is used to
# identify ingress packets from a certain interface.
self.ipv4['mangle'].add_chain('mark')
self.ipv4['mangle'].add_rule('PREROUTING', '-j $mark')
def get_tables(self, ip_version):
return {4: self.ipv4, 6: self.ipv6}[ip_version]
def get_chain(self, table, chain, ip_version=4, wrap=True):
try:
requested_table = self.get_tables(ip_version)[table]
except KeyError:
return []
return requested_table._get_chain_rules(chain, wrap)
def is_chain_empty(self, table, chain, ip_version=4, wrap=True):
return not self.get_chain(table, chain, ip_version, wrap)
@contextlib.contextmanager
def defer_apply(self):
"""Defer apply context."""
self.defer_apply_on()
try:
yield
finally:
try:
self.defer_apply_off()
except Exception:
msg = _('Failure applying iptables rules')
LOG.exception(msg)
raise n_exc.IpTablesApplyException(msg)
def defer_apply_on(self):
self.iptables_apply_deferred = True
def defer_apply_off(self):
self.iptables_apply_deferred = False
self._apply()
def apply(self):
if self.iptables_apply_deferred:
return
return self._apply()
def _apply(self):
lock_name = 'iptables'
if self.namespace:
lock_name += '-' + self.namespace
with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
return self._apply_synchronized()
def get_rules_for_table(self, table):
"""Runs iptables-save on a table and returns the results."""
args = ['iptables-save', '-t', table]
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
return self.execute(args, run_as_root=True).split('\n')
def _apply_synchronized(self):
"""Apply the current in-memory set of iptables rules.
This will create a diff between the rules from the previous runs
and replace them with the current set of rules.
This happens atomically, thanks to iptables-restore.
Returns a list of the changes that were sent to iptables-save.
"""
s = [('iptables', self.ipv4)]
if self.use_ipv6:
s += [('ip6tables', self.ipv6)]
        all_commands = []  # keep track of all commands for the return value
for cmd, tables in s:
args = ['%s-save' % (cmd,)]
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
save_output = self.execute(args, run_as_root=True)
all_lines = save_output.split('\n')
commands = []
# Traverse tables in sorted order for predictable dump output
for table_name in sorted(tables):
table = tables[table_name]
# isolate the lines of the table we are modifying
start, end = self._find_table(all_lines, table_name)
old_rules = all_lines[start:end]
# generate the new table state we want
new_rules = self._modify_rules(old_rules, table, table_name)
# generate the iptables commands to get between the old state
# and the new state
changes = _generate_path_between_rules(old_rules, new_rules)
if changes:
# if there are changes to the table, we put on the header
# and footer that iptables-save needs
commands += (['# Generated by iptables_manager'] +
['*%s' % table_name] + changes +
['COMMIT', '# Completed by iptables_manager'])
if not commands:
continue
all_commands += commands
args = ['%s-restore' % (cmd,), '-n']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
try:
# always end with a new line
commands.append('')
self.execute(args, process_input='\n'.join(commands),
run_as_root=True)
except RuntimeError as r_error:
with excutils.save_and_reraise_exception():
try:
line_no = int(re.search(
'iptables-restore: line ([0-9]+?) failed',
str(r_error)).group(1))
context = IPTABLES_ERROR_LINES_OF_CONTEXT
log_start = max(0, line_no - context)
log_end = line_no + context
except AttributeError:
# line error wasn't found, print all lines instead
log_start = 0
log_end = len(commands)
log_lines = ('%7d. %s' % (idx, l)
for idx, l in enumerate(
commands[log_start:log_end],
log_start + 1)
)
LOG.error(_LE("IPTablesManager.apply failed to apply the "
"following set of iptables rules:\n%s"),
'\n'.join(log_lines))
LOG.debug("IPTablesManager.apply completed with success. %d iptables "
"commands were issued", len(all_commands))
return all_commands
def _find_table(self, lines, table_name):
if len(lines) < 3:
            # fewer than 3 lines means this is fake/empty iptables output
return (0, 0)
try:
start = lines.index('*%s' % table_name)
except ValueError:
# Couldn't find table_name
LOG.debug('Unable to find table %s', table_name)
return (0, 0)
end = lines[start:].index('COMMIT') + start + 1
return (start, end)
def _find_rules_index(self, lines):
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(lines):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
if not seen_chains:
rules_index = 2
return rules_index
def _modify_rules(self, current_lines, table, table_name):
# Chains are stored as sets to avoid duplicates.
# Sort the output chains here to make their order predictable.
unwrapped_chains = sorted(table.unwrapped_chains)
chains = sorted(table.chains)
# we don't want to change any rules that don't belong to us so we start
# the new_filter with these rules
new_filter = [line.strip() for line in current_lines
if self.wrap_name not in line]
# generate our list of chain names
our_chains = [':%s-%s' % (self.wrap_name, name) for name in chains]
# the unwrapped chains (e.g. neutron-filter-top) may already exist in
# the new_filter since they aren't marked by the wrap_name so we only
        # want to add them if they aren't already there
our_chains += [':%s' % name for name in unwrapped_chains
if not any(':%s' % name in s for s in new_filter)]
our_top_rules = []
our_bottom_rules = []
for rule in table.rules:
rule_str = str(rule)
# similar to the unwrapped chains, there are some rules that belong
# to us but they don't have the wrap name. we want to remove them
# from the new_filter and then add them in the right location in
# case our new rules changed the order.
# (e.g. '-A FORWARD -j neutron-filter-top')
new_filter = [s for s in new_filter if rule_str not in s]
if rule.top:
# rule.top == True means we want this rule to be at the top.
our_top_rules += [rule_str]
else:
our_bottom_rules += [rule_str]
our_chains_and_rules = our_chains + our_top_rules + our_bottom_rules
# locate the position immediately after the existing chains to insert
# our chains and rules
rules_index = self._find_rules_index(new_filter)
new_filter[rules_index:rules_index] = our_chains_and_rules
def _weed_out_removes(line):
# remove any rules or chains from the filter that were slated
# for removal
if line.startswith(':'):
chain = line[1:]
if chain in table.remove_chains:
table.remove_chains.remove(chain)
return False
else:
if line in table.remove_rules:
table.remove_rules.remove(line)
return False
# Leave it alone
return True
seen_lines = set()
# TODO(kevinbenton): remove this function and the next one. They are
# just oversized brooms to sweep bugs under the rug!!! We generate the
# rules and we shouldn't be generating duplicates.
def _weed_out_duplicates(line):
if line in seen_lines:
thing = 'chain' if line.startswith(':') else 'rule'
LOG.warning(_LW("Duplicate iptables %(thing)s detected. This "
"may indicate a bug in the the iptables "
"%(thing)s generation code. Line: %(line)s"),
{'thing': thing, 'line': line})
return False
seen_lines.add(line)
# Leave it alone
return True
new_filter.reverse()
new_filter = [line for line in new_filter
if _weed_out_duplicates(line) and
_weed_out_removes(line)]
new_filter.reverse()
# flush lists, just in case a rule or chain marked for removal
# was already gone. (chains is a set, rules is a list)
table.remove_chains.clear()
table.remove_rules = []
return new_filter
def _get_traffic_counters_cmd_tables(self, chain, wrap=True):
name = get_chain_name(chain, wrap)
cmd_tables = [('iptables', key) for key, table in self.ipv4.items()
if name in table._select_chain_set(wrap)]
if self.use_ipv6:
cmd_tables += [('ip6tables', key)
for key, table in self.ipv6.items()
if name in table._select_chain_set(wrap)]
return cmd_tables
def get_traffic_counters(self, chain, wrap=True, zero=False):
"""Return the sum of the traffic counters of all rules of a chain."""
cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
if not cmd_tables:
LOG.warning(_LW('Attempted to get traffic counters of chain %s '
'which does not exist'), chain)
return
name = get_chain_name(chain, wrap)
acc = {'pkts': 0, 'bytes': 0}
for cmd, table in cmd_tables:
args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x']
if zero:
args.append('-Z')
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
current_table = self.execute(args, run_as_root=True)
current_lines = current_table.split('\n')
for line in current_lines[2:]:
if not line:
break
data = line.split()
if (len(data) < 2 or
not data[0].isdigit() or
not data[1].isdigit()):
break
acc['pkts'] += int(data[0])
acc['bytes'] += int(data[1])
return acc
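# Typical usage sketch (chain and rule values are illustrative; actually
# applying the rules requires root privileges and the iptables-save/restore
# binaries on the host):
#   manager = IptablesManager(use_ipv6=False)
#   manager.ipv4['filter'].add_chain('my-filter')
#   manager.ipv4['filter'].add_rule('my-filter', '-p tcp --dport 80 -j ACCEPT')
#   manager.ipv4['filter'].add_rule('INPUT', '-j $my-filter')
#   manager.apply()
#   # Or batch several changes and push them once:
#   with manager.defer_apply():
#       manager.ipv4['filter'].empty_chain('my-filter')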
def _generate_path_between_rules(old_rules, new_rules):
"""Generates iptables commands to get from old_rules to new_rules.
This function diffs the two rule sets and then calculates the iptables
commands necessary to get from the old rules to the new rules using
insert and delete commands.
"""
old_by_chain = _get_rules_by_chain(old_rules)
new_by_chain = _get_rules_by_chain(new_rules)
old_chains, new_chains = set(old_by_chain.keys()), set(new_by_chain.keys())
# all referenced chains should be declared at the top before rules.
# NOTE(kevinbenton): sorting and grouping chains is for determinism in
# tests. iptables doesn't care about the order here
statements = [':%s - [0:0]' % c for c in sorted(new_chains - old_chains)]
sg_chains = []
other_chains = []
for chain in sorted(old_chains | new_chains):
if '-sg-' in chain:
sg_chains.append(chain)
else:
other_chains.append(chain)
for chain in other_chains + sg_chains:
statements += _generate_chain_diff_iptables_commands(
chain, old_by_chain[chain], new_by_chain[chain])
# unreferenced chains get the axe
for chain in sorted(old_chains - new_chains):
statements += ['-X %s' % chain]
return statements
def _get_rules_by_chain(rules):
by_chain = collections.defaultdict(list)
for line in rules:
if line.startswith(':'):
chain = line[1:].split(' ', 1)[0]
# even though this is a default dict, we need to manually add
# chains to ensure that ones without rules are included because
# they might be a jump reference
if chain not in by_chain:
by_chain[chain] = []
elif line.startswith('-A'):
chain = line[3:].split(' ', 1)[0]
by_chain[chain].append(line)
return by_chain
def _generate_chain_diff_iptables_commands(chain, old_chain_rules,
new_chain_rules):
# keep track of the old index because we have to insert rules
# in the right position
old_index = 1
statements = []
for line in difflib.ndiff(old_chain_rules, new_chain_rules):
if line.startswith('?'):
# skip ? because that's a guide string for intraline differences
continue
elif line.startswith('-'): # line deleted
statements.append('-D %s %d' % (chain, old_index))
# since we are removing a line from the old rules, we
# backup the index by 1
old_index -= 1
elif line.startswith('+'): # line added
# strip the chain name since we have to add it before the index
rule = line[5:].split(' ', 1)[-1]
# rule inserted at this position
statements.append('-I %s %d %s' % (chain, old_index, rule))
old_index += 1
return statements
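# Worked example of the diff-to-commands translation above (the rules are
# illustrative):
#   old = ['-A chain1 -j ACCEPT']
#   new = ['-A chain1 -s 10.0.0.0/8 -j DROP', '-A chain1 -j ACCEPT']
#   _generate_chain_diff_iptables_commands('chain1', old, new)
#   # -> ['-I chain1 1 -s 10.0.0.0/8 -j DROP']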
|
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import resultstoresearchapi.invocation_pb2 as invocation__pb2
import resultstoresearchapi.resultstore_download_pb2 as resultstore__download__pb2
class ResultStoreDownloadStub(object):
"""This is the interface used to download information from the ResultStore
database.
Most APIs require setting a response FieldMask via the 'fields' URL query
parameter or the X-Goog-FieldMask HTTP/gRPC header.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SearchInvocations = channel.unary_unary(
'/resultstoresearch.v1.ResultStoreDownload/SearchInvocations',
request_serializer=resultstore__download__pb2.SearchInvocationsRequest.SerializeToString,
response_deserializer=resultstore__download__pb2.SearchInvocationsResponse.FromString,
)
self.GetInvocation = channel.unary_unary(
'/resultstoresearch.v1.ResultStoreDownload/GetInvocation',
request_serializer=resultstore__download__pb2.GetInvocationRequest.SerializeToString,
response_deserializer=invocation__pb2.Invocation.FromString,
)
self.ListTargets = channel.unary_unary(
'/resultstoresearch.v1.ResultStoreDownload/ListTargets',
request_serializer=resultstore__download__pb2.ListTargetsRequest.SerializeToString,
response_deserializer=resultstore__download__pb2.ListTargetsResponse.FromString,
)
self.ListTargetSubFiles = channel.unary_unary(
'/resultstoresearch.v1.ResultStoreDownload/ListTargetSubFiles',
request_serializer=resultstore__download__pb2.ListTargetSubFilesRequest.SerializeToString,
response_deserializer=resultstore__download__pb2.ListTargetSubFilesResponse.FromString,
)
self.GetFile = channel.unary_stream(
'/resultstoresearch.v1.ResultStoreDownload/GetFile',
request_serializer=resultstore__download__pb2.GetFileRequest.SerializeToString,
response_deserializer=resultstore__download__pb2.GetFileResponse.FromString,
)
self.DownloadFile = channel.unary_unary(
'/resultstoresearch.v1.ResultStoreDownload/DownloadFile',
request_serializer=resultstore__download__pb2.DownloadFileRequest.SerializeToString,
response_deserializer=resultstore__download__pb2.DownloadFileResponse.FromString,
)
self.GetInitialState = channel.unary_unary(
'/resultstoresearch.v1.ResultStoreDownload/GetInitialState',
request_serializer=resultstore__download__pb2.GetInitialStateRequest.SerializeToString,
response_deserializer=resultstore__download__pb2.GetInitialStateResponse.FromString,
)
self.GetTestCases = channel.unary_unary(
'/resultstoresearch.v1.ResultStoreDownload/GetTestCases',
request_serializer=resultstore__download__pb2.GetTestCasesRequest.SerializeToString,
response_deserializer=resultstore__download__pb2.GetTestCasesResponse.FromString,
)
class ResultStoreDownloadServicer(object):
"""This is the interface used to download information from the ResultStore
database.
Most APIs require setting a response FieldMask via the 'fields' URL query
parameter or the X-Goog-FieldMask HTTP/gRPC header.
"""
def SearchInvocations(self, request, context):
"""Searches for invocations matching the given query parameters. Results will
be ordered by timing.start_time with most recent first, but total ordering
of results is not guaranteed when difference in timestamps is very small.
Results may be stale.
An error will be reported in the following cases:
- If a query string is not provided
- If no field mask was given.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetInvocation(self, request, context):
"""Retrieves the invocation with the given name.
An error will be reported in the following cases:
- If the invocation is not found.
- If the given invocation name is badly formatted.
- If no field mask was given.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListTargets(self, request, context):
"""Retrieves all targets for a parent invocation. This might be limited by
user or server, in which case a continuation token is provided.
The order in which results are returned is undefined, but stable.
An error will be reported in the following cases:
- If the parent is not found.
- If the given parent name is badly formatted.
- If no field mask was given.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListTargetSubFiles(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetFile(self, request, context):
"""Retrieves the File with the given uri.
returns a stream of bytes to be stitched together in order.
An error will be reported in the following cases:
- If the File is not found.
- If the given File uri is badly formatted.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DownloadFile(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetInitialState(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTestCases(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ResultStoreDownloadServicer_to_server(servicer, server):
rpc_method_handlers = {
'SearchInvocations': grpc.unary_unary_rpc_method_handler(
servicer.SearchInvocations,
request_deserializer=resultstore__download__pb2.SearchInvocationsRequest.FromString,
response_serializer=resultstore__download__pb2.SearchInvocationsResponse.SerializeToString,
),
'GetInvocation': grpc.unary_unary_rpc_method_handler(
servicer.GetInvocation,
request_deserializer=resultstore__download__pb2.GetInvocationRequest.FromString,
response_serializer=invocation__pb2.Invocation.SerializeToString,
),
'ListTargets': grpc.unary_unary_rpc_method_handler(
servicer.ListTargets,
request_deserializer=resultstore__download__pb2.ListTargetsRequest.FromString,
response_serializer=resultstore__download__pb2.ListTargetsResponse.SerializeToString,
),
'ListTargetSubFiles': grpc.unary_unary_rpc_method_handler(
servicer.ListTargetSubFiles,
request_deserializer=resultstore__download__pb2.ListTargetSubFilesRequest.FromString,
response_serializer=resultstore__download__pb2.ListTargetSubFilesResponse.SerializeToString,
),
'GetFile': grpc.unary_stream_rpc_method_handler(
servicer.GetFile,
request_deserializer=resultstore__download__pb2.GetFileRequest.FromString,
response_serializer=resultstore__download__pb2.GetFileResponse.SerializeToString,
),
'DownloadFile': grpc.unary_unary_rpc_method_handler(
servicer.DownloadFile,
request_deserializer=resultstore__download__pb2.DownloadFileRequest.FromString,
response_serializer=resultstore__download__pb2.DownloadFileResponse.SerializeToString,
),
'GetInitialState': grpc.unary_unary_rpc_method_handler(
servicer.GetInitialState,
request_deserializer=resultstore__download__pb2.GetInitialStateRequest.FromString,
response_serializer=resultstore__download__pb2.GetInitialStateResponse.SerializeToString,
),
'GetTestCases': grpc.unary_unary_rpc_method_handler(
servicer.GetTestCases,
request_deserializer=resultstore__download__pb2.GetTestCasesRequest.FromString,
response_serializer=resultstore__download__pb2.GetTestCasesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'resultstoresearch.v1.ResultStoreDownload', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ResultStoreDownload(object):
"""This is the interface used to download information from the ResultStore
database.
Most APIs require setting a response FieldMask via the 'fields' URL query
parameter or the X-Goog-FieldMask HTTP/gRPC header.
"""
@staticmethod
def SearchInvocations(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/resultstoresearch.v1.ResultStoreDownload/SearchInvocations',
resultstore__download__pb2.SearchInvocationsRequest.SerializeToString,
resultstore__download__pb2.SearchInvocationsResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetInvocation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/resultstoresearch.v1.ResultStoreDownload/GetInvocation',
resultstore__download__pb2.GetInvocationRequest.SerializeToString,
invocation__pb2.Invocation.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListTargets(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/resultstoresearch.v1.ResultStoreDownload/ListTargets',
resultstore__download__pb2.ListTargetsRequest.SerializeToString,
resultstore__download__pb2.ListTargetsResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListTargetSubFiles(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/resultstoresearch.v1.ResultStoreDownload/ListTargetSubFiles',
resultstore__download__pb2.ListTargetSubFilesRequest.SerializeToString,
resultstore__download__pb2.ListTargetSubFilesResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetFile(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/resultstoresearch.v1.ResultStoreDownload/GetFile',
resultstore__download__pb2.GetFileRequest.SerializeToString,
resultstore__download__pb2.GetFileResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DownloadFile(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/resultstoresearch.v1.ResultStoreDownload/DownloadFile',
resultstore__download__pb2.DownloadFileRequest.SerializeToString,
resultstore__download__pb2.DownloadFileResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetInitialState(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/resultstoresearch.v1.ResultStoreDownload/GetInitialState',
resultstore__download__pb2.GetInitialStateRequest.SerializeToString,
resultstore__download__pb2.GetInitialStateResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetTestCases(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/resultstoresearch.v1.ResultStoreDownload/GetTestCases',
resultstore__download__pb2.GetTestCasesRequest.SerializeToString,
resultstore__download__pb2.GetTestCasesResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
|
|
# Copyright 2013 Josh Durgin
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_serialization import jsonutils
from six.moves import urllib
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute import assisted_volume_snapshots \
as assisted_snaps_v21
from nova.api.openstack.compute import volumes as volumes_v21
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import vm_states
import nova.conf
from nova import context
from nova import exception
from nova import objects
from nova.objects import base
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.volume import cinder
CONF = nova.conf.CONF
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID_A = '00000000-aaaa-aaaa-aaaa-000000000000'
FAKE_UUID_B = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
FAKE_UUID_C = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
IMAGE_UUID = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
def fake_get_instance(self, context, instance_id, expected_attrs=None):
return fake_instance.fake_instance_obj(context, **{'uuid': instance_id})
def fake_get_volume(self, context, id):
return {'id': FAKE_UUID_A,
'status': 'available',
'attach_status': 'detached'
}
def fake_attach_volume(self, context, instance, volume_id, device):
pass
def fake_detach_volume(self, context, instance, volume):
pass
def fake_swap_volume(self, context, instance,
old_volume_id, new_volume_id):
pass
def fake_create_snapshot(self, context, volume, name, description):
return {'id': 123,
'volume_id': 'fakeVolId',
'status': 'available',
'volume_size': 123,
'created_at': '2013-01-01 00:00:01',
'display_name': 'myVolumeName',
'display_description': 'myVolumeDescription'}
def fake_delete_snapshot(self, context, snapshot_id):
pass
def fake_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
pass
def fake_compute_volume_snapshot_create(self, context, volume_id,
create_info):
pass
@classmethod
def fake_bdm_list_get_by_instance_uuid(cls, context, instance_uuid):
db_list = [fake_block_device.FakeDbBlockDeviceDict(
{'id': 1,
'instance_uuid': instance_uuid,
'device_name': '/dev/fake0',
'delete_on_termination': 'False',
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': FAKE_UUID_A,
'volume_size': 1}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 2,
'instance_uuid': instance_uuid,
'device_name': '/dev/fake1',
'delete_on_termination': 'False',
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': FAKE_UUID_B,
'volume_size': 1})]
item_cls = objects.BlockDeviceMapping
return base.obj_make_list(context, cls(), item_cls, db_list)
class BootFromVolumeTest(test.TestCase):
def setUp(self):
super(BootFromVolumeTest, self).setUp()
self.stubs.Set(compute_api.API, 'create',
self._get_fake_compute_api_create())
fakes.stub_out_nw_api(self)
self._block_device_mapping_seen = None
self._legacy_bdm_seen = True
def _get_fake_compute_api_create(self):
def _fake_compute_api_create(cls, context, instance_type,
image_href, **kwargs):
self._block_device_mapping_seen = kwargs.get(
'block_device_mapping')
self._legacy_bdm_seen = kwargs.get('legacy_bdm')
inst_type = flavors.get_flavor_by_flavor_id(2)
resv_id = None
return ([{'id': 1,
'display_name': 'test_server',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': IMAGE_UUID,
'user_id': 'fake',
'project_id': 'fake',
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
'updated_at': datetime.datetime(2010, 11, 11, 11, 0, 0),
'progress': 0,
'fixed_ips': []
}], resv_id)
return _fake_compute_api_create
def test_create_root_volume(self):
body = dict(server=dict(
name='test_server', imageRef=IMAGE_UUID,
flavorRef=2, min_count=1, max_count=1,
block_device_mapping=[dict(
volume_id='ca9fe3f5-cede-43cb-8050-1672acabe348',
device_name='/dev/vda',
delete_on_termination=False,
)]
))
req = fakes.HTTPRequest.blank('/v2/fake/os-volumes_boot')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(body)
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app_v21(
init_only=('os-volumes', 'servers')))
self.assertEqual(202, res.status_int)
server = jsonutils.loads(res.body)['server']
self.assertEqual(FAKE_UUID, server['id'])
self.assertEqual(CONF.password_length, len(server['adminPass']))
self.assertEqual(1, len(self._block_device_mapping_seen))
self.assertTrue(self._legacy_bdm_seen)
self.assertEqual('ca9fe3f5-cede-43cb-8050-1672acabe348',
self._block_device_mapping_seen[0]['volume_id'])
self.assertEqual('/dev/vda',
self._block_device_mapping_seen[0]['device_name'])
def test_create_root_volume_bdm_v2(self):
body = dict(server=dict(
name='test_server', imageRef=IMAGE_UUID,
flavorRef=2, min_count=1, max_count=1,
block_device_mapping_v2=[dict(
source_type='volume',
uuid='1',
device_name='/dev/vda',
boot_index=0,
delete_on_termination=False,
)]
))
req = fakes.HTTPRequest.blank('/v2/fake/os-volumes_boot')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(body)
req.headers['content-type'] = 'application/json'
res = req.get_response(fakes.wsgi_app_v21(
init_only=('os-volumes', 'servers')))
self.assertEqual(202, res.status_int)
server = jsonutils.loads(res.body)['server']
self.assertEqual(FAKE_UUID, server['id'])
self.assertEqual(CONF.password_length, len(server['adminPass']))
self.assertEqual(1, len(self._block_device_mapping_seen))
self.assertFalse(self._legacy_bdm_seen)
self.assertEqual('1', self._block_device_mapping_seen[0]['volume_id'])
self.assertEqual(0, self._block_device_mapping_seen[0]['boot_index'])
self.assertEqual('/dev/vda',
self._block_device_mapping_seen[0]['device_name'])
class VolumeApiTestV21(test.NoDBTestCase):
url_prefix = '/v2/fake'
def setUp(self):
super(VolumeApiTestV21, self).setUp()
fakes.stub_out_networking(self)
self.stubs.Set(cinder.API, "delete", fakes.stub_volume_delete)
self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
self.stubs.Set(cinder.API, "get_all", fakes.stub_volume_get_all)
self.context = context.get_admin_context()
@property
def app(self):
return fakes.wsgi_app_v21(init_only=('os-volumes', 'servers'))
def test_volume_create(self):
self.stubs.Set(cinder.API, "create", fakes.stub_volume_create)
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
resp_dict = jsonutils.loads(resp.body)
self.assertIn('volume', resp_dict)
self.assertEqual(vol['size'], resp_dict['volume']['size'])
self.assertEqual(vol['display_name'],
resp_dict['volume']['displayName'])
self.assertEqual(vol['display_description'],
resp_dict['volume']['displayDescription'])
self.assertEqual(vol['availability_zone'],
resp_dict['volume']['availabilityZone'])
def _test_volume_translate_exception(self, cinder_exc, api_exc):
"""Tests that cinder exceptions are correctly translated"""
def fake_volume_create(self, context, size, name, description,
snapshot, **param):
raise cinder_exc
self.stubs.Set(cinder.API, "create", fake_volume_create)
vol = {"size": '10',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
self.assertRaises(api_exc,
volumes_v21.VolumeController().create, req,
body=body)
@mock.patch.object(cinder.API, 'get_snapshot')
@mock.patch.object(cinder.API, 'create')
def test_volume_create_bad_snapshot_id(self, mock_create, mock_get):
vol = {"snapshot_id": '1', "size": 10}
body = {"volume": vol}
mock_get.side_effect = exception.SnapshotNotFound(snapshot_id='1')
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
self.assertRaises(webob.exc.HTTPNotFound,
volumes_v21.VolumeController().create, req,
body=body)
def test_volume_create_bad_input(self):
self._test_volume_translate_exception(
exception.InvalidInput(reason='fake'), webob.exc.HTTPBadRequest)
def test_volume_create_bad_quota(self):
self._test_volume_translate_exception(
exception.OverQuota(overs='fake'), webob.exc.HTTPForbidden)
def test_volume_index(self):
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
def test_volume_detail(self):
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/detail')
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
def test_volume_show(self):
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/123')
resp = req.get_response(self.app)
self.assertEqual(200, resp.status_int)
def test_volume_show_no_volume(self):
self.stubs.Set(cinder.API, "get", fakes.stub_volume_notfound)
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/456')
resp = req.get_response(self.app)
self.assertEqual(404, resp.status_int)
self.assertIn('Volume 456 could not be found.', resp.body)
def test_volume_delete(self):
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/123')
req.method = 'DELETE'
resp = req.get_response(self.app)
self.assertEqual(202, resp.status_int)
def test_volume_delete_no_volume(self):
self.stubs.Set(cinder.API, "delete", fakes.stub_volume_notfound)
req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes/456')
req.method = 'DELETE'
resp = req.get_response(self.app)
self.assertEqual(404, resp.status_int)
self.assertIn('Volume 456 could not be found.', resp.body)
class VolumeAttachTestsV21(test.NoDBTestCase):
validation_error = exception.ValidationError
def setUp(self):
super(VolumeAttachTestsV21, self).setUp()
self.stub_out('nova.objects.BlockDeviceMappingList'
'.get_by_instance_uuid',
fake_bdm_list_get_by_instance_uuid)
self.stubs.Set(compute_api.API, 'get', fake_get_instance)
self.stubs.Set(cinder.API, 'get', fake_get_volume)
self.context = context.get_admin_context()
self.expected_show = {'volumeAttachment':
{'device': '/dev/fake0',
'serverId': FAKE_UUID,
'id': FAKE_UUID_A,
'volumeId': FAKE_UUID_A
}}
self._set_up_controller()
def _set_up_controller(self):
self.attachments = volumes_v21.VolumeAttachmentController()
def test_show(self):
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.show(req, FAKE_UUID, FAKE_UUID_A)
self.assertEqual(self.expected_show, result)
@mock.patch.object(compute_api.API, 'get',
side_effect=exception.InstanceNotFound(instance_id=FAKE_UUID))
def test_show_no_instance(self, mock_mr):
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
self.attachments.show,
req,
FAKE_UUID,
FAKE_UUID_A)
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid', return_value=None)
def test_show_no_bdms(self, mock_mr):
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
self.attachments.show,
req,
FAKE_UUID,
FAKE_UUID_A)
def test_show_bdms_no_mountpoint(self):
FAKE_UUID_NOTEXIST = '00000000-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
self.attachments.show,
req,
FAKE_UUID,
FAKE_UUID_NOTEXIST)
def test_detach(self):
self.stubs.Set(compute_api.API,
'detach_volume',
fake_detach_volume)
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.delete(req, FAKE_UUID, FAKE_UUID_A)
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.attachments,
volumes_v21.VolumeAttachmentController):
status_int = self.attachments.delete.wsgi_code
else:
status_int = result.status_int
self.assertEqual(202, status_int)
@mock.patch.object(common, 'get_instance')
def test_detach_vol_shelved_not_supported(self, mock_get_instance):
inst = fake_instance.fake_instance_obj(self.context,
**{'uuid': FAKE_UUID})
inst.vm_state = vm_states.SHELVED
mock_get_instance.return_value = inst
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid', version='2.19')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(webob.exc.HTTPConflict,
self.attachments.delete,
req,
FAKE_UUID,
FAKE_UUID_A)
@mock.patch.object(compute_api.API, 'detach_volume')
@mock.patch.object(common, 'get_instance')
def test_detach_vol_shelved_supported(self,
mock_get_instance,
mock_detach):
inst = fake_instance.fake_instance_obj(self.context,
**{'uuid': FAKE_UUID})
inst.vm_state = vm_states.SHELVED
mock_get_instance.return_value = inst
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid', version='2.20')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.attachments.delete(req, FAKE_UUID, FAKE_UUID_A)
self.assertTrue(mock_detach.called)
def test_detach_vol_not_found(self):
self.stubs.Set(compute_api.API,
'detach_volume',
fake_detach_volume)
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
self.attachments.delete,
req,
FAKE_UUID,
FAKE_UUID_C)
@mock.patch('nova.objects.BlockDeviceMapping.is_root',
new_callable=mock.PropertyMock)
def test_detach_vol_root(self, mock_isroot):
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
mock_isroot.return_value = True
self.assertRaises(exc.HTTPForbidden,
self.attachments.delete,
req,
FAKE_UUID,
FAKE_UUID_A)
def test_detach_volume_from_locked_server(self):
def fake_detach_volume_from_locked_server(self, context,
instance, volume):
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
self.stubs.Set(compute_api.API,
'detach_volume',
fake_detach_volume_from_locked_server)
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'DELETE'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(webob.exc.HTTPConflict, self.attachments.delete,
req, FAKE_UUID, FAKE_UUID_A)
def test_attach_volume(self):
self.stubs.Set(compute_api.API,
'attach_volume',
fake_attach_volume)
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.create(req, FAKE_UUID, body=body)
self.assertEqual('00000000-aaaa-aaaa-aaaa-000000000000',
result['volumeAttachment']['id'])
@mock.patch.object(common, 'get_instance')
def test_attach_vol_shelved_not_supported(self, mock_get_instance):
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
inst = fake_instance.fake_instance_obj(self.context,
**{'uuid': FAKE_UUID})
inst.vm_state = vm_states.SHELVED
mock_get_instance.return_value = inst
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments',
version='2.19')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(webob.exc.HTTPConflict,
self.attachments.create,
req,
FAKE_UUID,
body=body)
@mock.patch.object(compute_api.API, 'attach_volume',
return_value='/dev/myfake')
@mock.patch.object(common, 'get_instance')
def test_attach_vol_shelved_supported(self,
mock_get_instance,
mock_attach):
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
inst = fake_instance.fake_instance_obj(self.context,
**{'uuid': FAKE_UUID})
inst.vm_state = vm_states.SHELVED
mock_get_instance.return_value = inst
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments',
version='2.20')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.create(req, FAKE_UUID, body=body)
self.assertEqual('00000000-aaaa-aaaa-aaaa-000000000000',
result['volumeAttachment']['id'])
self.assertEqual('/dev/myfake', result['volumeAttachment']['device'])
@mock.patch.object(compute_api.API, 'attach_volume',
return_value='/dev/myfake')
def test_attach_volume_with_auto_device(self, mock_attach):
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': None}}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
result = self.attachments.create(req, FAKE_UUID, body=body)
self.assertEqual('00000000-aaaa-aaaa-aaaa-000000000000',
result['volumeAttachment']['id'])
self.assertEqual('/dev/myfake', result['volumeAttachment']['device'])
def test_attach_volume_to_locked_server(self):
def fake_attach_volume_to_locked_server(self, context, instance,
volume_id, device=None):
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
self.stubs.Set(compute_api.API,
'attach_volume',
fake_attach_volume_to_locked_server)
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(webob.exc.HTTPConflict, self.attachments.create,
req, FAKE_UUID, body=body)
def test_attach_volume_bad_id(self):
self.stubs.Set(compute_api.API,
'attach_volume',
fake_attach_volume)
body = {
'volumeAttachment': {
'device': None,
'volumeId': 'TESTVOLUME',
}
}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(self.validation_error, self.attachments.create,
req, FAKE_UUID, body=body)
@mock.patch.object(compute_api.API, 'attach_volume',
side_effect=exception.DevicePathInUse(path='/dev/sda'))
def test_attach_volume_device_in_use(self, mock_attach):
body = {
'volumeAttachment': {
'device': '/dev/sda',
'volumeId': FAKE_UUID_A,
}
}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(webob.exc.HTTPConflict, self.attachments.create,
req, FAKE_UUID, body=body)
def test_attach_volume_without_volumeId(self):
self.stubs.Set(compute_api.API,
'attach_volume',
fake_attach_volume)
body = {
'volumeAttachment': {
'device': None
}
}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(self.validation_error, self.attachments.create,
req, FAKE_UUID, body=body)
def test_attach_volume_with_extra_arg(self):
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake',
'extra': 'extra_arg'}}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(self.validation_error, self.attachments.create,
req, FAKE_UUID, body=body)
@mock.patch.object(compute_api.API, 'attach_volume')
def test_attach_volume_with_invalid_input(self, mock_attach):
mock_attach.side_effect = exception.InvalidInput(
reason='Invalid volume')
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPBadRequest, self.attachments.create,
req, FAKE_UUID, body=body)
def _test_swap(self, attachments, uuid=FAKE_UUID_A,
fake_func=None, body=None):
fake_func = fake_func or fake_swap_volume
self.stubs.Set(compute_api.API,
'swap_volume',
fake_func)
body = body or {'volumeAttachment': {'volumeId': FAKE_UUID_B}}
req = fakes.HTTPRequest.blank(
'/v2/servers/id/os-volume_attachments/uuid')
req.method = 'PUT'
req.body = jsonutils.dump_as_bytes({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
return attachments.update(req, FAKE_UUID, uuid, body=body)
def test_swap_volume_for_locked_server(self):
def fake_swap_volume_for_locked_server(self, context, instance,
old_volume, new_volume):
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
self.assertRaises(webob.exc.HTTPConflict, self._test_swap,
self.attachments,
fake_func=fake_swap_volume_for_locked_server)
def test_swap_volume(self):
result = self._test_swap(self.attachments)
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.attachments,
volumes_v21.VolumeAttachmentController):
status_int = self.attachments.update.wsgi_code
else:
status_int = result.status_int
self.assertEqual(202, status_int)
def test_swap_volume_with_nonexistent_uri(self):
self.assertRaises(exc.HTTPNotFound, self._test_swap,
self.attachments, uuid=FAKE_UUID_C)
@mock.patch.object(cinder.API, 'get')
def test_swap_volume_with_nonexistent_dest_in_body(self, mock_update):
mock_update.side_effect = [
None, exception.VolumeNotFound(volume_id=FAKE_UUID_C)]
body = {'volumeAttachment': {'volumeId': FAKE_UUID_C}}
self.assertRaises(exc.HTTPBadRequest, self._test_swap,
self.attachments, body=body)
def test_swap_volume_without_volumeId(self):
body = {'volumeAttachment': {'device': '/dev/fake'}}
self.assertRaises(self.validation_error,
self._test_swap,
self.attachments,
body=body)
def test_swap_volume_with_extra_arg(self):
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
self.assertRaises(self.validation_error,
self._test_swap,
self.attachments,
body=body)
class CommonBadRequestTestCase(object):
    """Tests of places we throw 400 Bad Request from."""
    resource = None
    entity_name = None
    controller_cls = None
    kwargs = {}
    bad_request = exc.HTTPBadRequest
def setUp(self):
super(CommonBadRequestTestCase, self).setUp()
self.controller = self.controller_cls()
def _bad_request_create(self, body):
req = fakes.HTTPRequest.blank('/v2/fake/' + self.resource)
req.method = 'POST'
kwargs = self.kwargs.copy()
kwargs['body'] = body
self.assertRaises(self.bad_request,
self.controller.create, req, **kwargs)
def test_create_no_body(self):
self._bad_request_create(body=None)
def test_create_missing_volume(self):
body = {'foo': {'a': 'b'}}
self._bad_request_create(body=body)
def test_create_malformed_entity(self):
body = {self.entity_name: 'string'}
self._bad_request_create(body=body)
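# The concrete test cases below plug in resource, entity_name and
# controller_cls; the v2.1 controllers override bad_request with
# exception.ValidationError, since malformed v2.1 request bodies are rejected
# by schema validation rather than by the controller raising HTTPBadRequest.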
class BadRequestVolumeTestCaseV21(CommonBadRequestTestCase,
test.NoDBTestCase):
resource = 'os-volumes'
entity_name = 'volume'
controller_cls = volumes_v21.VolumeController
bad_request = exception.ValidationError
@mock.patch.object(cinder.API, 'delete',
side_effect=exception.InvalidInput(reason='vol attach'))
def test_delete_invalid_status_volume(self, mock_delete):
req = fakes.HTTPRequest.blank('/v2.1/os-volumes')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.delete, req, FAKE_UUID)
class BadRequestSnapshotTestCaseV21(CommonBadRequestTestCase,
test.NoDBTestCase):
resource = 'os-snapshots'
entity_name = 'snapshot'
controller_cls = volumes_v21.SnapshotController
bad_request = exception.ValidationError
class AssistedSnapshotCreateTestCaseV21(test.NoDBTestCase):
assisted_snaps = assisted_snaps_v21
bad_request = exception.ValidationError
def setUp(self):
super(AssistedSnapshotCreateTestCaseV21, self).setUp()
self.controller = \
self.assisted_snaps.AssistedVolumeSnapshotsController()
self.stubs.Set(compute_api.API, 'volume_snapshot_create',
fake_compute_volume_snapshot_create)
def test_assisted_create(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
body = {'snapshot':
{'volume_id': '1',
'create_info': {'type': 'qcow2',
'new_file': 'new_file',
'snapshot_id': 'snapshot_id'}}}
req.method = 'POST'
self.controller.create(req, body=body)
def test_assisted_create_missing_create_info(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
body = {'snapshot': {'volume_id': '1'}}
req.method = 'POST'
self.assertRaises(self.bad_request, self.controller.create,
req, body=body)
def test_assisted_create_with_unexpected_attr(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
body = {
'snapshot': {
'volume_id': '1',
'create_info': {
'type': 'qcow2',
'new_file': 'new_file',
'snapshot_id': 'snapshot_id'
}
},
'unexpected': 0,
}
req.method = 'POST'
self.assertRaises(self.bad_request, self.controller.create,
req, body=body)
class AssistedSnapshotDeleteTestCaseV21(test.NoDBTestCase):
assisted_snaps = assisted_snaps_v21
def _check_status(self, expected_status, res, controller_method):
self.assertEqual(expected_status, controller_method.wsgi_code)
def setUp(self):
super(AssistedSnapshotDeleteTestCaseV21, self).setUp()
self.controller = \
self.assisted_snaps.AssistedVolumeSnapshotsController()
self.stubs.Set(compute_api.API, 'volume_snapshot_delete',
fake_compute_volume_snapshot_delete)
def test_assisted_delete(self):
params = {
'delete_info': jsonutils.dumps({'volume_id': '1'}),
}
req = fakes.HTTPRequest.blank(
'/v2/fake/os-assisted-volume-snapshots?%s' %
urllib.parse.urlencode(params))
req.method = 'DELETE'
result = self.controller.delete(req, '5')
self._check_status(204, result, self.controller.delete)
def test_assisted_delete_missing_delete_info(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, '5')
class TestAssistedVolumeSnapshotsPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(TestAssistedVolumeSnapshotsPolicyEnforcementV21, self).setUp()
self.controller = (
assisted_snaps_v21.AssistedVolumeSnapshotsController())
self.req = fakes.HTTPRequest.blank('')
def test_create_assisted_volumes_snapshots_policy_failed(self):
rule_name = "os_compute_api:os-assisted-volume-snapshots:create"
self.policy.set_rules({rule_name: "project:non_fake"})
body = {'snapshot':
{'volume_id': '1',
'create_info': {'type': 'qcow2',
'new_file': 'new_file',
'snapshot_id': 'snapshot_id'}}}
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.create, self.req, body=body)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_delete_assisted_volumes_snapshots_policy_failed(self):
rule_name = "os_compute_api:os-assisted-volume-snapshots:delete"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.delete, self.req, '5')
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
class TestVolumeAttachPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(TestVolumeAttachPolicyEnforcementV21, self).setUp()
self.controller = volumes_v21.VolumeAttachmentController()
self.req = fakes.HTTPRequest.blank('')
def _common_policy_check(self, rules, rule_name, func, *arg, **kwarg):
self.policy.set_rules(rules)
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, *arg, **kwarg)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_index_volume_attach_policy_failed(self):
rule_name = "os_compute_api:os-volumes-attachments:index"
rules = {rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name,
self.controller.index, self.req, FAKE_UUID)
def test_show_volume_attach_policy_failed(self):
rule_name = "os_compute_api:os-volumes"
rules = {"os_compute_api:os-volumes-attachments:show": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.show,
self.req, FAKE_UUID, FAKE_UUID_A)
rule_name = "os_compute_api:os-volumes-attachments:show"
rules = {"os_compute_api:os-volumes": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.show,
self.req, FAKE_UUID, FAKE_UUID_A)
def test_create_volume_attach_policy_failed(self):
rule_name = "os_compute_api:os-volumes"
rules = {"os_compute_api:os-volumes-attachments:create": "@",
rule_name: "project:non_fake"}
body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
'device': '/dev/fake'}}
self._common_policy_check(rules, rule_name, self.controller.create,
self.req, FAKE_UUID, body=body)
rule_name = "os_compute_api:os-volumes-attachments:create"
rules = {"os_compute_api:os-volumes": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.create,
self.req, FAKE_UUID, body=body)
def test_update_volume_attach_policy_failed(self):
rule_name = "os_compute_api:os-volumes"
rules = {"os_compute_api:os-volumes-attachments:update": "@",
rule_name: "project:non_fake"}
body = {'volumeAttachment': {'volumeId': FAKE_UUID_B}}
self._common_policy_check(rules, rule_name, self.controller.update,
self.req, FAKE_UUID, FAKE_UUID_A, body=body)
rule_name = "os_compute_api:os-volumes-attachments:update"
rules = {"os_compute_api:os-volumes": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.update,
self.req, FAKE_UUID, FAKE_UUID_A, body=body)
def test_delete_volume_attach_policy_failed(self):
rule_name = "os_compute_api:os-volumes"
rules = {"os_compute_api:os-volumes-attachments:delete": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.delete,
self.req, FAKE_UUID, FAKE_UUID_A)
rule_name = "os_compute_api:os-volumes-attachments:delete"
rules = {"os_compute_api:os-volumes": "@",
rule_name: "project:non_fake"}
self._common_policy_check(rules, rule_name, self.controller.delete,
self.req, FAKE_UUID, FAKE_UUID_A)
class TestVolumesAPIDeprecation(test.NoDBTestCase):
def setUp(self):
super(TestVolumesAPIDeprecation, self).setUp()
self.controller = volumes_v21.VolumeController()
self.req = fakes.HTTPRequest.blank('', version='2.36')
def test_all_apis_return_not_found(self):
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.show, self.req, fakes.FAKE_UUID)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.delete, self.req, fakes.FAKE_UUID)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.index, self.req)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.create, self.req, {})
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.detail, self.req)
|
|
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A client for the google.longrunning.operations meta-API.
This is a client that deals with long-running operations that follow the
pattern outlined by the `Google API Style Guide`_.
When an API method normally takes a long time to complete, it can be designed
to return ``Operation`` to the client, and the client can use this interface
to receive the real response asynchronously by polling the operation resource.
It is not a separate service, but rather an interface implemented by a larger
service. The protocol-level definition is available at
`google/longrunning/operations.proto`_. Typically, this will be constructed
automatically by another client class to deal with operations.
.. _Google API Style Guide:
    https://cloud.google.com/apis/design/design_patterns#long_running_operations
.. _google/longrunning/operations.proto:
    https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto
"""
import functools
from google.api_core import gapic_v1
from google.api_core import page_iterator
from google.api_core.operations_v1 import operations_client_config
from google.longrunning import operations_pb2
class OperationsClient(object):
"""Client for interacting with long-running operations within a service.
Args:
channel (grpc.Channel): The gRPC channel associated with the service
that implements the ``google.longrunning.operations`` interface.
client_config (dict):
A dictionary of call options for each method. If not specified
the default configuration is used.
"""
def __init__(self, channel, client_config=operations_client_config.config):
# Create the gRPC client stub.
self.operations_stub = operations_pb2.OperationsStub(channel)
# Create all wrapped methods using the interface configuration.
# The interface config contains all of the default settings for retry
# and timeout for each RPC method.
interfaces = client_config["interfaces"]
interface_config = interfaces["google.longrunning.Operations"]
method_configs = gapic_v1.config.parse_method_configs(interface_config)
self._get_operation = gapic_v1.method.wrap_method(
self.operations_stub.GetOperation,
default_retry=method_configs["GetOperation"].retry,
default_timeout=method_configs["GetOperation"].timeout,
)
self._list_operations = gapic_v1.method.wrap_method(
self.operations_stub.ListOperations,
default_retry=method_configs["ListOperations"].retry,
default_timeout=method_configs["ListOperations"].timeout,
)
self._cancel_operation = gapic_v1.method.wrap_method(
self.operations_stub.CancelOperation,
default_retry=method_configs["CancelOperation"].retry,
default_timeout=method_configs["CancelOperation"].timeout,
)
self._delete_operation = gapic_v1.method.wrap_method(
self.operations_stub.DeleteOperation,
default_retry=method_configs["DeleteOperation"].retry,
default_timeout=method_configs["DeleteOperation"].timeout,
)
# Service calls
def get_operation(
self,
name,
retry=gapic_v1.method.DEFAULT,
timeout=gapic_v1.method.DEFAULT,
metadata=None,
):
"""Gets the latest state of a long-running operation.
Clients can use this method to poll the operation result at intervals
as recommended by the API service.
Example:
>>> from google.api_core import operations_v1
>>> api = operations_v1.OperationsClient()
>>> name = ''
>>> response = api.get_operation(name)
Args:
name (str): The name of the operation resource.
retry (google.api_core.retry.Retry): The retry strategy to use
when invoking the RPC. If unspecified, the default retry from
the client configuration will be used. If ``None``, then this
method will not retry the RPC at all.
timeout (float): The amount of time in seconds to wait for the RPC
to complete. Note that if ``retry`` is used, this timeout
applies to each individual attempt and the overall time it
takes for this method to complete may be longer. If
                unspecified, the default timeout in the client
configuration is used. If ``None``, then the RPC method will
not time out.
metadata (Optional[List[Tuple[str, str]]]):
Additional gRPC metadata.
Returns:
google.longrunning.operations_pb2.Operation: The state of the
operation.
Raises:
google.api_core.exceptions.GoogleAPICallError: If an error occurred
while invoking the RPC, the appropriate ``GoogleAPICallError``
subclass will be raised.
"""
request = operations_pb2.GetOperationRequest(name=name)
# Add routing header
metadata = metadata or []
metadata.append(gapic_v1.routing_header.to_grpc_metadata({"name": name}))
return self._get_operation(
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_operations(
self,
name,
filter_,
retry=gapic_v1.method.DEFAULT,
timeout=gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists operations that match the specified filter in the request.
Example:
>>> from google.api_core import operations_v1
>>> api = operations_v1.OperationsClient()
>>> name = ''
>>>
>>> # Iterate over all results
>>> for operation in api.list_operations(name):
>>> # process operation
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> iter = api.list_operations(name)
>>> for page in iter.pages:
>>> for operation in page:
>>> # process operation
>>> pass
Args:
name (str): The name of the operation collection.
filter_ (str): The standard list filter.
retry (google.api_core.retry.Retry): The retry strategy to use
when invoking the RPC. If unspecified, the default retry from
the client configuration will be used. If ``None``, then this
method will not retry the RPC at all.
timeout (float): The amount of time in seconds to wait for the RPC
to complete. Note that if ``retry`` is used, this timeout
applies to each individual attempt and the overall time it
takes for this method to complete may be longer. If
                unspecified, the default timeout in the client
configuration is used. If ``None``, then the RPC method will
not time out.
metadata (Optional[List[Tuple[str, str]]]): Additional gRPC
metadata.
Returns:
google.api_core.page_iterator.Iterator: An iterator that yields
:class:`google.longrunning.operations_pb2.Operation` instances.
Raises:
google.api_core.exceptions.MethodNotImplemented: If the server
does not support this method. Services are not required to
implement this method.
google.api_core.exceptions.GoogleAPICallError: If an error occurred
while invoking the RPC, the appropriate ``GoogleAPICallError``
subclass will be raised.
"""
# Create the request object.
request = operations_pb2.ListOperationsRequest(name=name, filter=filter_)
# Add routing header
metadata = metadata or []
metadata.append(gapic_v1.routing_header.to_grpc_metadata({"name": name}))
# Create the method used to fetch pages
method = functools.partial(
self._list_operations, retry=retry, timeout=timeout, metadata=metadata
)
iterator = page_iterator.GRPCIterator(
client=None,
method=method,
request=request,
items_field="operations",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def cancel_operation(
self,
name,
retry=gapic_v1.method.DEFAULT,
timeout=gapic_v1.method.DEFAULT,
metadata=None,
):
"""Starts asynchronous cancellation on a long-running operation.
The server makes a best effort to cancel the operation, but success is
not guaranteed. Clients can use :meth:`get_operation` or service-
specific methods to check whether the cancellation succeeded or whether
the operation completed despite cancellation. On successful
cancellation, the operation is not deleted; instead, it becomes an
operation with an ``Operation.error`` value with a
``google.rpc.Status.code`` of ``1``, corresponding to
``Code.CANCELLED``.
Example:
>>> from google.api_core import operations_v1
>>> api = operations_v1.OperationsClient()
>>> name = ''
>>> api.cancel_operation(name)
Args:
name (str): The name of the operation resource to be cancelled.
retry (google.api_core.retry.Retry): The retry strategy to use
when invoking the RPC. If unspecified, the default retry from
the client configuration will be used. If ``None``, then this
method will not retry the RPC at all.
timeout (float): The amount of time in seconds to wait for the RPC
to complete. Note that if ``retry`` is used, this timeout
applies to each individual attempt and the overall time it
takes for this method to complete may be longer. If
                unspecified, the default timeout in the client
configuration is used. If ``None``, then the RPC method will
not time out.
metadata (Optional[List[Tuple[str, str]]]): Additional gRPC
metadata.
Raises:
google.api_core.exceptions.MethodNotImplemented: If the server
does not support this method. Services are not required to
implement this method.
google.api_core.exceptions.GoogleAPICallError: If an error occurred
while invoking the RPC, the appropriate ``GoogleAPICallError``
subclass will be raised.
"""
# Create the request object.
request = operations_pb2.CancelOperationRequest(name=name)
# Add routing header
metadata = metadata or []
metadata.append(gapic_v1.routing_header.to_grpc_metadata({"name": name}))
self._cancel_operation(request, retry=retry, timeout=timeout, metadata=metadata)
def delete_operation(
self,
name,
retry=gapic_v1.method.DEFAULT,
timeout=gapic_v1.method.DEFAULT,
metadata=None,
):
"""Deletes a long-running operation.
This method indicates that the client is no longer interested in the
operation result. It does not cancel the operation.
Example:
>>> from google.api_core import operations_v1
>>> api = operations_v1.OperationsClient()
>>> name = ''
>>> api.delete_operation(name)
Args:
name (str): The name of the operation resource to be deleted.
retry (google.api_core.retry.Retry): The retry strategy to use
when invoking the RPC. If unspecified, the default retry from
the client configuration will be used. If ``None``, then this
method will not retry the RPC at all.
timeout (float): The amount of time in seconds to wait for the RPC
to complete. Note that if ``retry`` is used, this timeout
applies to each individual attempt and the overall time it
takes for this method to complete may be longer. If
                unspecified, the default timeout in the client
configuration is used. If ``None``, then the RPC method will
not time out.
metadata (Optional[List[Tuple[str, str]]]): Additional gRPC
metadata.
Raises:
google.api_core.exceptions.MethodNotImplemented: If the server
does not support this method. Services are not required to
implement this method.
google.api_core.exceptions.GoogleAPICallError: If an error occurred
while invoking the RPC, the appropriate ``GoogleAPICallError``
subclass will be raised.
"""
# Create the request object.
request = operations_pb2.DeleteOperationRequest(name=name)
# Add routing header
metadata = metadata or []
metadata.append(gapic_v1.routing_header.to_grpc_metadata({"name": name}))
self._delete_operation(request, retry=retry, timeout=timeout, metadata=metadata)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Midokura Japan K.K.
# Copyright (C) 2013 Midokura PTE LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Takaaki Suzuki, Midokura Japan KK
# @author: Tomoe Sugihara, Midokura Japan KK
# @author: Ryu Ishimoto, Midokura Japan KK
# @author: Rossella Sblendido, Midokura Japan KK
# @author: Duarte Nunes, Midokura Japan KK
from midonetclient import api
from oslo.config import cfg
from sqlalchemy.orm import exc as sa_exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import db_base_plugin_v2
from neutron.db import dhcp_rpc_base
from neutron.db import external_net_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import portbindings_db
from neutron.db import securitygroups_db
from neutron.extensions import external_net as ext_net
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron.extensions import securitygroup as ext_sg
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.plugins.midonet.common import config # noqa
from neutron.plugins.midonet.common import net_util
from neutron.plugins.midonet import midonet_lib
LOG = logging.getLogger(__name__)
EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO
METADATA_DEFAULT_IP = "169.254.169.254/32"
OS_FLOATING_IP_RULE_KEY = 'OS_FLOATING_IP'
OS_SG_RULE_KEY = 'OS_SG_RULE_ID'
OS_TENANT_ROUTER_RULE_KEY = 'OS_TENANT_ROUTER_RULE'
PRE_ROUTING_CHAIN_NAME = "OS_PRE_ROUTING_%s"
PORT_INBOUND_CHAIN_NAME = "OS_PORT_%s_INBOUND"
PORT_OUTBOUND_CHAIN_NAME = "OS_PORT_%s_OUTBOUND"
POST_ROUTING_CHAIN_NAME = "OS_POST_ROUTING_%s"
SG_INGRESS_CHAIN_NAME = "OS_SG_%s_INGRESS"
SG_EGRESS_CHAIN_NAME = "OS_SG_%s_EGRESS"
SG_PORT_GROUP_NAME = "OS_PG_%s"
SNAT_RULE = 'SNAT'
def _get_nat_ips(type, fip):
"""Get NAT IP address information.
    From the NAT type given ('pre-routing' or 'post-routing'), determine the
    source and target IP addresses from the provided floating IP DB object.
"""
if type == 'pre-routing':
return fip["floating_ip_address"], fip["fixed_ip_address"]
elif type == 'post-routing':
return fip["fixed_ip_address"], fip["floating_ip_address"]
else:
raise ValueError(_("Invalid nat_type %s") % type)
def _nat_chain_names(router_id):
"""Get the chain names for NAT.
These names are used to associate MidoNet chains to the NAT rules
applied to the router. For each of these, there are two NAT types,
'dnat' and 'snat' that are returned as keys, and the corresponding
chain names as their values.
"""
pre_routing_name = PRE_ROUTING_CHAIN_NAME % router_id
post_routing_name = POST_ROUTING_CHAIN_NAME % router_id
return {'pre-routing': pre_routing_name, 'post-routing': post_routing_name}
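# For example, for an illustrative router id 'r1':
#   _nat_chain_names('r1') -> {'pre-routing': 'OS_PRE_ROUTING_r1',
#                              'post-routing': 'OS_POST_ROUTING_r1'}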
def _sg_chain_names(sg_id):
"""Get the chain names for security group.
These names are used to associate a security group to MidoNet chains.
There are two names for ingress and egress security group directions.
"""
ingress = SG_INGRESS_CHAIN_NAME % sg_id
egress = SG_EGRESS_CHAIN_NAME % sg_id
return {'ingress': ingress, 'egress': egress}
def _port_chain_names(port_id):
"""Get the chain names for a port.
These are chains to hold security group chains.
"""
inbound = PORT_INBOUND_CHAIN_NAME % port_id
outbound = PORT_OUTBOUND_CHAIN_NAME % port_id
return {'inbound': inbound, 'outbound': outbound}
def _sg_port_group_name(sg_id):
"""Get the port group name for security group..
This name is used to associate a security group to MidoNet port groups.
"""
return SG_PORT_GROUP_NAME % sg_id
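# Likewise, for an illustrative id 'x1' the naming helpers above yield:
#   _sg_chain_names('x1')     -> {'ingress': 'OS_SG_x1_INGRESS',
#                                 'egress': 'OS_SG_x1_EGRESS'}
#   _port_chain_names('x1')   -> {'inbound': 'OS_PORT_x1_INBOUND',
#                                 'outbound': 'OS_PORT_x1_OUTBOUND'}
#   _sg_port_group_name('x1') -> 'OS_PG_x1'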
def _rule_direction(sg_direction):
"""Convert the SG direction to MidoNet direction
MidoNet terms them 'inbound' and 'outbound' instead of 'ingress' and
'egress'. Also, the direction is reversed since MidoNet sees it
from the network port's point of view, not the VM's.
"""
if sg_direction == 'ingress':
return 'outbound'
elif sg_direction == 'egress':
return 'inbound'
else:
raise ValueError(_("Unrecognized direction %s") % sg_direction)
def _is_router_interface_port(port):
"""Check whether the given port is a router interface port."""
device_owner = port['device_owner']
return (device_owner in l3_db.DEVICE_OWNER_ROUTER_INTF)
def _is_router_gw_port(port):
"""Check whether the given port is a router gateway port."""
device_owner = port['device_owner']
return (device_owner in l3_db.DEVICE_OWNER_ROUTER_GW)
def _is_vif_port(port):
"""Check whether the given port is a standard VIF port."""
device_owner = port['device_owner']
return (not _is_dhcp_port(port) and
device_owner not in (l3_db.DEVICE_OWNER_ROUTER_GW,
l3_db.DEVICE_OWNER_ROUTER_INTF))
def _is_dhcp_port(port):
"""Check whether the given port is a DHCP port."""
device_owner = port['device_owner']
return device_owner.startswith(constants.DEVICE_OWNER_DHCP)
def _check_resource_exists(func, id, name, raise_exc=False):
"""Check whether the given resource exists in MidoNet data store."""
try:
func(id)
except midonet_lib.MidonetResourceNotFound as exc:
LOG.error(_("There is no %(name)s with ID %(id)s in MidoNet."),
{"name": name, "id": id})
if raise_exc:
raise MidonetPluginException(msg=exc)
class MidoRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin):
RPC_API_VERSION = '1.1'
def create_rpc_dispatcher(self):
"""Get the rpc dispatcher for this manager.
        This is a basic implementation that will call the plugin like get_ports
        and handle basic events.
        If a manager would like to set an rpc API version, or support more than
one class as the target of rpc messages, override this method.
"""
return n_rpc.PluginRpcDispatcher([self,
agents_db.AgentExtRpcCallback()])
class MidonetPluginException(n_exc.NeutronException):
message = _("%(msg)s")
class MidonetPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
portbindings_db.PortBindingMixin,
external_net_db.External_net_db_mixin,
l3_db.L3_NAT_db_mixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
securitygroups_db.SecurityGroupDbMixin):
supported_extension_aliases = ['external-net', 'router', 'security-group',
'agent', 'dhcp_agent_scheduler', 'binding']
__native_bulk_support = False
def __init__(self):
super(MidonetPluginV2, self).__init__()
# Read config values
midonet_conf = cfg.CONF.MIDONET
midonet_uri = midonet_conf.midonet_uri
admin_user = midonet_conf.username
admin_pass = midonet_conf.password
admin_project_id = midonet_conf.project_id
self.provider_router_id = midonet_conf.provider_router_id
self.provider_router = None
self.mido_api = api.MidonetApi(midonet_uri, admin_user,
admin_pass,
project_id=admin_project_id)
self.client = midonet_lib.MidoClient(self.mido_api)
# self.provider_router_id should have been set.
if self.provider_router_id is None:
msg = _('provider_router_id should be configured in the plugin '
'config file')
LOG.exception(msg)
raise MidonetPluginException(msg=msg)
self.setup_rpc()
self.base_binding_dict = {
portbindings.VIF_TYPE: portbindings.VIF_TYPE_MIDONET,
portbindings.VIF_DETAILS: {
# TODO(rkukura): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}}
def _get_provider_router(self):
if self.provider_router is None:
self.provider_router = self.client.get_router(
self.provider_router_id)
return self.provider_router
def _dhcp_mappings(self, context, fixed_ips, mac):
for fixed_ip in fixed_ips:
subnet = self._get_subnet(context, fixed_ip["subnet_id"])
if subnet["ip_version"] == 6:
# TODO(ryu) handle IPv6
continue
if not subnet["enable_dhcp"]:
# Skip if DHCP is disabled
continue
yield subnet['cidr'], fixed_ip["ip_address"], mac
def _metadata_subnets(self, context, fixed_ips):
for fixed_ip in fixed_ips:
subnet = self._get_subnet(context, fixed_ip["subnet_id"])
if subnet["ip_version"] == 6:
continue
yield subnet['cidr'], fixed_ip["ip_address"]
def _initialize_port_chains(self, port, in_chain, out_chain, sg_ids):
tenant_id = port["tenant_id"]
position = 1
# mac spoofing protection
self._add_chain_rule(in_chain, action='drop',
dl_src=port["mac_address"], inv_dl_src=True,
position=position)
# ip spoofing protection
for fixed_ip in port["fixed_ips"]:
position += 1
self._add_chain_rule(in_chain, action="drop",
src_addr=fixed_ip["ip_address"] + "/32",
inv_nw_src=True, dl_type=0x0800, # IPv4
position=position)
# conntrack
position += 1
self._add_chain_rule(in_chain, action='accept',
match_forward_flow=True,
position=position)
# Reset the position to process egress
position = 1
# Add rule for SGs
if sg_ids:
for sg_id in sg_ids:
chain_name = _sg_chain_names(sg_id)["ingress"]
chain = self.client.get_chain_by_name(tenant_id, chain_name)
self._add_chain_rule(out_chain, action='jump',
jump_chain_id=chain.get_id(),
jump_chain_name=chain_name,
position=position)
position += 1
# add reverse flow matching at the end
self._add_chain_rule(out_chain, action='accept',
match_return_flow=True,
position=position)
position += 1
# fall back DROP rule at the end except for ARP
self._add_chain_rule(out_chain, action='drop',
dl_type=0x0806, # ARP
inv_dl_type=True, position=position)
def _bind_port_to_sgs(self, context, port, sg_ids):
self._process_port_create_security_group(context, port, sg_ids)
if sg_ids is not None:
for sg_id in sg_ids:
pg_name = _sg_port_group_name(sg_id)
self.client.add_port_to_port_group_by_name(
port["tenant_id"], pg_name, port["id"])
def _unbind_port_from_sgs(self, context, port_id):
self._delete_port_security_group_bindings(context, port_id)
self.client.remove_port_from_port_groups(port_id)
def _create_accept_chain_rule(self, context, sg_rule, chain=None):
direction = sg_rule["direction"]
tenant_id = sg_rule["tenant_id"]
sg_id = sg_rule["security_group_id"]
chain_name = _sg_chain_names(sg_id)[direction]
if chain is None:
chain = self.client.get_chain_by_name(tenant_id, chain_name)
pg_id = None
if sg_rule["remote_group_id"] is not None:
pg_name = _sg_port_group_name(sg_id)
pg = self.client.get_port_group_by_name(tenant_id, pg_name)
pg_id = pg.get_id()
props = {OS_SG_RULE_KEY: str(sg_rule["id"])}
# Determine source or destination address by looking at direction
src_pg_id = dst_pg_id = None
src_addr = dst_addr = None
src_port_to = dst_port_to = None
src_port_from = dst_port_from = None
if direction == "egress":
dst_pg_id = pg_id
dst_addr = sg_rule["remote_ip_prefix"]
dst_port_from = sg_rule["port_range_min"]
dst_port_to = sg_rule["port_range_max"]
else:
src_pg_id = pg_id
src_addr = sg_rule["remote_ip_prefix"]
src_port_from = sg_rule["port_range_min"]
src_port_to = sg_rule["port_range_max"]
return self._add_chain_rule(
chain, action='accept', port_group_src=src_pg_id,
port_group_dst=dst_pg_id,
src_addr=src_addr, src_port_from=src_port_from,
src_port_to=src_port_to,
dst_addr=dst_addr, dst_port_from=dst_port_from,
dst_port_to=dst_port_to,
nw_proto=net_util.get_protocol_value(sg_rule["protocol"]),
dl_type=net_util.get_ethertype_value(sg_rule["ethertype"]),
properties=props)
def _remove_nat_rules(self, context, fip):
router = self.client.get_router(fip["router_id"])
self.client.remove_static_route(self._get_provider_router(),
fip["floating_ip_address"])
chain_names = _nat_chain_names(router.get_id())
for _type, name in chain_names.iteritems():
self.client.remove_rules_by_property(
router.get_tenant_id(), name,
OS_FLOATING_IP_RULE_KEY, fip["id"])
def setup_rpc(self):
# RPC support
self.topic = topics.PLUGIN
self.conn = rpc.create_connection(new=True)
self.callbacks = MidoRpcCallbacks()
self.dispatcher = self.callbacks.create_rpc_dispatcher()
self.conn.create_consumer(self.topic, self.dispatcher,
fanout=False)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
def create_subnet(self, context, subnet):
"""Create Neutron subnet.
Creates a Neutron subnet and a DHCP entry in MidoNet bridge.
"""
LOG.debug(_("MidonetPluginV2.create_subnet called: subnet=%r"), subnet)
s = subnet["subnet"]
net = super(MidonetPluginV2, self).get_network(
context, subnet['subnet']['network_id'], fields=None)
session = context.session
with session.begin(subtransactions=True):
sn_entry = super(MidonetPluginV2, self).create_subnet(context,
subnet)
bridge = self.client.get_bridge(sn_entry['network_id'])
gateway_ip = s['gateway_ip']
cidr = s['cidr']
if s['enable_dhcp']:
dns_nameservers = None
host_routes = None
if s['dns_nameservers'] is not attributes.ATTR_NOT_SPECIFIED:
dns_nameservers = s['dns_nameservers']
if s['host_routes'] is not attributes.ATTR_NOT_SPECIFIED:
host_routes = s['host_routes']
self.client.create_dhcp(bridge, gateway_ip, cidr,
host_rts=host_routes,
dns_servers=dns_nameservers)
# For external network, link the bridge to the provider router.
if net['router:external']:
self._link_bridge_to_gw_router(
bridge, self._get_provider_router(), gateway_ip, cidr)
LOG.debug(_("MidonetPluginV2.create_subnet exiting: sn_entry=%r"),
sn_entry)
return sn_entry
def delete_subnet(self, context, id):
"""Delete Neutron subnet.
Delete neutron network and its corresponding MidoNet bridge.
"""
LOG.debug(_("MidonetPluginV2.delete_subnet called: id=%s"), id)
subnet = super(MidonetPluginV2, self).get_subnet(context, id,
fields=None)
net = super(MidonetPluginV2, self).get_network(context,
subnet['network_id'],
fields=None)
session = context.session
with session.begin(subtransactions=True):
super(MidonetPluginV2, self).delete_subnet(context, id)
bridge = self.client.get_bridge(subnet['network_id'])
if subnet['enable_dhcp']:
self.client.delete_dhcp(bridge, subnet['cidr'])
# If the network is external, clean up routes, links, ports
if net[ext_net.EXTERNAL]:
self._unlink_bridge_from_gw_router(
bridge, self._get_provider_router())
LOG.debug(_("MidonetPluginV2.delete_subnet exiting"))
def create_network(self, context, network):
"""Create Neutron network.
Create a new Neutron network and its corresponding MidoNet bridge.
"""
LOG.debug(_('MidonetPluginV2.create_network called: network=%r'),
network)
net_data = network['network']
tenant_id = self._get_tenant_id_for_create(context, net_data)
net_data['tenant_id'] = tenant_id
self._ensure_default_security_group(context, tenant_id)
bridge = self.client.create_bridge(**net_data)
net_data['id'] = bridge.get_id()
session = context.session
with session.begin(subtransactions=True):
net = super(MidonetPluginV2, self).create_network(context, network)
self._process_l3_create(context, net, net_data)
LOG.debug(_("MidonetPluginV2.create_network exiting: net=%r"), net)
return net
def update_network(self, context, id, network):
"""Update Neutron network.
Update an existing Neutron network and its corresponding MidoNet
bridge.
"""
LOG.debug(_("MidonetPluginV2.update_network called: id=%(id)r, "
"network=%(network)r"), {'id': id, 'network': network})
session = context.session
with session.begin(subtransactions=True):
net = super(MidonetPluginV2, self).update_network(
context, id, network)
self._process_l3_update(context, net, network['network'])
self.client.update_bridge(id, **network['network'])
LOG.debug(_("MidonetPluginV2.update_network exiting: net=%r"), net)
return net
def get_network(self, context, id, fields=None):
"""Get Neutron network.
Retrieves a Neutron network and its corresponding MidoNet bridge.
"""
LOG.debug(_("MidonetPluginV2.get_network called: id=%(id)r, "
"fields=%(fields)r"), {'id': id, 'fields': fields})
qnet = super(MidonetPluginV2, self).get_network(context, id, fields)
self.client.get_bridge(id)
LOG.debug(_("MidonetPluginV2.get_network exiting: qnet=%r"), qnet)
return qnet
def delete_network(self, context, id):
"""Delete a network and its corresponding MidoNet bridge."""
LOG.debug(_("MidonetPluginV2.delete_network called: id=%r"), id)
self.client.delete_bridge(id)
try:
super(MidonetPluginV2, self).delete_network(context, id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to delete neutron db, while Midonet '
'bridge=%r had been deleted'), id)
def create_port(self, context, port):
"""Create a L2 port in Neutron/MidoNet."""
LOG.debug(_("MidonetPluginV2.create_port called: port=%r"), port)
port_data = port['port']
# Create a bridge port in MidoNet and set the bridge port ID as the
# port ID in Neutron.
bridge = self.client.get_bridge(port_data["network_id"])
tenant_id = bridge.get_tenant_id()
asu = port_data.get("admin_state_up", True)
bridge_port = self.client.add_bridge_port(bridge,
admin_state_up=asu)
port_data["id"] = bridge_port.get_id()
try:
session = context.session
with session.begin(subtransactions=True):
# Create a Neutron port
new_port = super(MidonetPluginV2, self).create_port(context,
port)
port_data.update(new_port)
self._ensure_default_security_group_on_port(context,
port)
if _is_vif_port(port_data):
# Bind security groups to the port
sg_ids = self._get_security_groups_on_port(context, port)
self._bind_port_to_sgs(context, new_port, sg_ids)
# Create port chains
port_chains = {}
for d, name in _port_chain_names(
new_port["id"]).iteritems():
port_chains[d] = self.client.create_chain(tenant_id,
name)
self._initialize_port_chains(port_data,
port_chains['inbound'],
port_chains['outbound'],
sg_ids)
# Update the port with the chain
self.client.update_port_chains(
bridge_port, port_chains["inbound"].get_id(),
port_chains["outbound"].get_id())
# DHCP mapping is only for VIF ports
for cidr, ip, mac in self._dhcp_mappings(
context, port_data["fixed_ips"],
port_data["mac_address"]):
self.client.add_dhcp_host(bridge, cidr, ip, mac)
elif _is_dhcp_port(port_data):
# For DHCP port, add a metadata route
for cidr, ip in self._metadata_subnets(
context, port_data["fixed_ips"]):
self.client.add_dhcp_route_option(bridge, cidr, ip,
METADATA_DEFAULT_IP)
self._process_portbindings_create_and_update(context,
port_data, new_port)
except Exception as ex:
# Try removing the MidoNet port before raising an exception.
with excutils.save_and_reraise_exception():
LOG.error(_("Failed to create a port on network %(net_id)s: "
"%(err)s"),
{"net_id": port_data["network_id"], "err": ex})
self.client.delete_port(bridge_port.get_id())
LOG.debug(_("MidonetPluginV2.create_port exiting: port=%r"), new_port)
return new_port
def get_port(self, context, id, fields=None):
"""Retrieve port."""
LOG.debug(_("MidonetPluginV2.get_port called: id=%(id)s "
"fields=%(fields)r"), {'id': id, 'fields': fields})
port = super(MidonetPluginV2, self).get_port(context, id, fields)
"Check if the port exists in MidoNet DB"""
try:
self.client.get_port(id)
except midonet_lib.MidonetResourceNotFound as exc:
LOG.error(_("There is no port with ID %(id)s in MidoNet."),
{"id": id})
port['status'] = constants.PORT_STATUS_ERROR
raise exc
LOG.debug(_("MidonetPluginV2.get_port exiting: port=%r"), port)
return port
def get_ports(self, context, filters=None, fields=None):
"""List neutron ports and verify that they exist in MidoNet."""
LOG.debug(_("MidonetPluginV2.get_ports called: filters=%(filters)s "
"fields=%(fields)r"),
{'filters': filters, 'fields': fields})
ports = super(MidonetPluginV2, self).get_ports(context, filters,
fields)
return ports
def delete_port(self, context, id, l3_port_check=True):
"""Delete a neutron port and corresponding MidoNet bridge port."""
LOG.debug(_("MidonetPluginV2.delete_port called: id=%(id)s "
"l3_port_check=%(l3_port_check)r"),
{'id': id, 'l3_port_check': l3_port_check})
        # If needed, check to see if this is a port owned by
        # an l3 router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
self.disassociate_floatingips(context, id)
port = self.get_port(context, id)
device_id = port['device_id']
# If this port is for router interface/gw, unlink and delete.
if _is_router_interface_port(port):
self._unlink_bridge_from_router(device_id, id)
elif _is_router_gw_port(port):
# Gateway removed
# Remove all the SNAT rules that are tagged.
router = self._get_router(context, device_id)
tenant_id = router["tenant_id"]
chain_names = _nat_chain_names(device_id)
for _type, name in chain_names.iteritems():
self.client.remove_rules_by_property(
tenant_id, name, OS_TENANT_ROUTER_RULE_KEY,
SNAT_RULE)
# Remove the default routes and unlink
self._remove_router_gateway(port['device_id'])
self.client.delete_port(id, delete_chains=True)
try:
for cidr, ip, mac in self._dhcp_mappings(
context, port["fixed_ips"], port["mac_address"]):
self.client.delete_dhcp_host(port["network_id"], cidr, ip,
mac)
except Exception:
LOG.error(_("Failed to delete DHCP mapping for port %(id)s"),
{"id": id})
super(MidonetPluginV2, self).delete_port(context, id)
def update_port(self, context, id, port):
"""Handle port update, including security groups and fixed IPs."""
with context.session.begin(subtransactions=True):
# Get the port and save the fixed IPs
old_port = self._get_port(context, id)
net_id = old_port["network_id"]
mac = old_port["mac_address"]
old_ips = old_port["fixed_ips"]
# update the port DB
p = super(MidonetPluginV2, self).update_port(context, id, port)
if "admin_state_up" in port["port"]:
asu = port["port"]["admin_state_up"]
mido_port = self.client.update_port(id, admin_state_up=asu)
# If we're changing the admin_state_up flag and the port is
# associated with a router, then we also need to update the
# peer port.
if _is_router_interface_port(p):
self.client.update_port(mido_port.get_peer_id(),
admin_state_up=asu)
new_ips = p["fixed_ips"]
if new_ips:
bridge = self.client.get_bridge(net_id)
# If it's a DHCP port, add a route to reach the MD server
if _is_dhcp_port(p):
for cidr, ip in self._metadata_subnets(
context, new_ips):
self.client.add_dhcp_route_option(
bridge, cidr, ip, METADATA_DEFAULT_IP)
else:
# IPs have changed. Re-map the DHCP entries
for cidr, ip, mac in self._dhcp_mappings(
context, old_ips, mac):
self.client.remove_dhcp_host(
bridge, cidr, ip, mac)
for cidr, ip, mac in self._dhcp_mappings(
context, new_ips, mac):
self.client.add_dhcp_host(
bridge, cidr, ip, mac)
if (self._check_update_deletes_security_groups(port) or
self._check_update_has_security_groups(port)):
self._unbind_port_from_sgs(context, p["id"])
sg_ids = self._get_security_groups_on_port(context, port)
self._bind_port_to_sgs(context, p, sg_ids)
self._process_portbindings_create_and_update(context,
port['port'],
p)
return p
def create_router(self, context, router):
"""Handle router creation.
When a new Neutron router is created, its corresponding MidoNet router
is also created. In MidoNet, this router is initialized with chains
for inbound and outbound traffic, which will be used to hold other
chains that include various rules, such as NAT.
:param router: Router information provided to create a new router.
"""
# NOTE(dcahill): Similar to the NSX plugin, we completely override
# this method in order to be able to use the MidoNet ID as Neutron ID
# TODO(dcahill): Propose upstream patch for allowing
# 3rd parties to specify IDs as we do with l2 plugin
LOG.debug(_("MidonetPluginV2.create_router called: router=%(router)s"),
{"router": router})
r = router['router']
tenant_id = self._get_tenant_id_for_create(context, r)
r['tenant_id'] = tenant_id
mido_router = self.client.create_router(**r)
mido_router_id = mido_router.get_id()
try:
has_gw_info = False
if EXTERNAL_GW_INFO in r:
has_gw_info = True
gw_info = r.pop(EXTERNAL_GW_INFO)
with context.session.begin(subtransactions=True):
# pre-generate id so it will be available when
# configuring external gw port
router_db = l3_db.Router(id=mido_router_id,
tenant_id=tenant_id,
name=r['name'],
admin_state_up=r['admin_state_up'],
status="ACTIVE")
context.session.add(router_db)
if has_gw_info:
self._update_router_gw_info(context, router_db['id'],
gw_info)
router_data = self._make_router_dict(router_db,
process_extensions=False)
except Exception:
# Try removing the midonet router
with excutils.save_and_reraise_exception():
self.client.delete_router(mido_router_id)
# Create router chains
chain_names = _nat_chain_names(mido_router_id)
try:
self.client.add_router_chains(mido_router,
chain_names["pre-routing"],
chain_names["post-routing"])
except Exception:
# Set the router status to Error
with context.session.begin(subtransactions=True):
r = self._get_router(context, router_data["id"])
router_data['status'] = constants.NET_STATUS_ERROR
r['status'] = router_data['status']
context.session.add(r)
LOG.debug(_("MidonetPluginV2.create_router exiting: "
"router_data=%(router_data)s."),
{"router_data": router_data})
return router_data
def _set_router_gateway(self, id, gw_router, gw_ip):
"""Set router uplink gateway
        :param id: ID of the router
:param gw_router: gateway router to link to
:param gw_ip: gateway IP address
"""
LOG.debug(_("MidonetPluginV2.set_router_gateway called: id=%(id)s, "
"gw_router=%(gw_router)s, gw_ip=%(gw_ip)s"),
                  {'id': id, 'gw_router': gw_router, 'gw_ip': gw_ip})
router = self.client.get_router(id)
# Create a port in the gw router
gw_port = self.client.add_router_port(gw_router,
port_address='169.254.255.1',
network_address='169.254.255.0',
network_length=30)
# Create a port in the router
port = self.client.add_router_port(router,
port_address='169.254.255.2',
network_address='169.254.255.0',
network_length=30)
# Link them
self.client.link(gw_port, port.get_id())
# Add a route for gw_ip to bring it down to the router
self.client.add_router_route(gw_router, type='Normal',
src_network_addr='0.0.0.0',
src_network_length=0,
dst_network_addr=gw_ip,
dst_network_length=32,
next_hop_port=gw_port.get_id(),
weight=100)
# Add default route to uplink in the router
self.client.add_router_route(router, type='Normal',
src_network_addr='0.0.0.0',
src_network_length=0,
dst_network_addr='0.0.0.0',
dst_network_length=0,
next_hop_port=port.get_id(),
weight=100)
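    # Illustrative note (not part of the original plugin): the two ports created
    # above form a /30 point-to-point link between the provider (gateway) router
    # and the tenant router:
    #
    #     provider router 169.254.255.1 <--- /30 link ---> 169.254.255.2 tenant router
    #
    # A /32 route for gw_ip sends traffic from the provider router down the link,
    # and the tenant router gets a default route back up through it.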
def _remove_router_gateway(self, id):
"""Clear router gateway
        :param id: ID of the router
"""
LOG.debug(_("MidonetPluginV2.remove_router_gateway called: "
"id=%(id)s"), {'id': id})
router = self.client.get_router(id)
# delete the port that is connected to the gateway router
for p in router.get_ports():
if p.get_port_address() == '169.254.255.2':
peer_port_id = p.get_peer_id()
if peer_port_id is not None:
self.client.unlink(p)
self.client.delete_port(peer_port_id)
# delete default route
for r in router.get_routes():
if (r.get_dst_network_addr() == '0.0.0.0' and
r.get_dst_network_length() == 0):
self.client.delete_route(r.get_id())
def update_router(self, context, id, router):
"""Handle router updates."""
LOG.debug(_("MidonetPluginV2.update_router called: id=%(id)s "
"router=%(router)r"), {"id": id, "router": router})
router_data = router["router"]
# Check if the update included changes to the gateway.
gw_updated = l3_db.EXTERNAL_GW_INFO in router_data
with context.session.begin(subtransactions=True):
# Update the Neutron DB
r = super(MidonetPluginV2, self).update_router(context, id,
router)
tenant_id = r["tenant_id"]
if gw_updated:
if (l3_db.EXTERNAL_GW_INFO in r and
r[l3_db.EXTERNAL_GW_INFO] is not None):
# Gateway created
gw_port_neutron = self._get_port(
context.elevated(), r["gw_port_id"])
gw_ip = gw_port_neutron['fixed_ips'][0]['ip_address']
# First link routers and set up the routes
self._set_router_gateway(r["id"],
self._get_provider_router(),
gw_ip)
gw_port_midonet = self.client.get_link_port(
self._get_provider_router(), r["id"])
# Get the NAT chains and add dynamic SNAT rules.
chain_names = _nat_chain_names(r["id"])
props = {OS_TENANT_ROUTER_RULE_KEY: SNAT_RULE}
self.client.add_dynamic_snat(tenant_id,
chain_names['pre-routing'],
chain_names['post-routing'],
gw_ip,
gw_port_midonet.get_id(),
**props)
self.client.update_router(id, **router_data)
LOG.debug(_("MidonetPluginV2.update_router exiting: router=%r"), r)
return r
def delete_router(self, context, id):
"""Handler for router deletion.
Deleting a router on Neutron simply means deleting its corresponding
router in MidoNet.
:param id: router ID to remove
"""
LOG.debug(_("MidonetPluginV2.delete_router called: id=%s"), id)
self.client.delete_router_chains(id)
self.client.delete_router(id)
super(MidonetPluginV2, self).delete_router(context, id)
def _link_bridge_to_gw_router(self, bridge, gw_router, gw_ip, cidr):
"""Link a bridge to the gateway router
:param bridge: bridge
:param gw_router: gateway router to link to
:param gw_ip: IP address of gateway
:param cidr: network CIDR
"""
net_addr, net_len = net_util.net_addr(cidr)
# create a port on the gateway router
gw_port = self.client.add_router_port(gw_router, port_address=gw_ip,
network_address=net_addr,
network_length=net_len)
# create a bridge port, then link it to the router.
port = self.client.add_bridge_port(bridge)
self.client.link(gw_port, port.get_id())
# add a route for the subnet in the gateway router
self.client.add_router_route(gw_router, type='Normal',
src_network_addr='0.0.0.0',
src_network_length=0,
dst_network_addr=net_addr,
dst_network_length=net_len,
next_hop_port=gw_port.get_id(),
weight=100)
def _unlink_bridge_from_gw_router(self, bridge, gw_router):
"""Unlink a bridge from the gateway router
:param bridge: bridge to unlink
:param gw_router: gateway router to unlink from
"""
# Delete routes and unlink the router and the bridge.
routes = self.client.get_router_routes(gw_router.get_id())
bridge_ports_to_delete = [
p for p in gw_router.get_peer_ports()
if p.get_device_id() == bridge.get_id()]
for p in bridge.get_peer_ports():
if p.get_device_id() == gw_router.get_id():
# delete the routes going to the bridge
for r in routes:
if r.get_next_hop_port() == p.get_id():
self.client.delete_route(r.get_id())
self.client.unlink(p)
self.client.delete_port(p.get_id())
# delete bridge port
for port in bridge_ports_to_delete:
self.client.delete_port(port.get_id())
def _link_bridge_to_router(self, router, bridge_port, net_addr, net_len,
gw_ip, metadata_gw_ip):
router_port = self.client.add_router_port(
router, network_length=net_len, network_address=net_addr,
port_address=gw_ip, admin_state_up=bridge_port['admin_state_up'])
self.client.link(router_port, bridge_port['id'])
self.client.add_router_route(router, type='Normal',
src_network_addr='0.0.0.0',
src_network_length=0,
dst_network_addr=net_addr,
dst_network_length=net_len,
next_hop_port=router_port.get_id(),
weight=100)
if metadata_gw_ip:
# Add a route for the metadata server.
            # Not all VM images support DHCP option 121. Add a route for the
            # Metadata server in the router to forward the packets to the
            # bridge that will send them to the Metadata Proxy.
md_net_addr, md_net_len = net_util.net_addr(METADATA_DEFAULT_IP)
self.client.add_router_route(
router, type='Normal', src_network_addr=net_addr,
src_network_length=net_len,
dst_network_addr=md_net_addr,
dst_network_length=md_net_len,
next_hop_port=router_port.get_id(),
next_hop_gateway=metadata_gw_ip)
def _unlink_bridge_from_router(self, router_id, bridge_port_id):
"""Unlink a bridge from a router."""
# Remove the routes to the port and unlink the port
bridge_port = self.client.get_port(bridge_port_id)
routes = self.client.get_router_routes(router_id)
self.client.delete_port_routes(routes, bridge_port.get_peer_id())
self.client.unlink(bridge_port)
def add_router_interface(self, context, router_id, interface_info):
"""Handle router linking with network."""
LOG.debug(_("MidonetPluginV2.add_router_interface called: "
"router_id=%(router_id)s "
"interface_info=%(interface_info)r"),
{'router_id': router_id, 'interface_info': interface_info})
with context.session.begin(subtransactions=True):
info = super(MidonetPluginV2, self).add_router_interface(
context, router_id, interface_info)
try:
subnet = self._get_subnet(context, info["subnet_id"])
cidr = subnet["cidr"]
net_addr, net_len = net_util.net_addr(cidr)
router = self.client.get_router(router_id)
# Get the metadata GW IP
metadata_gw_ip = None
rport_qry = context.session.query(models_v2.Port)
dhcp_ports = rport_qry.filter_by(
network_id=subnet["network_id"],
device_owner=constants.DEVICE_OWNER_DHCP).all()
if dhcp_ports and dhcp_ports[0].fixed_ips:
metadata_gw_ip = dhcp_ports[0].fixed_ips[0].ip_address
else:
LOG.warn(_("DHCP agent is not working correctly. No port "
"to reach the Metadata server on this network"))
# Link the router and the bridge
port = super(MidonetPluginV2, self).get_port(context,
info["port_id"])
self._link_bridge_to_router(router, port, net_addr,
net_len, subnet["gateway_ip"],
metadata_gw_ip)
except Exception:
LOG.error(_("Failed to create MidoNet resources to add router "
"interface. info=%(info)s, router_id=%(router_id)s"),
{"info": info, "router_id": router_id})
with excutils.save_and_reraise_exception():
with context.session.begin(subtransactions=True):
self.remove_router_interface(context, router_id, info)
LOG.debug(_("MidonetPluginV2.add_router_interface exiting: "
"info=%r"), info)
return info
def _assoc_fip(self, fip):
router = self.client.get_router(fip["router_id"])
link_port = self.client.get_link_port(
self._get_provider_router(), router.get_id())
self.client.add_router_route(
self._get_provider_router(),
src_network_addr='0.0.0.0',
src_network_length=0,
dst_network_addr=fip["floating_ip_address"],
dst_network_length=32,
next_hop_port=link_port.get_peer_id())
props = {OS_FLOATING_IP_RULE_KEY: fip['id']}
tenant_id = router.get_tenant_id()
chain_names = _nat_chain_names(router.get_id())
for chain_type, name in chain_names.items():
src_ip, target_ip = _get_nat_ips(chain_type, fip)
if chain_type == 'pre-routing':
nat_type = 'dnat'
else:
nat_type = 'snat'
self.client.add_static_nat(tenant_id, name, src_ip,
target_ip,
link_port.get_id(),
nat_type, **props)
def create_floatingip(self, context, floatingip):
session = context.session
with session.begin(subtransactions=True):
fip = super(MidonetPluginV2, self).create_floatingip(
context, floatingip)
if fip['port_id']:
self._assoc_fip(fip)
return fip
def update_floatingip(self, context, id, floatingip):
"""Handle floating IP association and disassociation."""
LOG.debug(_("MidonetPluginV2.update_floatingip called: id=%(id)s "
"floatingip=%(floatingip)s "),
{'id': id, 'floatingip': floatingip})
session = context.session
with session.begin(subtransactions=True):
if floatingip['floatingip']['port_id']:
fip = super(MidonetPluginV2, self).update_floatingip(
context, id, floatingip)
self._assoc_fip(fip)
# disassociate floating IP
elif floatingip['floatingip']['port_id'] is None:
fip = super(MidonetPluginV2, self).get_floatingip(context, id)
self._remove_nat_rules(context, fip)
super(MidonetPluginV2, self).update_floatingip(context, id,
floatingip)
        LOG.debug(_("MidonetPluginV2.update_floatingip exiting: fip=%s"), fip)
return fip
def disassociate_floatingips(self, context, port_id):
"""Disassociate floating IPs (if any) from this port."""
try:
fip_qry = context.session.query(l3_db.FloatingIP)
fip_dbs = fip_qry.filter_by(fixed_port_id=port_id)
for fip_db in fip_dbs:
self._remove_nat_rules(context, fip_db)
except sa_exc.NoResultFound:
pass
super(MidonetPluginV2, self).disassociate_floatingips(context, port_id)
def create_security_group(self, context, security_group, default_sg=False):
"""Create security group.
Create a new security group, including the default security group.
In MidoNet, this means creating a pair of chains, inbound and outbound,
as well as a new port group.
"""
LOG.debug(_("MidonetPluginV2.create_security_group called: "
"security_group=%(security_group)s "
"default_sg=%(default_sg)s "),
{'security_group': security_group, 'default_sg': default_sg})
sg = security_group.get('security_group')
tenant_id = self._get_tenant_id_for_create(context, sg)
if not default_sg:
self._ensure_default_security_group(context, tenant_id)
# Create the Neutron sg first
sg = super(MidonetPluginV2, self).create_security_group(
context, security_group, default_sg)
try:
# Process the MidoNet side
self.client.create_port_group(tenant_id,
_sg_port_group_name(sg["id"]))
chain_names = _sg_chain_names(sg["id"])
chains = {}
for direction, chain_name in chain_names.iteritems():
c = self.client.create_chain(tenant_id, chain_name)
chains[direction] = c
# Create all the rules for this SG. Only accept rules are created
for r in sg['security_group_rules']:
self._create_accept_chain_rule(context, r,
chain=chains[r['direction']])
except Exception:
LOG.error(_("Failed to create MidoNet resources for sg %(sg)r"),
{"sg": sg})
with excutils.save_and_reraise_exception():
with context.session.begin(subtransactions=True):
sg = self._get_security_group(context, sg["id"])
context.session.delete(sg)
LOG.debug(_("MidonetPluginV2.create_security_group exiting: sg=%r"),
sg)
return sg
def delete_security_group(self, context, id):
"""Delete chains for Neutron security group."""
LOG.debug(_("MidonetPluginV2.delete_security_group called: id=%s"), id)
with context.session.begin(subtransactions=True):
sg = super(MidonetPluginV2, self).get_security_group(context, id)
if not sg:
raise ext_sg.SecurityGroupNotFound(id=id)
if sg["name"] == 'default' and not context.is_admin:
raise ext_sg.SecurityGroupCannotRemoveDefault()
sg_id = sg['id']
filters = {'security_group_id': [sg_id]}
if super(MidonetPluginV2, self)._get_port_security_group_bindings(
context, filters):
raise ext_sg.SecurityGroupInUse(id=sg_id)
# Delete MidoNet Chains and portgroup for the SG
tenant_id = sg['tenant_id']
self.client.delete_chains_by_names(
tenant_id, _sg_chain_names(sg["id"]).values())
self.client.delete_port_group_by_name(
tenant_id, _sg_port_group_name(sg["id"]))
super(MidonetPluginV2, self).delete_security_group(context, id)
def create_security_group_rule(self, context, security_group_rule):
"""Create a security group rule
Create a security group rule in the Neutron DB and corresponding
MidoNet resources in its data store.
"""
LOG.debug(_("MidonetPluginV2.create_security_group_rule called: "
"security_group_rule=%(security_group_rule)r"),
{'security_group_rule': security_group_rule})
with context.session.begin(subtransactions=True):
rule = super(MidonetPluginV2, self).create_security_group_rule(
context, security_group_rule)
self._create_accept_chain_rule(context, rule)
LOG.debug(_("MidonetPluginV2.create_security_group_rule exiting: "
"rule=%r"), rule)
return rule
def delete_security_group_rule(self, context, sg_rule_id):
"""Delete a security group rule
Delete a security group rule from the Neutron DB and corresponding
MidoNet resources from its data store.
"""
LOG.debug(_("MidonetPluginV2.delete_security_group_rule called: "
"sg_rule_id=%s"), sg_rule_id)
with context.session.begin(subtransactions=True):
rule = super(MidonetPluginV2, self).get_security_group_rule(
context, sg_rule_id)
if not rule:
raise ext_sg.SecurityGroupRuleNotFound(id=sg_rule_id)
sg = self._get_security_group(context,
rule["security_group_id"])
chain_name = _sg_chain_names(sg["id"])[rule["direction"]]
self.client.remove_rules_by_property(rule["tenant_id"], chain_name,
OS_SG_RULE_KEY,
str(rule["id"]))
super(MidonetPluginV2, self).delete_security_group_rule(
context, sg_rule_id)
def _add_chain_rule(self, chain, action, **kwargs):
nw_proto = kwargs.get("nw_proto")
src_addr = kwargs.pop("src_addr", None)
dst_addr = kwargs.pop("dst_addr", None)
src_port_from = kwargs.pop("src_port_from", None)
src_port_to = kwargs.pop("src_port_to", None)
dst_port_from = kwargs.pop("dst_port_from", None)
dst_port_to = kwargs.pop("dst_port_to", None)
# Convert to the keys and values that midonet client understands
if src_addr:
kwargs["nw_src_addr"], kwargs["nw_src_length"] = net_util.net_addr(
src_addr)
if dst_addr:
kwargs["nw_dst_addr"], kwargs["nw_dst_length"] = net_util.net_addr(
dst_addr)
kwargs["tp_src"] = {"start": src_port_from, "end": src_port_to}
kwargs["tp_dst"] = {"start": dst_port_from, "end": dst_port_to}
if nw_proto == 1: # ICMP
# Overwrite port fields regardless of the direction
kwargs["tp_src"] = {"start": src_port_from, "end": src_port_from}
kwargs["tp_dst"] = {"start": dst_port_to, "end": dst_port_to}
return self.client.add_chain_rule(chain, action=action, **kwargs)
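    # Illustrative sketch (not part of the original plugin): a typical call for
    # an ingress TCP rule. The chain object and the accepted keyword names are
    # assumptions based on the translation performed above.
    #
    #     self._add_chain_rule(chain, action='accept',
    #                          nw_proto=6,              # TCP
    #                          src_addr='10.0.0.0/24',  # becomes nw_src_addr/_length
    #                          dst_port_from=80, dst_port_to=80)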
|
|
# -*- coding: utf-8 -*-
import sys
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from tqdm import tqdm
from winnaker.helpers import *
from winnaker.settings import *
ERROR_LIST = {
"Whitelabel Error Page": "Check clouddriver",
"No hosted service provider is configured": "Check Gate",
"no alias was selected": "TBD",
"This application has no explicit mapping for": "TBD",
"so you are seeing this as a fallback.": "Check Gate",
"Error Code: Throttling": "Check Edda/Cloud Provider",
"Rate exceeded": "Check Edda",
"This application has no explicit mapping for /error": "Check Deck"}
class Spinnaker():
def __init__(self):
chrome_options = Options()
chrome_options.add_argument("--disable-extensions")
chrome_options.add_argument("--no-sandbox")
self.driver = webdriver.Chrome(chrome_options=chrome_options)
# self.driver = webdriver.Firefox()
time.sleep(1)
self.driver.get(cfg_spinnaker_url)
self.wait = WebDriverWait(self.driver, 10)
if not os.path.exists(cfg_output_files_path):
os.makedirs(cfg_output_files_path)
def authorize(self):
logging.debug("Authorizing OAuth Request")
try:
e = wait_for_xpath_presence(self.driver, cfg_oauth_authorize_xpath, stop_max_attempt=2)
self.driver.save_screenshot(join(
cfg_output_files_path, "authorize.png"))
e.click()
        except Exception:
            logging.debug("Couldn't find authorize xpath. This is OK if the "
                          "user has already been authorized. Looked for: %s",
                          cfg_oauth_authorize_xpath)
def login(self):
self.check_page_contains_error()
e = wait_for_xpath_presence(self.driver, cfg_usernamebox_xpath)
logging.debug(
"Logging in as: {}".format(
cfg_spinnaker_username))
e.send_keys(cfg_spinnaker_username)
e = wait_for_xpath_presence(self.driver, cfg_passwordbox_xpath)
self.driver.save_screenshot(join(
cfg_output_files_path, "login.png"))
e.send_keys(cfg_spinnaker_password)
e = wait_for_xpath_presence(self.driver, cfg_signin_button_xpath)
e.click()
logging.info(
"- Logged in to the spinnaker instance at {}".format(cfg_spinnaker_url))
self.driver.save_screenshot(join(cfg_output_files_path, "login2.png"))
time.sleep(3)
def get_application(self, appname):
self.check_page_contains_error()
e = wait_for_xpath_presence(
self.driver, cfg_applications_xpath, be_clickable=True)
e.click()
e = wait_for_xpath_presence(self.driver, cfg_searchbox_xpath)
e.send_keys(appname)
e.send_keys(Keys.RETURN)
time.sleep(1)
self.driver.save_screenshot(join(
cfg_output_files_path,
"applications.png"))
app_xpath = "//a[contains (.,'" + appname + "')]"
e = wait_for_xpath_presence(self.driver, app_xpath)
e.click()
time.sleep(1)
logging.info("- Searched for application: {}".format(appname))
def get_pipelines(self, appname):
self.get_application(appname)
pipelines_xpath = "//a[@href='#/applications/" + \
appname + "/executions']"
e = wait_for_xpath_presence(self.driver, pipelines_xpath)
e.click()
def get_pipeline(self, appname, pipelinename):
self.check_page_contains_error()
self.get_pipelines(appname)
time.sleep(0.5)
checkbox = "//div[@class='nav']//execution-filters//label[contains(.,' %s')]/input[@type='checkbox']" % pipelinename
e = wait_for_xpath_presence(
self.driver, checkbox, be_clickable=True)
move_to_element(self.driver, e, click=True)
time.sleep(2)
if not e.get_attribute('checked'):
e = wait_for_xpath_presence(
self.driver, checkbox, be_clickable=True)
e.click()
time.sleep(2)
self.driver.save_screenshot(
join(cfg_output_files_path, "pipelines.png"))
logging.info(
"- Selected pipeline: {} successfully".format(pipelinename))
def start_manual_execution(self, force_bake=False):
self.check_page_contains_error()
# starts the 1st pipeline which is currently on the page
e = wait_for_xpath_presence(
self.driver, cfg_start_manual_execution_xpath)
click_stubborn(self.driver, e, cfg_start_manual_execution_xpath)
time.sleep(2)
if force_bake:
e = wait_for_xpath_presence(
self.driver, cfg_force_rebake_xpath, be_clickable=True)
move_to_element(self.driver, e, click=True)
time.sleep(2)
if not e.get_attribute('checked'):
e = wait_for_xpath_presence(
self.driver, cfg_force_rebake_xpath, be_clickable=True)
logging.info("Checking force bake option")
e.click()
self.driver.save_screenshot(
join(cfg_output_files_path, "force_bake_check.png"))
run_xpath = "//button[@type='submit' and contains(.,'Run')]/span[1]"
e = wait_for_xpath_presence(self.driver, run_xpath, be_clickable=True)
e.click()
time.sleep(2)
start_time = time.time()
logging.info("- Starting Manual Execution")
time.sleep(10) # To give enough time for pipeline kick off show up
        logging.info("\t Running ... (will wait up to {} minutes)".format(
int(cfg_max_wait_for_pipeline_run_mins / 60)))
for i in tqdm(range(int(cfg_max_wait_for_pipeline_run_mins / 10))):
if time.time() - start_time > cfg_max_wait_for_pipeline_run_mins:
logging.error("The run is taking more than {} minutes".format(
int(cfg_max_wait_for_pipeline_run_mins / 60)))
logging.error("Considering it as an error")
sys.exit(1)
status = self.get_last_build().status
if "RUNNING" in status:
time.sleep(10)
elif "NOT_STARTED" in status:
logging.info("Pipeline has not yet started.")
time.sleep(10)
elif "SUCCEEDED" in status:
                logging.info("Congratulations, the pipeline run was successful.")
print_passed()
self.get_stages(n=cfg_number_of_stages_to_check)
return 0
elif "TERMINAL" in status:
logging.error(
"Pipeline stopped with terminal state. screenshot generated.")
print_failed()
sys.exit(1)
else:
logging.error("Error: something went wrong {}".format(status))
sys.exit(2)
def get_last_build(self):
execution_summary = wait_for_xpath_presence(
self.driver, cfg_execution_summary_xp)
trigger_details = wait_for_xpath_presence(
self.driver, cfg_trigger_details_xp)
self.build = Build(trigger_details.text, execution_summary.text)
time.sleep(1)
wait_for_xpath_presence(self.driver, cfg_detail_xpath)
self.driver.save_screenshot(join(
cfg_output_files_path,
"last_build_status.png"))
return self.build
# TODO Get all the stages automatically
def get_stages(self, n=cfg_number_of_stages_to_check):
# n number of stages to get
for i in range(1, n + 1):
stage_xpath = "//div[@class='stages']/span[%s]/div" % str(i)
e = wait_for_xpath_presence(
self.driver, stage_xpath, be_clickable=True)
move_to_element(self.driver, e)
e.click()
alert_box_xps = ["//div[@class='alert alert-danger']",
"//div[@class='well alert alert-info']",
"//div[@class='alert alert-info']"]
for xpath in alert_box_xps:
try:
e = self.wait.until(
EC.presence_of_element_located((By.XPATH, xpath)))
logging.info("- Stage Detail: \n\t" +
e.text.replace("\n", "\n\t\n"))
for error in ERROR_LIST:
if error in e.text:
logging.error("\t Stage Failed: {}".format(e.text))
sys.exit(1)
except TimeoutException:
continue
self.driver.save_screenshot(
join(
cfg_output_files_path,
"stage_" +
str(i) +
".png"))
def check_page_contains_error(self):
for error in ERROR_LIST.keys():
if error in get_body_text(self.driver):
logging.error("- Failed for: {}".format(error))
logging.info("- Suggestion: {}".format(ERROR_LIST[error]))
print_failed()
sys.exit(1)
assert error not in get_body_text(self.driver)
# Represents a build of a pipeline
class Build():
def __init__(self, trigger_details, execution_summary):
try:
self.status = execution_summary.split(
"\n")[0].replace("Status: ", "")
self.duration = execution_summary.split(
"\n")[1].replace("Duration: ", "")
self.type_of_start = ""
self.username = trigger_details.split("\n")[0]
logging.debug("Username: {}".format(self.username))
if " CDT" in trigger_details:
self.datetime_started = datetime.strptime(
trigger_details.split("\n")[1].replace(
" CDT", ""), '%Y-%m-%d %H:%M:%S')
# TO DO: convert ago to UTC time
if " ago" in trigger_details:
self.datetime_started = trigger_details.split("\n")[1]
self.detail = trigger_details.split("\n")[2]
self.stack = trigger_details.split("\n")[3]
except (ValueError, IndexError):
pass
def status_is_valid(self):
if self.status in ["RUNNING", "SUCCEEDED", "TERMINAL", "CANCELED"]:
return True
return False
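# Illustrative sketch (not part of the original module): Build expects the text
# blobs scraped from the Spinnaker UI, shaped roughly like the strings below;
# the concrete values are made up for the example.
#
#     trigger_details = ("jane.doe@example.com\n"
#                        "2017-01-01 12:00:00 CDT\n"
#                        "some detail\n"
#                        "some stack")
#     execution_summary = "Status: SUCCEEDED\nDuration: 12 minutes"
#     build = Build(trigger_details, execution_summary)
#     assert build.status_is_valid()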
|
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import time
import os
import urllib
import urllib3
import certifi
from knack.log import get_logger
from azure.cli.core.azclierror import (RequiredArgumentMissingError, ValidationError, ResourceNotFoundError)
from azure.cli.core.commands.parameters import get_subscription_locations
from azure.cli.core.util import should_disable_connection_verify
from msrestazure.tools import parse_resource_id
from ._client_factory import web_client_factory
logger = get_logger(__name__)
REQUESTS_CA_BUNDLE = "REQUESTS_CA_BUNDLE"
def str2bool(v):
if v == 'true':
retval = True
elif v == 'false':
retval = False
else:
retval = None
return retval
def _normalize_sku(sku):
sku = sku.upper()
if sku == 'FREE':
return 'F1'
if sku == 'SHARED':
return 'D1'
return sku
def get_sku_tier(name): # pylint: disable=too-many-return-statements
name = name.upper()
if name in ['F1', 'FREE']:
return 'FREE'
if name in ['D1', "SHARED"]:
return 'SHARED'
if name in ['B1', 'B2', 'B3', 'BASIC']:
return 'BASIC'
if name in ['S1', 'S2', 'S3']:
return 'STANDARD'
if name in ['P1', 'P2', 'P3']:
return 'PREMIUM'
if name in ['P1V2', 'P2V2', 'P3V2']:
return 'PREMIUMV2'
if name in ['P1V3', 'P2V3', 'P3V3']:
return 'PREMIUMV3'
if name in ['PC2', 'PC3', 'PC4']:
return 'PremiumContainer'
if name in ['EP1', 'EP2', 'EP3']:
return 'ElasticPremium'
if name in ['I1', 'I2', 'I3']:
return 'Isolated'
if name in ['I1V2', 'I2V2', 'I3V2']:
return 'IsolatedV2'
if name in ['WS1', 'WS2', 'WS3']:
return 'WorkflowStandard'
    raise ValidationError("Invalid SKU (pricing tier). Please refer to the command help for valid values.")
# Deprecated; Do not use
# Keeping this for now so that we don't break extensions that use it
def get_sku_name(tier):
return get_sku_tier(name=tier)
# resource is client.web_apps for webapps, client.app_service_plans for ASPs, etc.
def get_resource_if_exists(resource, **kwargs):
from azure.core.exceptions import ResourceNotFoundError as E
try:
return resource.get(**kwargs)
except E:
return None
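# Illustrative sketch (not part of the original module): get_resource_if_exists
# forwards **kwargs straight to the SDK's get(), so callers pass whatever that
# operation expects (the keyword names below are assumptions):
#
#     webapp = get_resource_if_exists(client.web_apps,
#                                     resource_group_name='my-rg', name='my-app')
#     plan = get_resource_if_exists(client.app_service_plans,
#                                   resource_group_name='my-rg', name='my-plan')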
def normalize_sku_for_staticapp(sku):
if sku.lower() == 'free':
return 'Free'
if sku.lower() == 'standard':
return 'Standard'
    raise ValidationError("Invalid SKU (pricing tier). Please refer to the command help for valid values.")
def retryable_method(retries=3, interval_sec=5, excpt_type=Exception):
def decorate(func):
def call(*args, **kwargs):
current_retry = retries
while True:
try:
return func(*args, **kwargs)
except excpt_type as exception: # pylint: disable=broad-except
current_retry -= 1
if current_retry <= 0:
raise exception
time.sleep(interval_sec)
return call
return decorate
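# Illustrative sketch (not part of the original module): retryable_method retries
# the wrapped callable on the given exception type and re-raises the last
# failure. The decorated helper below is hypothetical.
#
#     @retryable_method(retries=3, interval_sec=5, excpt_type=ValueError)
#     def _get_deployment_status(poller):   # hypothetical helper
#         return poller.status()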
def raise_missing_token_suggestion():
pat_documentation = "https://help.github.com/en/articles/creating-a-personal-access-token-for-the-command-line"
raise RequiredArgumentMissingError("GitHub access token is required to authenticate to your repositories. "
"If you need to create a Github Personal Access Token, "
"please run with the '--login-with-github' flag or follow "
"the steps found at the following link:\n{0}".format(pat_documentation))
def _get_location_from_resource_group(cli_ctx, resource_group_name):
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
group = client.resource_groups.get(resource_group_name)
return group.location
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def _get_location_from_webapp(client, resource_group_name, webapp):
    webapp_name = webapp
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise ResourceNotFoundError("'{}' app doesn't exist".format(webapp_name))
    return webapp.location
# can't just normalize locations with location.lower().replace(" ", "") because of UAE/UK regions
def _normalize_location(cmd, location):
location = location.lower()
locations = get_subscription_locations(cmd.cli_ctx)
for loc in locations:
if loc.display_name.lower() == location or loc.name.lower() == location:
return loc.name
return location
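# Illustrative sketch (not part of the original module): both the display name
# and the canonical name resolve to the canonical name; unknown values are
# returned unchanged.
#
#     _normalize_location(cmd, "UK South")   # -> "uksouth"
#     _normalize_location(cmd, "uksouth")    # -> "uksouth"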
def get_pool_manager(url):
proxies = urllib.request.getproxies()
bypass_proxy = urllib.request.proxy_bypass(urllib.parse.urlparse(url).hostname)
if 'https' in proxies and not bypass_proxy:
proxy = urllib.parse.urlparse(proxies['https'])
if proxy.username and proxy.password:
proxy_headers = urllib3.util.make_headers(proxy_basic_auth='{0}:{1}'.format(proxy.username, proxy.password))
logger.debug('Setting proxy-authorization header for basic auth')
else:
proxy_headers = None
logger.info('Using proxy for app service tunnel connection')
http = urllib3.ProxyManager(proxy.geturl(), proxy_headers=proxy_headers)
else:
http = urllib3.PoolManager()
if should_disable_connection_verify():
http.connection_pool_kw['cert_reqs'] = 'CERT_NONE'
else:
http.connection_pool_kw['cert_reqs'] = 'CERT_REQUIRED'
if REQUESTS_CA_BUNDLE in os.environ:
ca_bundle_file = os.environ[REQUESTS_CA_BUNDLE]
logger.debug("Using CA bundle file at '%s'.", ca_bundle_file)
if not os.path.isfile(ca_bundle_file):
raise ValidationError('REQUESTS_CA_BUNDLE environment variable is specified with an invalid file path')
else:
ca_bundle_file = certifi.where()
http.connection_pool_kw['ca_certs'] = ca_bundle_file
return http
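# Illustrative sketch (not part of the original module): the returned manager is
# a plain urllib3 pool (or proxy) manager with proxy, certificate verification
# and CA bundle settings already applied; the URL below is made up.
#
#     http = get_pool_manager('https://my-app.scm.azurewebsites.net')
#     response = http.request('GET', 'https://my-app.scm.azurewebsites.net/api/deployments')
#     print(response.status)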
def get_app_service_plan_from_webapp(cmd, webapp, api_version=None):
client = web_client_factory(cmd.cli_ctx, api_version=api_version)
plan = parse_resource_id(webapp.server_farm_id)
return client.app_service_plans.get(plan['resource_group'], plan['name'])
# Allows putting additional properties on an SDK model instance
def use_additional_properties(resource):
resource.enable_additional_properties_sending()
existing_properties = resource.serialize().get("properties")
resource.additional_properties["properties"] = {} if existing_properties is None else existing_properties
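# Illustrative sketch (not part of the original module): after the call, extra
# keys can be attached to the serialized "properties" payload that the SDK model
# would otherwise drop. The model instance and property name below are made up.
#
#     use_additional_properties(site_config)
#     site_config.additional_properties["properties"]["madeUpSetting"] = "value"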
|
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
import os
import signal
import sys
import time
import traceback
from abc import ABCMeta
from hashlib import sha256
from typing import Callable, Optional, cast
import psutil
from pants.base.build_environment import get_buildroot
from pants.bin.pants_env_vars import DAEMON_ENTRYPOINT
from pants.option.options import Options
from pants.option.options_fingerprinter import OptionsFingerprinter
from pants.option.scope import GLOBAL_SCOPE
from pants.pantsd.lock import OwnerPrintingInterProcessFileLock
from pants.util.dirutil import read_file, rm_rf, safe_file_dump, safe_mkdir
from pants.util.memo import memoized_classproperty, memoized_property
logger = logging.getLogger(__name__)
class ProcessManager:
"""Manages contextual, on-disk process metadata.
Metadata is stored under a per-host fingerprinted directory, and a nested per-named-process
directory. The per-host directory defends against attempting to use process metadata that has
been mounted into virtual machines or docker images.
"""
class MetadataError(Exception):
pass
class Timeout(Exception):
pass
class NonResponsiveProcess(Exception):
pass
class NotStarted(Exception):
pass
KILL_WAIT_SEC = 5
KILL_CHAIN = (signal.SIGTERM, signal.SIGKILL)
FAIL_WAIT_SEC = 10
INFO_INTERVAL_SEC = 5
WAIT_INTERVAL_SEC = 0.1
SOCKET_KEY = "socket"
PROCESS_NAME_KEY = "process_name"
PID_KEY = "pid"
FINGERPRINT_KEY = "fingerprint"
def __init__(self, name: str, metadata_base_dir: str) -> None:
"""
:param string name: The process identity/name (e.g. 'pantsd' or 'ng_Zinc').
:param str metadata_base_dir: The overridden base directory for process metadata.
"""
super().__init__()
self._metadata_base_dir = metadata_base_dir
self._name = name.lower().strip()
# TODO: Extract process spawning code.
self._buildroot = get_buildroot()
@memoized_classproperty
def host_fingerprint(cls) -> str:
"""A fingerprint that attempts to identify the potential scope of a live process.
See the class pydoc.
In the absence of kernel hotswapping, a new uname means a restart or virtual machine, both
of which mean that process metadata is invalid. Additionally, docker generates a random
hostname per instance, which improves the reliability of this hash.
TODO: It would be nice to be able to use `uptime` (e.g. https://crates.io/crates/uptime_lib)
to identify reboots, but it's more challenging than it should be because it would involve
subtracting from the current time, which might hit aliasing issues.
"""
hasher = sha256()
for component in os.uname():
hasher.update(component.encode())
return hasher.hexdigest()[:12]
@staticmethod
def _maybe_cast(item, caster):
"""Given a casting function, attempt to cast to that type while masking common cast
exceptions.
N.B. This is mostly suitable for casting string types to numeric types - e.g. a port number
read from disk into an int.
:param func caster: A casting callable (e.g. `int`).
:returns: The result of caster(item) or item if TypeError or ValueError are raised during cast.
"""
try:
return caster(item)
except (TypeError, ValueError):
# N.B. the TypeError catch here (already) protects against the case that caster is None.
return item
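    # Illustrative sketch (not part of the original class): casts succeed where
    # possible and fall back to the raw value otherwise.
    #
    #     ProcessManager._maybe_cast("123", int)   # -> 123
    #     ProcessManager._maybe_cast("abc", int)   # -> "abc" (ValueError is masked)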
@classmethod
def _deadline_until(
cls,
closure: Callable[[], bool],
ongoing_msg: str,
completed_msg: str,
timeout: float = FAIL_WAIT_SEC,
wait_interval: float = WAIT_INTERVAL_SEC,
info_interval: float = INFO_INTERVAL_SEC,
):
"""Execute a function/closure repeatedly until a True condition or timeout is met.
:param func closure: the function/closure to execute (should not block for long periods of time
and must return True on success).
:param str ongoing_msg: a description of the action that is being executed, to be rendered as
info while we wait, and as part of any rendered exception.
:param str completed_msg: a description of the action that is being executed, to be rendered
after the action has succeeded (but only if we have previously rendered
the ongoing_msg).
:param float timeout: the maximum amount of time to wait for a true result from the closure in
seconds. N.B. this is timing based, so won't be exact if the runtime of
the closure exceeds the timeout.
:param float wait_interval: the amount of time to sleep between closure invocations.
:param float info_interval: the amount of time to wait before and between reports via info
logging that we're still waiting for the closure to succeed.
:raises: :class:`ProcessManager.Timeout` on execution timeout.
"""
now = time.time()
deadline = now + timeout
info_deadline = now + info_interval
rendered_ongoing = False
        while True:
if closure():
if rendered_ongoing:
logger.info(completed_msg)
return True
now = time.time()
if now > deadline:
raise cls.Timeout(
"exceeded timeout of {} seconds while waiting for {}".format(
timeout, ongoing_msg
)
)
if now > info_deadline:
logger.info("waiting for {}...".format(ongoing_msg))
rendered_ongoing = True
info_deadline = info_deadline + info_interval
elif wait_interval:
time.sleep(wait_interval)
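    # Illustrative sketch (not part of the original class): _deadline_until is a
    # generic polling helper; _wait_for_file below is the canonical caller, but a
    # hypothetical direct use looks like:
    #
    #     cls._deadline_until(lambda: os.path.exists("/tmp/ready"),
    #                         "the ready marker to appear",
    #                         "ready marker appeared",
    #                         timeout=30)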
@classmethod
def _wait_for_file(
cls,
filename: str,
ongoing_msg: str,
completed_msg: str,
timeout: float = FAIL_WAIT_SEC,
want_content: bool = True,
):
"""Wait up to timeout seconds for filename to appear with a non-zero size or raise
Timeout()."""
def file_waiter():
return os.path.exists(filename) and (not want_content or os.path.getsize(filename))
return cls._deadline_until(file_waiter, ongoing_msg, completed_msg, timeout=timeout)
@classmethod
def _get_metadata_dir_by_name(cls, name: str, metadata_base_dir: str) -> str:
"""Retrieve the metadata dir by name.
This should always live outside of the workdir to survive a clean-all.
"""
return os.path.join(metadata_base_dir, cls.host_fingerprint, name)
def _metadata_file_path(self, metadata_key) -> str:
return self.metadata_file_path(self.name, metadata_key, self._metadata_base_dir)
@classmethod
def metadata_file_path(cls, name, metadata_key, metadata_base_dir) -> str:
return os.path.join(cls._get_metadata_dir_by_name(name, metadata_base_dir), metadata_key)
def read_metadata_by_name(self, metadata_key, caster=None):
"""Read process metadata using a named identity.
:param string metadata_key: The metadata key (e.g. 'pid').
:param func caster: A casting callable to apply to the read value (e.g. `int`).
"""
file_path = self._metadata_file_path(metadata_key)
try:
metadata = read_file(file_path).strip()
return self._maybe_cast(metadata, caster)
except (IOError, OSError):
return None
def write_metadata_by_name(self, metadata_key, metadata_value) -> None:
"""Write process metadata using a named identity.
:param string metadata_key: The metadata key (e.g. 'pid').
:param string metadata_value: The metadata value (e.g. '1729').
"""
safe_mkdir(self._get_metadata_dir_by_name(self.name, self._metadata_base_dir))
file_path = self._metadata_file_path(metadata_key)
safe_file_dump(file_path, metadata_value)
def await_metadata_by_name(
self, metadata_key, ongoing_msg: str, completed_msg: str, timeout: float, caster=None
):
"""Block up to a timeout for process metadata to arrive on disk.
:param string metadata_key: The metadata key (e.g. 'pid').
:param str ongoing_msg: A message that describes what is being waited for while waiting.
:param str completed_msg: A message that describes what was being waited for after completion.
:param float timeout: The deadline to write metadata.
:param type caster: A type-casting callable to apply to the read value (e.g. int, str).
:returns: The value of the metadata key (read from disk post-write).
:raises: :class:`ProcessManager.Timeout` on timeout.
"""
file_path = self._metadata_file_path(metadata_key)
self._wait_for_file(file_path, ongoing_msg, completed_msg, timeout=timeout)
return self.read_metadata_by_name(metadata_key, caster)
def purge_metadata_by_name(self, name) -> None:
"""Purge a processes metadata directory.
:raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
"""
meta_dir = self._get_metadata_dir_by_name(name, self._metadata_base_dir)
logger.debug("purging metadata directory: {}".format(meta_dir))
try:
rm_rf(meta_dir)
except OSError as e:
raise ProcessManager.MetadataError(
"failed to purge metadata directory {}: {!r}".format(meta_dir, e)
)
@property
def name(self):
"""The logical name/label of the process."""
return self._name
@memoized_property
def lifecycle_lock(self):
"""An identity-keyed inter-process lock for safeguarding lifecycle and other operations."""
safe_mkdir(self._metadata_base_dir)
return OwnerPrintingInterProcessFileLock(
# N.B. This lock can't key into the actual named metadata dir (e.g. `.pids/pantsd/lock`
# via `ProcessManager._get_metadata_dir_by_name()`) because of a need to purge
# the named metadata dir on startup to avoid stale metadata reads.
os.path.join(self._metadata_base_dir, ".lock.{}".format(self._name))
)
@property
def fingerprint(self):
"""The fingerprint of the current process.
This reads the current fingerprint from the `ProcessManager` metadata.
:returns: The fingerprint of the running process as read from ProcessManager metadata or `None`.
:rtype: string
"""
return self.read_metadata_by_name(self.FINGERPRINT_KEY)
@property
def pid(self):
"""The running processes pid (or None)."""
return self.read_metadata_by_name(self.PID_KEY, int)
@property
def process_name(self):
"""The process name, to be compared to the psutil exe_name for stale pid checking."""
return self.read_metadata_by_name(self.PROCESS_NAME_KEY, str)
@property
def socket(self):
"""The running processes socket/port information (or None)."""
return self.read_metadata_by_name(self.SOCKET_KEY, int)
def has_current_fingerprint(self, fingerprint):
"""Determines if a new fingerprint is the current fingerprint of the running process.
:param string fingerprint: The new fingerprint to compare to.
:rtype: bool
"""
return fingerprint == self.fingerprint
def needs_restart(self, fingerprint):
"""Determines if the current ProcessManager needs to be started or restarted.
:param string fingerprint: The new fingerprint to compare to.
:rtype: bool
"""
return self.is_dead() or not self.has_current_fingerprint(fingerprint)
def await_pid(self, timeout: float) -> int:
"""Wait up to a given timeout for a process to write pid metadata."""
return cast(
int,
self.await_metadata_by_name(
self.PID_KEY,
f"{self._name} to start",
f"{self._name} started",
timeout,
caster=int,
),
)
def await_socket(self, timeout: float) -> int:
"""Wait up to a given timeout for a process to write socket info."""
return cast(
int,
self.await_metadata_by_name(
self.SOCKET_KEY,
f"{self._name} socket to be opened",
f"{self._name} socket opened",
timeout,
caster=int,
),
)
def write_pid(self, pid: Optional[int] = None):
"""Write the current process's PID."""
pid = os.getpid() if pid is None else pid
self.write_metadata_by_name(self.PID_KEY, str(pid))
def _get_process_name(self, process: psutil.Process | None = None) -> str:
proc = process or self._as_process()
cmdline = proc.cmdline()
return cast(str, cmdline[0] if cmdline else proc.name())
def write_process_name(self, process_name: Optional[str] = None):
"""Write the current process's name."""
process_name = process_name or self._get_process_name()
self.write_metadata_by_name(self.PROCESS_NAME_KEY, process_name)
def write_socket(self, socket_info: int):
"""Write the local processes socket information (TCP port or UNIX socket)."""
self.write_metadata_by_name(self.SOCKET_KEY, str(socket_info))
def write_fingerprint(self, fingerprint: str) -> None:
self.write_metadata_by_name(self.FINGERPRINT_KEY, fingerprint)
def _as_process(self):
"""Returns a psutil `Process` object wrapping our pid.
NB: Even with a process object in hand, subsequent method calls against it can always raise
`NoSuchProcess`. Care is needed to document the raises in the public API or else trap them and
do something sensible for the API.
:returns: a psutil Process object or else None if we have no pid.
:rtype: :class:`psutil.Process`
:raises: :class:`psutil.NoSuchProcess` if the process identified by our pid has died.
:raises: :class:`self.NotStarted` if no pid has been recorded for this process.
"""
pid = self.pid
if not pid:
raise self.NotStarted()
return psutil.Process(pid)
def is_dead(self):
"""Return a boolean indicating whether the process is dead or not."""
return not self.is_alive()
def is_alive(self, extended_check=None):
"""Return a boolean indicating whether the process is running or not.
:param func extended_check: An additional callable that will be invoked to perform an extended
liveness check. This callable should take a single argument of a
`psutil.Process` instance representing the context-local process
and return a boolean True/False to indicate alive vs not alive.
"""
try:
process = self._as_process()
return not (
# Can happen if we don't find our pid.
(not process)
or
# Check for walkers.
(process.status() == psutil.STATUS_ZOMBIE)
or
# Check for stale pids.
(self.process_name and self.process_name != self._get_process_name(process))
or
# Extended checking.
(extended_check and not extended_check(process))
)
except (self.NotStarted, psutil.NoSuchProcess, psutil.AccessDenied):
# On some platforms, accessing attributes of a zombie'd Process results in NoSuchProcess.
return False
def purge_metadata(self, force=False):
"""Instance-based version of ProcessManager.purge_metadata_by_name() that checks for process
liveness before purging metadata.
:param bool force: If True, skip process liveness check before purging metadata.
:raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
"""
if not force and self.is_alive():
raise ProcessManager.MetadataError("cannot purge metadata for a running process!")
self.purge_metadata_by_name(self._name)
def _kill(self, kill_sig):
"""Send a signal to the current process."""
if self.pid:
os.kill(self.pid, kill_sig)
def terminate(self, signal_chain=KILL_CHAIN, kill_wait=KILL_WAIT_SEC, purge=True):
"""Ensure a process is terminated by sending a chain of kill signals (SIGTERM, SIGKILL)."""
alive = self.is_alive()
if alive:
logger.debug("terminating {}".format(self._name))
for signal_type in signal_chain:
pid = self.pid
try:
logger.debug("sending signal {} to pid {}".format(signal_type, pid))
self._kill(signal_type)
except OSError as e:
logger.warning(
"caught OSError({e!s}) during attempt to kill -{signal} {pid}!".format(
e=e, signal=signal_type, pid=pid
)
)
# Wait up to kill_wait seconds to terminate or move onto the next signal.
try:
if self._deadline_until(
self.is_dead,
f"{self._name} to exit",
f"{self._name} exited",
timeout=kill_wait,
):
alive = False
logger.debug("successfully terminated pid {}".format(pid))
break
except self.Timeout:
# Loop to the next kill signal on timeout.
pass
if alive:
raise ProcessManager.NonResponsiveProcess(
"failed to kill pid {pid} with signals {chain}".format(
pid=self.pid, chain=signal_chain
)
)
if purge:
self.purge_metadata(force=True)
def daemon_spawn(
self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None
):
"""Perform a single-fork to run a subprocess and write the child pid file.
Use this if your post_fork_child block invokes a subprocess via subprocess.Popen(). In this
case, a second fork is extraneous given that Popen() also forks. Using this daemonization
method leaves the responsibility of writing the pid to the caller to allow for library-
agnostic flexibility in subprocess execution.
"""
self.purge_metadata()
self.pre_fork(**pre_fork_opts or {})
pid = os.fork()
if pid == 0:
# fork's child execution
try:
os.setsid()
os.chdir(self._buildroot)
self.post_fork_child(**post_fork_child_opts or {})
except Exception:
logger.critical(traceback.format_exc())
finally:
os._exit(0)
else:
# fork's parent execution
try:
self.post_fork_parent(**post_fork_parent_opts or {})
except Exception:
logger.critical(traceback.format_exc())
def pre_fork(self):
"""Pre-fork callback for subclasses."""
def post_fork_child(self):
"""Pre-fork child callback for subclasses."""
def post_fork_parent(self):
"""Post-fork parent callback for subclasses."""
class PantsDaemonProcessManager(ProcessManager, metaclass=ABCMeta):
"""An ABC for classes that interact with pantsd's metadata.
This is extended by both a pantsd client handle, and by the server: the client reads process
metadata, and the server writes it.
"""
def __init__(self, bootstrap_options: Options, daemon_entrypoint: str):
super().__init__(
name="pantsd",
metadata_base_dir=bootstrap_options.for_global_scope().pants_subprocessdir,
)
self._bootstrap_options = bootstrap_options
self._daemon_entrypoint = daemon_entrypoint
@property
def options_fingerprint(self):
"""Returns the options fingerprint for the pantsd process.
This should cover all options consumed by the pantsd process itself in order to start: also
known as the "micro-bootstrap" options. These options are marked `daemon=True` in the global
options.
The `daemon=True` options are a small subset of the bootstrap options. Independently, the
PantsDaemonCore fingerprints the entire set of bootstrap options to identify when the
        Scheduler needs to be re-initialized.
"""
return OptionsFingerprinter.combined_options_fingerprint_for_scope(
GLOBAL_SCOPE, self._bootstrap_options, fingerprint_key="daemon"
)
def needs_restart(self, option_fingerprint):
"""Overrides ProcessManager.needs_restart, to account for the case where pantsd is running
but we want to shutdown after this run.
:param option_fingerprint: A fingerprint of the global bootstrap options.
:return: True if the daemon needs to restart.
"""
return super().needs_restart(option_fingerprint)
def post_fork_child(self):
"""Post-fork() child callback for ProcessManager.daemon_spawn()."""
spawn_control_env = {
DAEMON_ENTRYPOINT: f"{self._daemon_entrypoint}:launch_new_pantsd_instance",
# The daemon should run under the same sys.path as us; so we ensure
# this. NB: It will scrub PYTHONPATH once started to avoid infecting
# its own unrelated subprocesses.
"PYTHONPATH": os.pathsep.join(sys.path),
}
exec_env = {**os.environ, **spawn_control_env}
# Pass all of sys.argv so that we can proxy arg flags e.g. `-ldebug`.
cmd = [sys.executable] + sys.argv
spawn_control_env_vars = " ".join(f"{k}={v}" for k, v in spawn_control_env.items())
cmd_line = " ".join(cmd)
logger.debug(f"pantsd command is: {spawn_control_env_vars} {cmd_line}")
# TODO: Improve error handling on launch failures.
os.spawnve(os.P_NOWAIT, sys.executable, cmd, env=exec_env)
|