"""Tests for dense recursive polynomials' basic tools. """ from sympy.polys.densebasic import ( dup_LC, dmp_LC, dup_TC, dmp_TC, dmp_ground_LC, dmp_ground_TC, dmp_true_LT, dup_degree, dmp_degree, dmp_degree_in, dmp_degree_list, dup_strip, dmp_strip, dmp_validate, dup_reverse, dup_copy, dmp_copy, dup_normal, dmp_normal, dup_convert, dmp_convert, dup_nth, dmp_nth, dmp_ground_nth, dmp_zero_p, dmp_zero, dmp_one_p, dmp_one, dmp_ground_p, dmp_ground, dmp_negative_p, dmp_positive_p, dmp_zeros, dmp_grounds, dup_from_dict, dup_from_raw_dict, dup_to_dict, dup_to_raw_dict, dmp_from_dict, dmp_to_dict, dmp_swap, dmp_permute, dmp_nest, dmp_raise, dup_deflate, dmp_deflate, dup_multi_deflate, dmp_multi_deflate, dup_inflate, dmp_inflate, dmp_exclude, dmp_include, dmp_inject, dmp_eject, dup_terms_gcd, dmp_terms_gcd, dmp_list_terms, dmp_apply_pairs, ) from sympy.polys.specialpolys import ( f_0, f_1, f_2, f_3, f_4, f_5, f_6 ) from sympy.polys.polyclasses import ( DUP, DMP ) from sympy.polys.algebratools import ZZ, QQ from sympy.utilities.pytest import raises def test_dup_LC(): assert dup_LC([], ZZ) == 0 assert dup_LC([2,3,4,5], ZZ) == 2 def test_dup_TC(): assert dup_TC([], ZZ) == 0 assert dup_TC([2,3,4,5], ZZ) == 5 def test_dmp_LC(): assert dmp_LC([[]], ZZ) == [] assert dmp_LC([[2,3,4],[5]], ZZ) == [2,3,4] assert dmp_LC([[[]]], ZZ) == [[]] assert dmp_LC([[[2],[3,4]],[[5]]], ZZ) == [[2],[3,4]] def test_dmp_TC(): assert dmp_TC([[]], ZZ) == [] assert dmp_TC([[2,3,4],[5]], ZZ) == [5] assert dmp_TC([[[]]], ZZ) == [[]] assert dmp_TC([[[2],[3,4]],[[5]]], ZZ) == [[5]] def test_dmp_ground_LC(): assert dmp_ground_LC([[]], 1, ZZ) == 0 assert dmp_ground_LC([[2,3,4],[5]], 1, ZZ) == 2 assert dmp_ground_LC([[[]]], 2, ZZ) == 0 assert dmp_ground_LC([[[2],[3,4]],[[5]]], 2, ZZ) == 2 def test_dmp_ground_TC(): assert dmp_ground_TC([[]], 1, ZZ) == 0 assert dmp_ground_TC([[2,3,4],[5]], 1, ZZ) == 5 assert dmp_ground_TC([[[]]], 2, ZZ) == 0 assert dmp_ground_TC([[[2],[3,4]],[[5]]], 2, ZZ) == 5 def test_dmp_true_LT(): assert dmp_true_LT([[]], 1, ZZ) == ((0, 0), 0) assert dmp_true_LT([[7]], 1, ZZ) == ((0, 0), 7) assert dmp_true_LT([[1,0]], 1, ZZ) == ((0, 1), 1) assert dmp_true_LT([[1],[]], 1, ZZ) == ((1, 0), 1) assert dmp_true_LT([[1,0],[]], 1, ZZ) == ((1, 1), 1) def test_dup_degree(): assert dup_degree([]) == -1 assert dup_degree([1]) == 0 assert dup_degree([1,0]) == 1 assert dup_degree([1,0,0,0,1]) == 4 def test_dmp_degree(): assert dmp_degree([[]], 1) == -1 assert dmp_degree([[[]]], 2) == -1 assert dmp_degree([[1]], 1) == 0 assert dmp_degree([[2],[1]], 1) == 1 def test_dmp_degree_in(): assert dmp_degree_in([[[]]], 0, 2) == -1 assert dmp_degree_in([[[]]], 1, 2) == -1 assert dmp_degree_in([[[]]], 2, 2) == -1 assert dmp_degree_in([[[1]]], 0, 2) == 0 assert dmp_degree_in([[[1]]], 1, 2) == 0 assert dmp_degree_in([[[1]]], 2, 2) == 0 assert dmp_degree_in(f_4, 0, 2) == 9 assert dmp_degree_in(f_4, 1, 2) == 12 assert dmp_degree_in(f_4, 2, 2) == 8 assert dmp_degree_in(f_6, 0, 2) == 4 assert dmp_degree_in(f_6, 1, 2) == 4 assert dmp_degree_in(f_6, 2, 2) == 6 assert dmp_degree_in(f_6, 3, 3) == 3 raises(IndexError, "dmp_degree_in([[1]], -5, 1)") def test_dmp_degree_list(): assert dmp_degree_list([[[[ ]]]], 3) == (-1,-1,-1,-1) assert dmp_degree_list([[[[1]]]], 3) == ( 0, 0, 0, 0) assert dmp_degree_list(f_0, 2) == (2, 2, 2) assert dmp_degree_list(f_1, 2) == (3, 3, 3) assert dmp_degree_list(f_2, 2) == (5, 3, 3) assert dmp_degree_list(f_3, 2) == (5, 4, 7) assert dmp_degree_list(f_4, 2) == (9, 12, 8) assert dmp_degree_list(f_5, 2) == (3, 3, 3) assert 
dmp_degree_list(f_6, 3) == (4, 4, 6, 3) def test_dup_strip(): assert dup_strip([]) == [] assert dup_strip([0]) == [] assert dup_strip([0,0,0]) == [] assert dup_strip([1]) == [1] assert dup_strip([0,1]) == [1] assert dup_strip([0,0,0,1]) == [1] assert dup_strip([1,2,0]) == [1,2,0] assert dup_strip([0,1,2,0]) == [1,2,0] assert dup_strip([0,0,0,1,2,0]) == [1,2,0] def test_dmp_strip(): assert dmp_strip([0,1,0], 0) == [1,0] assert dmp_strip([[]], 1) == [[]] assert dmp_strip([[], []], 1) == [[]] assert dmp_strip([[], [], []], 1) == [[]] assert dmp_strip([[[]]], 2) == [[[]]] assert dmp_strip([[[]], [[]]], 2) == [[[]]] assert dmp_strip([[[]], [[]], [[]]], 2) == [[[]]] assert dmp_strip([[[1]]], 2) == [[[1]]] assert dmp_strip([[[]], [[1]]], 2) == [[[1]]] assert dmp_strip([[[]], [[1]], [[]]], 2) == [[[1]], [[]]] def test_dmp_validate(): assert dmp_validate([]) == ([], 0) assert dmp_validate([0,0,0,1,0]) == ([1,0], 0) assert dmp_validate([[[]]]) == ([[[]]], 2) assert dmp_validate([[0],[],[0],[1],[0]]) == ([[1],[]], 1) raises(ValueError, 'dmp_validate([[0],0,[0],[1],[0]])') def test_dup_reverse(): assert dup_reverse([1,2,0,3]) == [3,0,2,1] assert dup_reverse([1,2,3,0]) == [3,2,1] def test_dup_copy(): f = [ZZ(1),ZZ(0),ZZ(2)] g = dup_copy(f) g[0], g[2] = ZZ(7), ZZ(0) assert f != g def test_dmp_copy(): f = [[ZZ(1)],[ZZ(2),ZZ(0)]] g = dmp_copy(f, 1) g[0][0], g[1][1] = ZZ(7), ZZ(1) assert f != g def test_dup_normal(): assert dup_normal([0,0,2,1,0,11,0], ZZ) == \ [ZZ(2),ZZ(1),ZZ(0),ZZ(11),ZZ(0)] def test_dmp_normal(): assert dmp_normal([[0],[],[0,2,1],[0],[11],[]], 1, ZZ) == \ [[ZZ(2),ZZ(1)],[],[ZZ(11)],[]] def test_dup_convert(): K0, K1 = ZZ['x'], ZZ f = [DMP([1], ZZ),DMP([2], ZZ),DMP([], ZZ),DMP([3], ZZ)] assert dup_convert(f, K0, K1) == \ [ZZ(1),ZZ(2),ZZ(0),ZZ(3)] def test_dmp_convert(): K0, K1 = ZZ['x'], ZZ f = [[DMP([1], ZZ)],[DMP([2], ZZ)],[],[DMP([3], ZZ)]] assert dmp_convert(f, 1, K0, K1) == \ [[ZZ(1)],[ZZ(2)],[],[ZZ(3)]] def test_dup_nth(): assert dup_nth([1,2,3], 0, ZZ) == 3 assert dup_nth([1,2,3], 1, ZZ) == 2 assert dup_nth([1,2,3], 2, ZZ) == 1 assert dup_nth([1,2,3], 9, ZZ) == 0 raises(IndexError, 'dup_nth([3,4,5], -1, ZZ)') def test_dmp_nth(): assert dmp_nth([[1],[2],[3]], 0, 1, ZZ) == [3] assert dmp_nth([[1],[2],[3]], 1, 1, ZZ) == [2] assert dmp_nth([[1],[2],[3]], 2, 1, ZZ) == [1] assert dmp_nth([[1],[2],[3]], 9, 1, ZZ) == [] raises(IndexError, 'dmp_nth([[3],[4],[5]], -1, 1, ZZ)') def test_dmp_ground_nth(): assert dmp_ground_nth([[1],[2],[3]], (0,0), 1, ZZ) == 3 assert dmp_ground_nth([[1],[2],[3]], (1,0), 1, ZZ) == 2 assert dmp_ground_nth([[1],[2],[3]], (2,0), 1, ZZ) == 1 assert dmp_ground_nth([[1],[2],[3]], (2,1), 1, ZZ) == 0 assert dmp_ground_nth([[1],[2],[3]], (3,0), 1, ZZ) == 0 raises(IndexError, 'dmp_ground_nth([[3],[4],[5]], (2,-1), 1, ZZ)') def test_dmp_zero_p(): assert dmp_zero_p([], 0) == True assert dmp_zero_p([[]], 1) == True assert dmp_zero_p([[[]]], 2) == True assert dmp_zero_p([[[1]]], 2) == False def test_dmp_zero(): assert dmp_zero(0) == [] assert dmp_zero(2) == [[[]]] def test_dmp_one_p(): assert dmp_one_p([1], 0, ZZ) == True assert dmp_one_p([[1]], 1, ZZ) == True assert dmp_one_p([[[1]]], 2, ZZ) == True assert dmp_one_p([[[12]]], 2, ZZ) == False def test_dmp_one(): assert dmp_one(0, ZZ) == [ZZ(1)] assert dmp_one(2, ZZ) == [[[ZZ(1)]]] def test_dmp_ground_p(): assert dmp_ground_p([], 0, 0) == True assert dmp_ground_p([[]], 0, 1) == True assert dmp_ground_p([[]], 1, 1) == False assert dmp_ground_p([[ZZ(1)]], 1, 1) == True assert dmp_ground_p([[[ZZ(2)]]], 2, 2) == True assert 
dmp_ground_p([[[ZZ(2)]]], 3, 2) == False assert dmp_ground_p([[[ZZ(3)], []]], 3, 2) == False assert dmp_ground_p([], None, 0) == True assert dmp_ground_p([[]], None, 1) == True assert dmp_ground_p([ZZ(1)], None, 0) == True assert dmp_ground_p([[[ZZ(1)]]], None, 2) == True assert dmp_ground_p([[[ZZ(3)], []]], None, 2) == False def test_dmp_ground(): assert dmp_ground(ZZ(0), 2) == [[[]]] assert dmp_ground(ZZ(7),-1) == ZZ(7) assert dmp_ground(ZZ(7), 0) == [ZZ(7)] assert dmp_ground(ZZ(7), 2) == [[[ZZ(7)]]] def test_dmp_zeros(): assert dmp_zeros(4, 0, ZZ) == [[], [], [], []] assert dmp_zeros(0, 2, ZZ) == [] assert dmp_zeros(1, 2, ZZ) == [[[[]]]] assert dmp_zeros(2, 2, ZZ) == [[[[]]], [[[]]]] assert dmp_zeros(3, 2, ZZ) == [[[[]]], [[[]]], [[[]]]] assert dmp_zeros(3, -1, ZZ) == [0, 0, 0] def test_dmp_grounds(): assert dmp_grounds(ZZ(7), 0, 2) == [] assert dmp_grounds(ZZ(7), 1, 2) == [[[[7]]]] assert dmp_grounds(ZZ(7), 2, 2) == [[[[7]]], [[[7]]]] assert dmp_grounds(ZZ(7), 3, 2) == [[[[7]]], [[[7]]], [[[7]]]] assert dmp_grounds(ZZ(7), 3, -1) == [7, 7, 7] def test_dmp_negative_p(): assert dmp_negative_p([[[]]], 2, ZZ) == False assert dmp_negative_p([[[1], [2]]], 2, ZZ) == False assert dmp_negative_p([[[-1], [2]]], 2, ZZ) == True def test_dmp_positive_p(): assert dmp_positive_p([[[]]], 2, ZZ) == False assert dmp_positive_p([[[1], [2]]], 2, ZZ) == True assert dmp_positive_p([[[-1], [2]]], 2, ZZ) == False def test_dup_from_to_dict(): assert dup_from_raw_dict({}, ZZ) == [] assert dup_from_dict({}, ZZ) == [] assert dup_to_raw_dict([]) == {} assert dup_to_dict([]) == {} f = [3,0,0,2,0,0,0,0,8] g = {8: 3, 5: 2, 0: 8} h = {(8,): 3, (5,): 2, (0,): 8} assert dup_from_raw_dict(g, ZZ) == f assert dup_from_dict(h, ZZ) == f assert dup_to_raw_dict(f) == g assert dup_to_dict(f) == h K = ZZ['x','y'] f = [K([[3]]),K([[]]),K([[2]]),K([[]]),K([[]]),K([[8]])] g = {5: K([[3]]), 3: K([[2]]), 0: K([[8]])} h = {(5,): K([[3]]), (3,): K([[2]]), (0,): K([[8]])} assert dup_from_raw_dict(g, K) == f assert dup_from_dict(h, K) == f assert dup_to_raw_dict(f) == g assert dup_to_dict(f) == h def test_dmp_from_to_dict(): assert dmp_from_dict({}, 1, ZZ) == [[]] assert dmp_to_dict([[]], 1) == {} f = [[3],[],[],[2],[],[],[],[],[8]] g = {(8,0): 3, (5,0): 2, (0,0): 8} assert dmp_from_dict(g, 1, ZZ) == f assert dmp_to_dict(f, 1) == g def test_dmp_swap(): f = dmp_normal([[1,0,0],[],[1,0],[],[1]], 1, ZZ) g = dmp_normal([[1,0,0,0,0],[1,0,0],[1]], 1, ZZ) assert dmp_swap(f, 1, 1, 1, ZZ) == f assert dmp_swap(f, 0, 1, 1, ZZ) == g assert dmp_swap(g, 0, 1, 1, ZZ) == f raises(IndexError, "dmp_swap(f, -1, -7, 1, ZZ)") def test_dmp_permute(): f = dmp_normal([[1,0,0],[],[1,0],[],[1]], 1, ZZ) g = dmp_normal([[1,0,0,0,0],[1,0,0],[1]], 1, ZZ) assert dmp_permute(f, [0, 1], 1, ZZ) == f assert dmp_permute(g, [0, 1], 1, ZZ) == g assert dmp_permute(f, [1, 0], 1, ZZ) == g assert dmp_permute(g, [1, 0], 1, ZZ) == f def test_dmp_nest(): assert dmp_nest(ZZ(1), 2, ZZ) == [[[1]]] assert dmp_nest([[1]], 0, ZZ) == [[1]] assert dmp_nest([[1]], 1, ZZ) == [[[1]]] assert dmp_nest([[1]], 2, ZZ) == [[[[1]]]] def test_dmp_raise(): assert dmp_raise([], 2, 0, ZZ) == [[[]]] assert dmp_raise([[1]], 0, 1, ZZ) == [[1]] assert dmp_raise([[1,2,3], [], [2,3]], 2, 1, ZZ) == \ [[[[1]],[[2]],[[3]]], [[[]]], [[[2]],[[3]]]] def test_dup_deflate(): assert dup_deflate([], ZZ) == (1, []) assert dup_deflate([2], ZZ) == (1, [2]) assert dup_deflate([1,2,3], ZZ) == (1, [1,2,3]) assert dup_deflate([1,0,2,0,3], ZZ) == (2, [1,2,3]) assert dup_deflate(dup_from_raw_dict({7:1,1:1}, ZZ), ZZ) == \ (1, [1, 
0, 0, 0, 0, 0, 1, 0]) assert dup_deflate(dup_from_raw_dict({7:1,0:1}, ZZ), ZZ) == \ (7, [1, 1]) assert dup_deflate(dup_from_raw_dict({7:1,3:1}, ZZ), ZZ) == \ (1, [1, 0, 0, 0, 1, 0, 0, 0]) assert dup_deflate(dup_from_raw_dict({7:1,4:1}, ZZ), ZZ) == \ (1, [1, 0, 0, 1, 0, 0, 0, 0]) assert dup_deflate(dup_from_raw_dict({8:1,4:1}, ZZ), ZZ) == \ (4, [1, 1, 0]) assert dup_deflate(dup_from_raw_dict({8:1}, ZZ), ZZ) == \ (8, [1, 0]) assert dup_deflate(dup_from_raw_dict({7:1}, ZZ), ZZ) == \ (7, [1, 0]) assert dup_deflate(dup_from_raw_dict({1:1}, ZZ), ZZ) == \ (1, [1, 0]) def test_dmp_deflate(): assert dmp_deflate([[]], 1, ZZ) == ((1, 1), [[]]) assert dmp_deflate([[2]], 1, ZZ) == ((1, 1), [[2]]) f = [[1, 0, 0], [], [1, 0], [], [1]] assert dmp_deflate(f, 1, ZZ) == ((2, 1), [[1, 0, 0], [1, 0], [1]]) def test_dup_multi_deflate(): assert dup_multi_deflate(([2],), ZZ) == (1, ([2],)) assert dup_multi_deflate(([], []), ZZ) == (1, ([], [])) assert dup_multi_deflate(([1,2,3],), ZZ) == (1, ([1,2,3],)) assert dup_multi_deflate(([1,0,2,0,3],), ZZ) == (2, ([1,2,3],)) assert dup_multi_deflate(([1,0,2,0,3], [2,0,0]), ZZ) == \ (2, ([1,2,3], [2,0])) assert dup_multi_deflate(([1,0,2,0,3], [2,1,0]), ZZ) == \ (1, ([1,0,2,0,3], [2,1,0])) def test_dmp_multi_deflate(): assert dmp_multi_deflate(([[]],), 1, ZZ) == \ ((1, 1), ([[]],)) assert dmp_multi_deflate(([[]], [[]]), 1, ZZ) == \ ((1, 1), ([[]], [[]])) assert dmp_multi_deflate(([[1]], [[]]), 1, ZZ) == \ ((1, 1), ([[1]], [[]])) assert dmp_multi_deflate(([[1]], [[2]]), 1, ZZ) == \ ((1, 1), ([[1]], [[2]])) assert dmp_multi_deflate(([[1]], [[2,0]]), 1, ZZ) == \ ((1, 1), ([[1]], [[2, 0]])) assert dmp_multi_deflate(([[2,0]], [[2,0]]), 1, ZZ) == \ ((1, 1), ([[2, 0]], [[2, 0]])) assert dmp_multi_deflate(([[2]], [[2,0,0]]), 1, ZZ) == ((1, 2), ([[2]], [[2, 0]])) assert dmp_multi_deflate(([[2,0,0]], [[2,0,0]]), 1, ZZ) == ((1, 2), ([[2, 0]], [[2, 0]])) assert dmp_multi_deflate(([2,0,0], [1,0,4,0,1]), 0, ZZ) == \ ((2,), ([2, 0], [1, 4, 1])) f = [[1, 0, 0], [], [1, 0], [], [1]] g = [[1, 0, 1, 0], [], [1]] assert dmp_multi_deflate((f,), 1, ZZ) == \ ((2, 1), ([[1, 0, 0], [1, 0], [1]],)) assert dmp_multi_deflate((f, g), 1, ZZ) == \ ((2, 1), ([[1, 0, 0], [1, 0], [1]], [[1, 0, 1, 0], [1]])) def test_dup_inflate(): assert dup_inflate([], 17, ZZ) == [] assert dup_inflate([1,2,3], 1, ZZ) == [1,2,3] assert dup_inflate([1,2,3], 2, ZZ) == [1,0,2,0,3] assert dup_inflate([1,2,3], 3, ZZ) == [1,0,0,2,0,0,3] assert dup_inflate([1,2,3], 4, ZZ) == [1,0,0,0,2,0,0,0,3] raises(IndexError, 'dup_inflate([1,2,3], 0, ZZ)') def test_dmp_inflate(): assert dmp_inflate([1], (3,), 0, ZZ) == [1] assert dmp_inflate([[]], (3, 7), 1, ZZ) == [[]] assert dmp_inflate([[2]], (1, 2), 1, ZZ) == [[2]] assert dmp_inflate([[2,0]], (1, 1), 1, ZZ) == [[2,0]] assert dmp_inflate([[2,0]], (1, 2), 1, ZZ) == [[2,0,0]] assert dmp_inflate([[2,0]], (1, 3), 1, ZZ) == [[2,0,0,0]] assert dmp_inflate([[1, 0, 0], [1], [1, 0]], (2, 1), 1, ZZ) == \ [[1, 0, 0], [], [1], [], [1, 0]] raises(IndexError, "dmp_inflate([[]], (-3, 7), 1, ZZ)") def test_dmp_exclude(): assert dmp_exclude([[[]]], 2, ZZ) == ([], [[[]]], 2) assert dmp_exclude([[[7]]], 2, ZZ) == ([], [[[7]]], 2) assert dmp_exclude([1,2,3], 0, ZZ) == ([], [1,2,3], 0) assert dmp_exclude([[1],[2,3]], 1, ZZ) == ([], [[1],[2,3]], 1) assert dmp_exclude([[1,2,3]], 1, ZZ) == ([0], [1,2,3], 0) assert dmp_exclude([[1],[2],[3]], 1, ZZ) == ([1], [1,2,3], 0) assert dmp_exclude([[[1,2,3]]], 2, ZZ) == ([0,1], [1,2,3], 0) assert dmp_exclude([[[1]],[[2]],[[3]]], 2, ZZ) == ([1,2], [1,2,3], 0) def 
test_dmp_include(): assert dmp_include([1,2,3], [], 0, ZZ) == [1,2,3] assert dmp_include([1,2,3], [0], 0, ZZ) == [[1,2,3]] assert dmp_include([1,2,3], [1], 0, ZZ) == [[1],[2],[3]] assert dmp_include([1,2,3], [0,1], 0, ZZ) == [[[1,2,3]]] assert dmp_include([1,2,3], [1,2], 0, ZZ) == [[[1]],[[2]],[[3]]] def test_dmp_inject(): K = ZZ['x','y'] assert dmp_inject([], 0, K) == ([[[]]], 2) assert dmp_inject([[]], 1, K) == ([[[[]]]], 3) assert dmp_inject([K([[1]])], 0, K) == ([[[1]]], 2) assert dmp_inject([[K([[1]])]], 1, K) == ([[[[1]]]], 3) assert dmp_inject([K([[1]]),K([[2],[3,4]])], 0, K) == ([[[1]],[[2],[3,4]]], 2) f = [K([[3],[7,0],[5,0,0]]),K([[2],[]]),K([[]]),K([[1,0,0],[11]])] g = [[[3],[7,0],[5,0,0]],[[2],[]],[[]],[[1,0,0],[11]]] assert dmp_inject(f, 0, K) == (g, 2) def test_dmp_eject(): K = ZZ['x','y'] assert dmp_eject([[[]]], 2, K) == [] assert dmp_eject([[[[]]]], 3, K) == [[]] assert dmp_eject([[[1]]], 2, K) == [K([[1]])] assert dmp_eject([[[[1]]]], 3, K) == [[K([[1]])]] assert dmp_eject([[[1]],[[2],[3,4]]], 2, K) == [K([[1]]),K([[2],[3,4]])] f = [K([[3],[7,0],[5,0,0]]),K([[2],[]]),K([[]]),K([[1,0,0],[11]])] g = [[[3],[7,0],[5,0,0]],[[2],[]],[[]],[[1,0,0],[11]]] assert dmp_eject(g, 2, K) == f def test_dup_terms_gcd(): assert dup_terms_gcd([], ZZ) == (0, []) assert dup_terms_gcd([1,0,1], ZZ) == (0, [1,0,1]) assert dup_terms_gcd([1,0,1,0], ZZ) == (1, [1,0,1]) def test_dmp_terms_gcd(): assert dmp_terms_gcd([[]], 1, ZZ) == ((0,0), [[]]) assert dmp_terms_gcd([1,0,1,0], 0, ZZ) == ((1,), [1,0,1]) assert dmp_terms_gcd([[1],[],[1],[]], 1, ZZ) == ((1,0), [[1],[],[1]]) assert dmp_terms_gcd([[1,0],[],[1]], 1, ZZ) == ((0,0), [[1,0],[],[1]]) assert dmp_terms_gcd([[1,0],[1,0,0],[],[]], 1, ZZ) == ((2,1), [[1],[1,0]]) def test_dmp_list_terms(): assert dmp_list_terms([[[]]], 2, ZZ) == [((0,0,0), 0)] assert dmp_list_terms([[[1]]], 2, ZZ) == [((0,0,0), 1)] assert dmp_list_terms([1,2,4,3,5], 0, ZZ) == \ [((4,), 1), ((3,), 2), ((2,), 4), ((1,), 3), ((0,), 5)] assert dmp_list_terms([[1],[2,4],[3,5,0]], 1, ZZ) == \ [((2, 0), 1), ((1, 1), 2), ((1, 0), 4), ((0, 2), 3), ((0, 1), 5)] def test_dmp_apply_pairs(): h = lambda a, b: a*b assert dmp_apply_pairs([1,2,3], [4,5,6], h, [], 0, ZZ) == [4,10,18] assert dmp_apply_pairs([2,3], [4,5,6], h, [], 0, ZZ) == [10,18] assert dmp_apply_pairs([1,2,3], [5,6], h, [], 0, ZZ) == [10,18] assert dmp_apply_pairs([[1,2],[3]], [[4,5],[6]], h, [], 1, ZZ) == [[4,10],[18]] assert dmp_apply_pairs([[1,2],[3]], [[4],[5,6]], h, [], 1, ZZ) == [[8],[18]] assert dmp_apply_pairs([[1],[2,3]], [[4,5],[6]], h, [], 1, ZZ) == [[5],[18]]
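# Illustrative sketch (not the sympy.polys implementation): the dup_* helpers
# exercised above operate on dense univariate polynomials stored as coefficient
# lists in decreasing-degree order (x**2 + 2*x + 3 -> [1, 2, 3]), and the dmp_*
# helpers on nested lists of the same shape for multivariate polynomials
# (x*y + 2 -> [[1, 0], [2]]).  The zero polynomial is the empty list and has
# degree -1, which is why the tests assert dup_degree([]) == -1.  The real
# helpers also take a coefficient domain such as ZZ or QQ, which this sketch
# omits for brevity.

def sketch_dup_degree(f):
    """Degree of a dense univariate polynomial; -1 for the zero polynomial."""
    return len(f) - 1

def sketch_dup_LC(f):
    """Leading coefficient (first entry), or 0 for the zero polynomial."""
    if not f:
        return 0
    return f[0]

def sketch_dup_strip(f):
    """Drop leading zero coefficients: [0, 0, 1, 2, 0] -> [1, 2, 0]."""
    i = 0
    while i < len(f) and not f[i]:
        i += 1
    return f[i:]

assert sketch_dup_degree([1, 0, 0, 0, 1]) == 4
assert sketch_dup_LC([2, 3, 4, 5]) == 2
assert sketch_dup_strip([0, 0, 0, 1, 2, 0]) == [1, 2, 0]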
# coding=utf-8 # Copyright 2017 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import mock import pytest import conftest import test_icontrol_driver import test_plugin_rpc from f5_openstack_agent.lbaasv2.drivers.bigip import agent_manager import class_tester_base_class import mock_builder_base_class @pytest.fixture @mock.patch('f5_openstack_agent.lbaasv2.drivers.bigip.agent_manager.' 'LbaasAgentManager._setup_rpc') @mock.patch('f5_openstack_agent.lbaasv2.drivers.bigip.agent_manager.' 'importutils.import_object') def agent_mgr_setup(mock_importutils, mock_setup_rpc): return agent_manager.LbaasAgentManager(mock.MagicMock(name='conf')) @pytest.mark.skip(reason="Mocked RPC breaks initialization") @mock.patch('f5_openstack_agent.lbaasv2.drivers.bigip.agent_manager.LOG') def test_update_fdb_entries(mock_log, agent_mgr_setup): '''When func is called in agent_manager, it prooduces a warning message.''' agent_mgr_setup.update_fdb_entries('', '') warning_msg = "update_fdb_entries: the LBaaSv2 Agent does not handle an " \ "update of the IP address of a neutron port. This port is generally " \ "tied to a member. If the IP address of a member was changed, be " \ "sure to also recreate the member in neutron-lbaas with the new " \ "address." assert mock_log.warning.call_args == mock.call(warning_msg) class TestLbaasAgentManagerMockBuilder(mock_builder_base_class.MockBuilderBase, conftest.TestingWithServiceConstructor): """Builder class for Mock objects that mock LbaasAgentManager This class builds mock-module class objects for isolation of the LbaasAgentManager. As such, all reference to `target` are pointing to either an instantiated instance of LbaasAgentManager or is a mocked instance of this class. Use: class Tester(object): my_mock_builder = TestLbaasAgentManagerMockBuilder standalone = TestLbaasAgentManagerMockBuilder.standalone neutron_only = TestLbaasAgentManagerMockBuilder.neutron_only bigip_only = TestLbaasAgentManagerMockBuilder.bigip_only fully_int = TestLbaasAgentManagerMockBuilder.fully_int fixture = my_mock_builder.fixture def test_foo(fixture): # this then uses the pytest.fixture fixture from MockBuilder """ # non-instantiated _other_builders = dict( lbdriver=test_icontrol_driver.TestiControlDriverMockBuilder, plugin_rpc=test_plugin_rpc.TestPluginRpcMockBuilder) @staticmethod def mocked_target(*args): """Build a Mock target that totally skips the __init__ method This is typically a building block that builds just an instantiated instance of target that has limited to no attibute quality that is otherwise generated by fully_mocked_target(). Thus, the return is a partially-completed dead-end Target object instance. """ with mock.patch( 'f5_openstack_agent.lbaasv2.drivers.bigip.agent_manager.' 
'LbaasAgentManager.__init__') as my_init: my_init.return_value = None conf = mock.Mock() new_target = agent_manager.LbaasAgentManager(conf) new_target.conf = conf return new_target def fully_mocked_target(self, mocked_target): """Creates a mocked target that mocks all lower other_builders' targets This does not mean that the caller's black-box is limited to this target, but can drill further using a system of either mocks or non-mocks. Please see conftest.MockBuilder for details. """ # Mock() objects here should be filled in with the appropriate mocks... mocked_target.context = 'context' mocked_target.serializer = None mocked_target.cache = mock.Mock() mocked_target.last_resync = mock.Mock() mocked_target.needs_resync = False mocked_target.plugin_rpc = \ self.other_builders['plugin_rpc'].new_fully_mocked_target() mocked_target.tunnel_rpc = mock.Mock() mocked_target.l2_pop_rpc = mock.Mock() mocked_target.state_rpc = mock.Mock() mocked_target.pending_services = {} mocked_target.service_resync_interval = 5 mocked_target.lbdriver = \ self.other_builders['lbdriver'].new_fully_mocked_target() mocked_target.agent_host = 'conf.host:agent_hash' agent_configurations = ( {'environment_prefix': 'environment_prefix', 'environment_group_number': 'environment_group_number', 'global_routed_mode': 'f5_global_routed_mode'} ) mocked_target.admin_state_up = 'start_agent_admin_state_up' mocked_target.agent_state = { 'binary': 'AGENT_BINARY_NAME', 'host': mocked_target.agent_host, 'topic': 'TOPIC_LOADBALANCER_AGENT_V2', 'agent_type': 'AGENT_TYPE_LOADBALANCERV2', 'l2_population': 'l2_population', 'start_flag': True, 'configurations': agent_configurations } mocked_target.endpoints = mocked_target mocked_target.connection = mock.Mock() return mocked_target def new_fully_mocked_target(self): return self.fully_mocked_target(self.mocked_target()) def mock_all_get_all_deployed(self, target=None, **kwargs): """Modifies target to have at least one purgable item per ref type""" if not target: target = self.new_fully_mocked_target() listing = [ 'get_all_deployed_loadbalancers', 'get_all_deployed_listeners', 'get_all_deployed_l7_policys', 'get_all_deployed_health_monitors', 'get_all_deployed_pools'] for method in listing: self.mock_other_builders_method( target, method, targets_attr='lbdriver', expected_args=[], **kwargs) def mock_all_purges(self, target=None): """Performs a basic mock against all purges methods Example: purge_orphaned_loadbalancers """ if not target: target = self.new_fully_mocked_target() self.mock_purge_orphaned_loadbalancers(target) self.mock_purge_orphaned_listeners(target) self.mock_purge_orphaned_l7_policys(target) self.mock_purge_orphaned_pools(target) self.mock_purge_orphaned_nodes(target) self.mock_purge_orphaned_health_monitors(target) def mock_purge_orphaned_loadbalancers( self, target=None, call_cnt=1, static=None, expected_args=None, **kwargs): """Mocks the target's purge_orphaned_loadbalancers method The given kwargs will be passed to the mock.Mock call This will also create a new fully_mocked_target if target is not specified. """ if not target: target = self.new_fully_mocked_target() self._mockfactory(target, 'purge_orphaned_loadbalancers', static, call_cnt, expected_args, kwargs) return target def mock_purge_orphaned_listeners( self, target=None, call_cnt=1, static=None, expected_args=None, **kwargs): """Mocks the target's purge_orphaned_listeners method The given kwargs will be passed to the mock.Mock call This will also create a new fully_mocked_target if target is not specified. 
""" if not target: target = self.new_fully_mocked_target() self._mockfactory(target, 'purge_orphaned_listeners', static, call_cnt, expected_args, kwargs) return target def mock_purge_orphaned_l7_policys( self, target=None, call_cnt=1, static=None, expected_args=None, **kwargs): """Mocks the target's purge_orphaned_l7_policys method The given kwargs will be passed to the mock.Mock call This will also create a new fully_mocked_target if target is not specified. """ if not target: target = self.new_fully_mocked_target() self._mockfactory(target, 'purge_orphaned_l7_policys', static, call_cnt, expected_args, kwargs) return target def mock_purge_orphaned_pools( self, target=None, call_cnt=1, static=None, expected_args=None, **kwargs): """Mocks the target's purge_orphaned_pools method The given kwargs will be passed to the mock.Mock call This will also create a new fully_mocked_target if target is not specified. """ if not target: target = self.new_fully_mocked_target() self._mockfactory(target, 'purge_orphaned_pools', static, call_cnt, expected_args, kwargs) return target def mock_purge_orphaned_nodes( self, target=None, call_cnt=1, static=None, expected_args=None, **kwargs): """Mocks the target's purge_orphaned_nodes method The given kwargs will be passed to the mock.Mock call This will also create a new fully_mocked_target if target is not specified. """ if not target: target = self.new_fully_mocked_target() self._mockfactory(target, 'purge_orphaned_nodes', static, call_cnt, expected_args, kwargs) return target def mock_purge_orphaned_health_monitors( self, target=None, call_cnt=1, static=None, expected_args=None, **kwargs): """Mocks the target's purge_orphaned_health_monitors method The given kwargs will be passed to the mock.Mock call This will also create a new fully_mocked_target if target is not specified. """ if not target: target = self.new_fully_mocked_target() self._mockfactory(target, 'purge_orphaned_health_monitors', static, call_cnt, expected_args, kwargs) return target def mock_purge_orphaned_policys( self, target=None, call_cnt=1, static=None, expected_args=None, **kwargs): """Mocks the target's purge_orphaned_policys method The given kwargs will be passed to the mock.Mock call This will also create a new fully_mocked_target if target is not specified. """ if not target: target = self.new_fully_mocked_target() self._mockfactory(target, 'purge_orphaned_policys', static, call_cnt, expected_args, kwargs) return target # replace the name to LBaasmAgentManagerClassMocker as decided by team class LBaasAgentManagerMocker(object): """To-be Instantiated Mocker class that tracks 'frozen' code space This class is meant to be a code-space tracker element that tracks code space variables and pointers to keep original code space elements in tact. Upon setUp of a test instance, the fixtures here will create and replace the code space elements with either mocks or temporary variables. Upon tearDown, these frozen code space elements are restored and mocks and temporary variables are restored. Using this class's methods should be limited to global-code-space libraries imported from Python standard libraries or pip-hosted libraries. NOT F5-controlled libraries within this repo. Those should be handled and built by the appropriate MockBuilder classes within the modules that target the class to be mocked. 
""" @pytest.fixture def mock_logger(self): """Mocks the target's logger element for caller's use in testing""" my_logger = mock.Mock() self.freeze_logger = agent_manager.LOG self.logger = my_logger agent_manager.LOG = my_logger def teardown(self): """Performs teardown operations dynamically to catch fixtures used""" if hasattr(self, 'freeze_logger'): agent_manager.LOG = self.freeze_logger class TestLbaasAgentManager(LBaasAgentManagerMocker, class_tester_base_class.ClassTesterBase): """Tester class that tests the AgentManager Tests under this tester class should test the code in agent_manager.py and encompass both fully-comprehensive white-box tests (fcwb) and some black- box tests (bb). Black-box tests should provide information on where they are limited to in the pydoc for the method. """ # this is not instantiated builder = TestLbaasAgentManagerMockBuilder # fixtures hosted by builder (add more if needed): # standalone_builder = TestLbaasAgentManagerMockBuilder.standalone_builder # mocked_target = my_builder.mocked_target # NOTE: in the above list, do not add mock_{method}'s as these cannot be # fixtures because they are instantiated! def test_fcwb_clean_orphaned_objects_and_save_device_config( self, standalone_builder, fully_mocked_target, mock_logger): """Performs fully-comprehensive testing for this method White-box test for: agent_manager.LBaasAgentManager.\ clean_orphaned_objects_and_save_device_config That verfies grammar and "freezes" logic. """ target = fully_mocked_target def no_global_agent_exists(self, builder, target): plugin_rpc_get_clusterwide_agent_retval = dict() get_clusterwide_agent_expected = \ tuple([target.conf.environment_prefix, target.conf.environmentgroup_number]) builder.mock_other_builders_method( target, 'get_clusterwide_agent', targets_attr='plugin_rpc', call_cnt=1, expected_args=get_clusterwide_agent_expected, return_value=plugin_rpc_get_clusterwide_agent_retval) assert target.clean_orphaned_objects_and_save_device_config() builder.check_mocks(target) def global_agent_exists(self, builder, target): plugin_rpc_get_clusterwide_agent_retval = \ dict(host=target.agent_host) get_clusterwide_agent_expected = \ tuple([target.conf.environment_prefix, target.conf.environmentgroup_number]) builder.mock_other_builders_method( target, 'get_clusterwide_agent', targets_attr='plugin_rpc', call_cnt=1, expected_args=get_clusterwide_agent_expected, return_value=plugin_rpc_get_clusterwide_agent_retval) builder.mock_other_builders_method( target, 'backup_configuration', targets_attr='lbdriver', expected_args=None) builder.mock_all_get_all_deployed(target, return_value=[1]) builder.mock_all_purges(target) assert not target.clean_orphaned_objects_and_save_device_config() builder.check_mocks(target) def global_agent_with_failure(self, builder, target): plugin_rpc_get_clusterwide_agent_retval = dict(host=target.host) get_clusterwide_agent_expected = \ tuple([target.conf.environment_prefix, target.conf.environmentgroup_number]) builder.mock_other_builders_method( target, 'get_clusterwide_agent', targets_attr='plugin_rpc', call_cnt=1, expected_args=get_clusterwide_agent_expected, return_value=plugin_rpc_get_clusterwide_agent_retval) builder.mock_other_builders_method( target, 'backup_configuration', targets_attr='lbdriver', expected_args=None) builder.mock_all_get_all_deployed(target, return_value=[1]) builder.mock_all_purges(target) builder.mock_purge_orphaned_loadbalancers( special_effect=AssertionError) assert target.clean_orphaned_objects_and_save_device_config() not_called = [ 
'get_all_deployed_listeners', 'get_all_deployed_l7_policys', 'get_all_deployed_pools', 'get_all_deployed_health_monitors', 'purge_orphaned_listeners', 'purge_orphaned_l7_policys' 'purge_orphaned_pools', 'purge_porphaned_health_monitors'] builder.check_mocks(target, not_called=not_called) def global_agent_different_agent(self, builder, target): plugin_rpc_get_clusterwide_agent_retval = dict(host='not me') get_clusterwide_agent_expected = \ tuple([target.conf.environment_prefix, target.conf.environmentgroup_number]) builder.mock_other_builders_method( target, 'get_clusterwide_agent', targets_attr='plugin_rpc', call_cnt=1, expected_args=get_clusterwide_agent_expected, return_value=plugin_rpc_get_clusterwide_agent_retval) assert target.clean_orphaned_objects_and_save_device_config() builder.check_mocks(target) no_global_agent_exists(self, standalone_builder, target) target = standalone_builder.new_fully_mocked_target() global_agent_exists(self, standalone_builder, target) target = standalone_builder.new_fully_mocked_target() global_agent_different_agent(self, standalone_builder, target) def test_fcwb_purge_orphaned_loadbalancers( self, service_with_loadbalancer, standalone_builder, mock_logger, fully_mocked_target): """FCWBT for purge_orphaned_listeners""" target = fully_mocked_target svc = service_with_loadbalancer def lbs_removed(logger, builder, target, svc): svc['loadbalancer']['provisioning_status'] = 'Unknown' lb = svc['loadbalancer'] lb['hostnames'] = [target.agent_host] lb_id = lb['id'] lbs = {lb_id: lb.copy()} lb_statuses = {lb_id: 'Unknown'} purge_args = dict(tenant_id=lb['tenant_id'], loadbalancer_id=lb_id, hostnames=lb['hostnames']) get_all_args = dict(purge_orphaned_folders=True) builder.mock_other_builders_method( target, 'validate_loadbalancers_state', targets_attr='plugin_rpc', expected=tuple([lb_id]), return_value=lb_statuses) builder.mock_other_builders_method( target, 'purge_orphaned_loadbalancer', targets_attr='lbdriver', expected_args=purge_args) builder.mock_other_builders_method( target, 'get_all_deployed_loadbalancers', targets_attr='lbdriver', expected_args=get_all_args) target.purge_orphaned_loadbalancers(lbs) builder.check_mocks(target) lbs_removed(self.logger, standalone_builder, target, svc) def test_fcwb_purge_orphaned_listeners( self, service_with_listener, standalone_builder, mock_logger, fully_mocked_target): """FCWBT for purge_orphaned_listeners""" target = fully_mocked_target svc = service_with_listener def lstns_removed(logger, builder, target, svc): lst = svc['listeners'][0] lst['provisioning_status'] = 'Unknown' lst['hostnames'] = [target.agent_host] lst_id = lst['id'] lsts = {lst_id: lst.copy()} lst_statuses = {lst_id: 'Unknown'} purge_args = dict(tenant_id=lst['tenant_id'], listeners_id=lst_id, hostnames=lst['hostnames']) builder.mock_other_builders_method( target, 'validate_listeners_state', targets_attr='plugin_rpc', expected_args=tuple([lst_id]), return_value=lst_statuses) builder.mock_other_builders_method( target, 'purge_orphaned_listener', targets_attr='lbdriver', expected_args=purge_args) target.purge_orphaned_listeners(lsts) builder.check_mocks(target) lstns_removed(self.logger, standalone_builder, target, svc) def test_fcwb_purge_orphaned_l7_policys( self, service_with_l7_policy, standalone_builder, mock_logger, fully_mocked_target): """FCWBT for purge_orphaned_l7_policys""" target = fully_mocked_target svc = service_with_l7_policy def pols_removed(logger, builder, target, svc): # fake data manipulation: pol = svc['l7_policies'][0] pol_id = 
pol['id'] li = svc['listeners'][0] t_id = li['tenant_id'] li_id = li['id'] hostnames = [target.agent_host] deployed_pol = dict(id=pol_id, tenant_id=t_id, hostnames=hostnames) deployed_li = dict(id=li_id, tenant_id=t_id, hostnames=hostnames, l7_policy='') deployed_lis = {li_id: deployed_li} deployed_pols = {pol_id: deployed_pol} # mocks... builder.mock_other_builders_method( target, 'get_all_deployed_listeners', targets_attr='lbdriver', expected_args=tuple([pol_id]), return_value=deployed_lis) builder.mock_other_builders_method( target, 'purge_orphaned_l7_policy', targets_attr='lbdriver') # test... target.purge_orphaned_l7_policys(deployed_pols) # validation... builder.check_mocks(target) pols_removed(self.logger, standalone_builder, target, svc) def test_fcwb_purge_orphaned_pools( self, service_with_pool, standalone_builder, mock_logger, fully_mocked_target): """FCWBT for purge_orphaned_pools""" target = fully_mocked_target svc = service_with_pool def ps_removed(logger, builder, target, svc): p = svc['pools'][0] p['provisioning_status'] = 'Unknown' p_id = p['id'] p['hostnames'] = [target.agent_host] ps = {p_id: p.copy()} p_statuses = {p_id: 'Unknown'} purge_args = dict(tenant_id=p['tenant_id'], pools_id=p_id, hostnames=p['hostnames']) builder.mock_other_builders_method( target, 'validate_pools_state', targets_attr='plugin_rpc', expected_args=tuple([p_id]), return_value=p_statuses) builder.mock_other_builders_method( target, 'purge_orphaned_pool', targets_attr='lbdriver', expected_args=purge_args) target.purge_orphaned_pools(ps) builder.check_mocks(target) ps_removed(self.logger, standalone_builder, target, svc) def test_fcwb_purge_orphaned_health_monitors( self, service_with_health_monitor, standalone_builder, fully_mocked_target, mock_logger): """FCWBT for purge_orphaned_health_monitors""" target = fully_mocked_target svc = service_with_health_monitor def hms_removed(logger, builder, target, svc): hm = svc['healthmonitors'][0] p = svc['pools'][0] p_id = p['id'] hm_id = hm['id'] deployed_monitors = dict( tenant_id=hm['tenant_id'], id=hm_id, hostnames=[target.agent_host]) deployed_pool = dict( tenant_id=p['tenant_id'], id=p_id, monitor=hm_id, hostnames=[target.agent_host]) hms = {hm_id: deployed_monitors} deployed_pool = {p_id: deployed_pool} builder.mock_other_builders_method( target, 'get_all_deployed_pools', targets_attr='lbdriver', return_value=deployed_pool) builder.mock_other_builders_method( target, 'purge_orphaned_health_monitor', targets_attr='lbdriver') target.purge_orphaned_health_monitors(hms) builder.check_mocks(target) hms_removed(self.logger, standalone_builder, target, svc) @pytest.mark.skip(reason='WIP') def test_pbb_clean_orphaned_objects_and_save_device_config( self, service_with_health_monitor, standalone_builder, fully_mocked_target): target = fully_mocked_target svc = service_with_health_monitor def down_to_plugin_rpc_functional(target, builder, svc): hosts = [target.agent_host] fake_bigip = mock.Mock() fake_bigip.status = 'active' fake_bigip.tm.sys.folders.folder.exist.return_value = True prefix = target.lbdriver.service_adapter.prefix fake_bigip.tm.sys.folders.get_collection.return_value = [ prefix + svc['loadbalancer']['tenant_id']] # need to continue down the route of mocking _call() and # system_adapter for list_obj in ['listeners', 'pools', 'healthmonitors']: svc[list_obj]['hostnames'] = hosts svc['loadbalancer']['hostnames'] = hosts _calls_side_effect = [ {'host': target.agent_host}, {}] # we'll just mock... not really validate much... 
builder.mock_other_builders_method( target, '_call', targets_attr='plugin_rpc', side_effect=_calls_side_effect) down_to_plugin_rpc_functional(target, standalone_builder, svc)
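
# Self-contained sketch of the constructor-bypass pattern that
# TestLbaasAgentManagerMockBuilder.mocked_target() relies on above: patch a
# class's __init__ to a no-op so an instance can be built without its real
# RPC/driver setup, then attach mocks for only the attributes a test needs.
# The Widget class below is purely illustrative and not part of the agent code.

import mock


class Widget(object):
    def __init__(self, conf):
        raise RuntimeError('pretend this wires up RPC and drivers')


def bare_widget():
    with mock.patch.object(Widget, '__init__', return_value=None):
        target = Widget(mock.Mock(name='conf'))
    target.plugin_rpc = mock.Mock()
    return target


def test_sketch_constructor_bypass():
    w = bare_widget()
    w.plugin_rpc.get_clusterwide_agent.return_value = {'host': 'agent-1'}
    assert w.plugin_rpc.get_clusterwide_agent('env', 1) == {'host': 'agent-1'}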
# # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" __doc__ = """ Common helper functions for working with the Microsoft tool chain. """ import copy import os import subprocess import re import SCons.Util logfile = os.environ.get('SCONS_MSCOMMON_DEBUG') if logfile == '-': def debug(x): print x elif logfile: try: import logging except ImportError: debug = lambda x: open(logfile, 'a').write(x + '\n') else: logging.basicConfig(filename=logfile, level=logging.DEBUG) debug = logging.debug else: debug = lambda x: None _is_win64 = None def is_win64(): """Return true if running on windows 64 bits. Works whether python itself runs in 64 bits or 32 bits.""" # Unfortunately, python does not provide a useful way to determine # if the underlying Windows OS is 32-bit or 64-bit. Worse, whether # the Python itself is 32-bit or 64-bit affects what it returns, # so nothing in sys.* or os.* help. # Apparently the best solution is to use env vars that Windows # sets. If PROCESSOR_ARCHITECTURE is not x86, then the python # process is running in 64 bit mode (on a 64-bit OS, 64-bit # hardware, obviously). # If this python is 32-bit but the OS is 64, Windows will set # ProgramW6432 and PROCESSOR_ARCHITEW6432 to non-null. # (Checking for HKLM\Software\Wow6432Node in the registry doesn't # work, because some 32-bit installers create it.) global _is_win64 if _is_win64 is None: # I structured these tests to make it easy to add new ones or # add exceptions in the future, because this is a bit fragile. _is_win64 = False if os.environ.get('PROCESSOR_ARCHITECTURE','x86') != 'x86': _is_win64 = True if os.environ.get('PROCESSOR_ARCHITEW6432'): _is_win64 = True if os.environ.get('ProgramW6432'): _is_win64 = True return _is_win64 def read_reg(value): return SCons.Util.RegGetValue(SCons.Util.HKEY_LOCAL_MACHINE, value)[0] def has_reg(value): """Return True if the given key exists in HKEY_LOCAL_MACHINE, False otherwise.""" try: SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, value) ret = True except WindowsError: ret = False return ret # Functions for fetching environment variable settings from batch files. def normalize_env(env, keys, force=False): """Given a dictionary representing a shell environment, add the variables from os.environ needed for the processing of .bat files; the keys are controlled by the keys argument. It also makes sure the environment values are correctly encoded. 
If force=True, then all of the key values that exist are copied into the returned dictionary. If force=false, values are only copied if the key does not already exist in the copied dictionary. Note: the environment is copied.""" normenv = {} if env: for k in env.keys(): normenv[k] = copy.deepcopy(env[k]).encode('mbcs') for k in keys: if k in os.environ and (force or not k in normenv): normenv[k] = os.environ[k].encode('mbcs') # This shouldn't be necessary, since the default environment should include system32, # but keep this here to be safe, since it's needed to find reg.exe which the MSVC # bat scripts use. sys32_dir = os.path.join(os.environ.get("SystemRoot", os.environ.get("windir",r"C:\Windows\system32")),"System32") if sys32_dir not in normenv['PATH']: normenv['PATH'] = normenv['PATH'] + os.pathsep + sys32_dir debug("PATH: %s"%normenv['PATH']) return normenv def get_output(vcbat, args = None, env = None): """Parse the output of given bat file, with given args.""" if env is None: # Create a blank environment, for use in launching the tools env = SCons.Environment.Environment(tools=[]) # TODO: This is a hard-coded list of the variables that (may) need # to be imported from os.environ[] for v[sc]*vars*.bat file # execution to work. This list should really be either directly # controlled by vc.py, or else derived from the common_tools_var # settings in vs.py. vars = [ 'COMSPEC', # VS100 and VS110: Still set, but modern MSVC setup scripts will # discard these if registry has values. However Intel compiler setup # script still requires these as of 2013/2014. 'VS110COMNTOOLS', 'VS100COMNTOOLS', 'VS90COMNTOOLS', 'VS80COMNTOOLS', 'VS71COMNTOOLS', 'VS70COMNTOOLS', 'VS60COMNTOOLS', ] env['ENV'] = normalize_env(env['ENV'], vars, force=False) if args: debug("Calling '%s %s'" % (vcbat, args)) popen = SCons.Action._subproc(env, '"%s" %s & set' % (vcbat, args), stdin = 'devnull', stdout=subprocess.PIPE, stderr=subprocess.PIPE) else: debug("Calling '%s'" % vcbat) popen = SCons.Action._subproc(env, '"%s" & set' % vcbat, stdin = 'devnull', stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Use the .stdout and .stderr attributes directly because the # .communicate() method uses the threading module on Windows # and won't work under Pythons not built with threading. stdout = popen.stdout.read() stderr = popen.stderr.read() # Extra debug logic, uncomment if necessar # debug('get_output():stdout:%s'%stdout) # debug('get_output():stderr:%s'%stderr) if stderr: # TODO: find something better to do with stderr; # this at least prevents errors from getting swallowed. import sys sys.stderr.write(stderr) if popen.wait() != 0: raise IOError(stderr.decode("mbcs")) output = stdout.decode("mbcs") return output def parse_output(output, keep = ("INCLUDE", "LIB", "LIBPATH", "PATH")): # dkeep is a dict associating key: path_list, where key is one item from # keep, and pat_list the associated list of paths dkeep = dict([(i, []) for i in keep]) # rdk will keep the regex to match the .bat file output line starts rdk = {} for i in keep: rdk[i] = re.compile('%s=(.*)' % i, re.I) def add_env(rmatch, key, dkeep=dkeep): plist = rmatch.group(1).split(os.pathsep) for p in plist: # Do not add empty paths (when a var ends with ;) if p: p = p.encode('mbcs') # XXX: For some reason, VC98 .bat file adds "" around the PATH # values, and it screws up the environment later, so we strip # it. 
p = p.strip('"') dkeep[key].append(p) for line in output.splitlines(): for k,v in rdk.items(): m = v.match(line) if m: add_env(m, k) return dkeep # TODO(sgk): unused def output_to_dict(output): """Given an output string, parse it to find env variables. Return a dict where keys are variable names, and values their content""" envlinem = re.compile(r'^([a-zA-Z0-9]+)=([\S\s]*)$') parsedenv = {} for line in output.splitlines(): m = envlinem.match(line) if m: parsedenv[m.group(1)] = m.group(2) return parsedenv # TODO(sgk): unused def get_new(l1, l2): """Given two lists l1 and l2, return the items in l2 which are not in l1. Order is maintained.""" # We don't try to be smart: lists are small, and this is not the bottleneck # in any case new = [] for i in l2: if i not in l1: new.append(i) return new # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
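
# Usage sketch (hypothetical values; Windows-only like the rest of this module,
# since add_env() encodes paths with the 'mbcs' codec): parse_output() reduces
# the `set` dump that get_output() captures after running a vcvars-style batch
# file down to the path-like variables the MSVC tools need.
if __name__ == '__main__':
    sample = 'INCLUDE=C:\\VC\\include;C:\\SDK\\include;\nLIB=C:\\VC\\lib\nFOO=bar'
    dirs = parse_output(sample, keep=("INCLUDE", "LIB"))
    # dirs == {'INCLUDE': ['C:\\VC\\include', 'C:\\SDK\\include'],
    #          'LIB': ['C:\\VC\\lib']}
    print dirs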
# Copyright 2020 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ # Lint as: python3 """Module for the harvest transformation. This module contains a general-purpose set of tools for transforming functions with a specific side-effect mechanism into pure functions. The names of the transformations in this module are inspired by the Sow/Reap mechanism in Mathematica. The harvest module exposes two main functions: `sow` and `harvest`. `sow` is used to tag values and `harvest` can inject values into functions or pull out tagged values. `harvest` is a very general purpose transformation purely focused on converting functions that have special side-effects (defined using `sow`) and "functionalizing" them. Specifically, a function `f :: (x: X) -> Y` has a set of defined intermediates, or `Sows`. This set can be divided into intermediates you are "collecting" and intermediates you are "injecting", or `Reaps` and `Plants` respectively. Functionalizing `f` now gives you `harvest(f) :: (plants: Plants, x: X) -> Tuple[Y, Reaps]`. Generally, most users will not need to use `harvest` directly, but will use wrappers around it. ## `sow` `sow` is the function used to tag values in a function. It takes in a single positional argument, `value`, which is returned as an output, so `sow` outside of a tracing context behaves like the identity function, i.e. `sow(x, ...) == x`. It also takes in two mandatory keyword arguments, `tag` and `name`. `tag` is a string used to namespace intermediate values in a function. For example, some intermediates may be useful for probabilistic programming (samples), and others may be useful to logging (summaries). The tag enables `harvest` to interact with only one set of intermediates at a time. The `name` is a string that describes the value you are `sow`-ing. Eventually, when calling `harvest` on a function, the `name` is used as the identifier for the intermediate value. Finally, `sow` takes in an optional string keyword argument `mode`, which is by default set to `'strict'`. The `mode` of a `sow` describes how it behaves when the same name appears multiple times. In "strict" mode, `sow` will error if the same `(tag, name)` appears more than once. Another option is `'append'`, in which all sows of the same name will be appended into a growing array. Finally, there is `'clobber'`, where only the final sown value for a given `(tag, name)` will be returned. The final optional argument for `sow` is `key`, which will automatically be tied-in to the output of `sow` to introduce a fake data-dependence. By default, it is `None`. ## `harvest` `harvest` is a function transformation that augments the behaviors of `sow`s in the function body. Recall, that by default, `sow`s act as identity functions and do not affect the semantics of a function. Harvesting `f` produces a function that can take advantage of `sow`s present in its execution. 
`harvest` is a function that takes in a function `f` and a string `tag`. `harvest` will only interact with `sow`s whose tag matches the input `tag`. The returned function can interact with the `sow`s in the function body in either of two ways. The first is via "injection", where intermediate values in the function values can be overridden. `harvest(f)` takes in an additional initial argument, `plants`, a dictionary mapping names to values. Each name in `plants` should correspond to a `sow` in `f`, and while running `harvest(f)` rather than using the value at runtime for the `sow`, we substitute in the value from the `plants` dictionary. The other way in which `harvest(f)` interacts with `sow`s is that if it encounters a `sow` whose tag matches and whose name is *not* in `plants`, it will add the output of the `sow` to a dictionary mapping the sow name to its output, called `reaps`. The `reaps` dictionary, at the end of `harvest(f)`'s execution, will contain the outputs of all `sow`s whose values were not injected, or "planted." The general convention is that, for any given execution of `harvest(f, tag=tag)`, there will be *no more remaining sows* of the given tag if the function were to be reharvested, i.e. if we were to nest harvests with the same tag `harvest(harvest(f, tag='some_tag'), tag='some_tag')`, the outer harvest would have nothing to plant or to reap. ## Examples: #### Using `sow` and `harvest` ```python def f(x): y = sow(x + 1., tag='intermediate', name='y') return y + 1. # Injecting, or "planting" a value for `y`. harvest(f, tag='intermediate')({'y': 0.}, 1.) # ==> (1., {}) harvest(f, tag='intermediate')({'y': 0.}, 5.) # ==> (1., {}) # Collecting , or "reaping" the value of `y`. harvest(f, tag='intermediate')({}, 1.) # ==> (3., {'y': 2.}) harvest(f, tag='intermediate')({}, 5.) # ==> (7., {'y': 6.}) ``` #### Using `reap` and `plant`. `reap` and `plant` are simple wrappers around `harvest`. `reap` only pulls intermediate values without injecting, and `plant` only injects values without collecting intermediate values. ```python def f(x): y = sow(x + 1., tag='intermediate', name='y') return y + 1. # Injecting, or "planting" a value for `y`. plant(f, tag='intermediate')({'y': 0.}, 1.) # ==> 1. plant(f, tag='intermediate')({'y': 0.}, 5.) # ==> 1. # Collecting , or "reaping" the value of `y`. reap(f, tag='intermediate')(1.) # ==> {'y': 2.} reap(f, tag='intermediate')(5.) # ==> {'y': 6.} ``` #### Sharp edges * `harvest` has undefined semantics under autodifferentiation. If a function you're taking the gradient of has a `sow`, it might produce unintuitive results when harvested. To better control gradient semantics, you can use `jax.custom_jvp` or `jax.custom_vjp`. The current implementation sows primals and tangents in the JVP but ignore cotangents in the VJP. These particular semantics are subject to change. * Planting values into a `pmap` is partially working. Harvest tries to plant all the values, assuming they have a leading map dimension. 
""" import collections import dataclasses import functools from typing import Any, Callable, Dict, FrozenSet, Iterable, List, Optional, Tuple, Union from jax import abstract_arrays from jax import api_util from jax import core as jax_core from jax import lax from jax import linear_util as lu from jax import tree_util from jax import util as jax_util from jax._src.lax import control_flow as lcf from jax.interpreters import ad from jax.interpreters import batching from jax.interpreters import mlir from jax.interpreters import partial_eval as pe from jax.interpreters import xla from jax.lib import xla_client as xc import jax.numpy as jnp from oryx.core import primitive as prim from oryx.core import trace_util __all__ = [ 'HarvestTrace', 'HarvestTracer', 'call_and_reap', 'harvest', 'nest', 'plant', 'reap', 'sow', ] Value = Any sow_p = jax_core.Primitive('sow') sow_p.multiple_results = True @sow_p.def_impl def _sow_impl(*args, **_): return args @sow_p.def_abstract_eval def _sow_abstract_eval(*avals, **_): return avals @functools.partial(ad.deflinear, sow_p) def _sow_transpose(cts_in, *_, **__): return cts_in def _sow_batch_rule(batched_args, batch_dims, **params): outs = sow_p.bind(*batched_args, **params) return outs, batch_dims batching.primitive_batchers[sow_p] = _sow_batch_rule xla.translations[sow_p] = lambda c, *args, **params: xc.ops.Tuple(c, args) def sow(value, *, tag: str, name: str, mode: str = 'strict', key=None): """Marks a value with a name and a tag. Args: value: A JAX value to be tagged and named. tag: a string representing the tag of the sown value. name: a string representing the name to sow the value with. mode: The mode by which to sow the value. There are three options: 1. `'strict'` - if another value is sown with the same name and tag in the same context, harvest will throw an error. 2. `'clobber'` - if another is value is sown with the same name and tag, it will replace this value 3. `'append'` - sown values of the same name and tag are appended to a growing list. Append mode assumes some ordering on the values being sown defined by data-dependence. key: an optional JAX value that will be tied into the sown value. Returns: The original `value` that was passed in. """ if key is not None: value = prim.tie_in(key, value) flat_args, in_tree = tree_util.tree_flatten(value) out_flat = sow_p.bind(*flat_args, name=name, tag=tag, mode=mode, tree=in_tree) return tree_util.tree_unflatten(in_tree, out_flat) nest_p = jax_core.CallPrimitive('nest') def _nest_impl(f, *args, **_): with jax_core.new_sublevel(): return f.call_wrapped(*args) nest_p.def_impl(_nest_impl) def _nest_translation_rule(*args, name, call_jaxpr, scope, **_): return xla._xla_call_translation_rule( # pylint: disable=protected-access *args, name=jax_util.wrap_name(name, f'nest[{scope}]'), call_jaxpr=call_jaxpr, donated_invars=(False,) * len(args)) xla.register_translation(nest_p, _nest_translation_rule) def _nest_lowering(ctx, *args, name, call_jaxpr, scope, **_): return mlir._xla_call_lower( # pylint: disable=protected-access ctx, *args, name=jax_util.wrap_name(name, f'nest[{scope}]'), call_jaxpr=call_jaxpr, donated_invars=(False,) * len(args)) mlir.register_lowering(nest_p, _nest_lowering) def _nest_transpose_rule(*args, **kwargs): return ad.call_transpose(nest_p, *args, **kwargs) ad.primitive_transposes[nest_p] = _nest_transpose_rule def nest(f, *, scope: str): """Wraps a function to create a new scope for harvested values. 
Harvested values live in one dynamic name scope (for a particular tag), and in strict mode, values with the same name cannot be collected or injected more than once. nest(f, scope=<name>) will take all tagged values in `f` and put them into a nested dictionary with key <name>. This enables having duplicate names in one namespace provided they are in different scopes. This is different from using a separate tag to namespace, as it enables creating nested/hierarchical structure within a single tag's namespace. Example: ```python def foo(x): return sow(x, tag='test', name='x') harvest(foo, tag='test')({}, 1.) # (1., {'x': 1.}) harvest(nest(foo, scope='a'), tag='test')({}, 1.) # (1., {'a': {'x': 1.}}) ``` Args: f: a function to be transformed scope: a string that will act as the parent scope of all values tagged in `f`. Returns: A semantically identical function to `f`, but when harvested, uses nested values according to the input scope. """ def wrapped(*args, **kwargs): fun = lu.wrap_init(f, kwargs) flat_args, in_tree = tree_util.tree_flatten(args) flat_fun, out_tree = api_util.flatten_fun_nokwargs(fun, in_tree) out_flat = nest_p.bind( flat_fun, *flat_args, scope=scope, name=getattr(f, '__name__', '<no name>')) return tree_util.tree_unflatten(out_tree(), out_flat) return wrapped class HarvestTrace(jax_core.Trace): """An evaluating trace that dispatches to a dynamic context.""" def pure(self, val: Value) -> 'HarvestTracer': return HarvestTracer(self, val) def sublift(self, tracer: 'HarvestTracer') -> 'HarvestTracer': return self.pure(tracer.val) def lift(self, val: Value) -> 'HarvestTracer': return self.pure(val) def process_primitive( self, primitive: jax_core.Primitive, tracers: List['HarvestTracer'], params: Dict[str, Any]) -> Union['HarvestTracer', List['HarvestTracer']]: context = trace_util.get_dynamic_context(self) custom_rule = context.get_custom_rule(primitive) if custom_rule: return custom_rule(self, *tracers, **params) return self.default_process_primitive(primitive, tracers, params) def default_process_primitive( self, primitive: jax_core.Primitive, tracers: List['HarvestTracer'], params: Dict[str, Any]) -> Union['HarvestTracer', List['HarvestTracer']]: context = trace_util.get_dynamic_context(self) vals = [t.val for t in tracers] if primitive is sow_p: outvals = context.process_sow(*vals, **params) return jax_util.safe_map(self.pure, outvals) outvals = primitive.bind(*vals, **params) if not primitive.multiple_results: outvals = [outvals] out_tracers = jax_util.safe_map(self.pure, outvals) if primitive.multiple_results: return out_tracers return out_tracers[0] def process_call(self, call_primitive: jax_core.Primitive, f: Any, tracers: List['HarvestTracer'], params: Dict[str, Any]): context = trace_util.get_dynamic_context(self) if call_primitive is nest_p: return context.process_nest(self, f, *tracers, **params) return context.process_higher_order_primitive(self, call_primitive, f, tracers, params, False) def post_process_call(self, call_primitive, out_tracers, params): vals = tuple(t.val for t in out_tracers) master = self.main def todo(x): trace = HarvestTrace(master, jax_core.cur_sublevel()) return jax_util.safe_map(functools.partial(HarvestTracer, trace), x) return vals, todo def process_map(self, call_primitive: jax_core.Primitive, f: Any, tracers: List['HarvestTracer'], params: Dict[str, Any]): context = trace_util.get_dynamic_context(self) return context.process_higher_order_primitive(self, call_primitive, f, tracers, params, True) post_process_map = post_process_call def 
process_custom_jvp_call(self, primitive, fun, jvp, tracers): # This implementation just drops the custom derivative rule. # TODO(mattjj,sharadmv): don't drop the custom derivative rule del primitive, jvp # Unused. return fun.call_wrapped(*tracers) def process_custom_vjp_call(self, primitive, fun, fwd, bwd, tracers, out_trees): # This implementation just drops the custom derivative rule. # TODO(mattjj,sharadmv): don't drop the custom derivative rule del primitive, fwd, bwd, out_trees # Unused. return fun.call_wrapped(*tracers) class HarvestTracer(jax_core.Tracer): """A `HarvestTracer` just encapsulates a single value.""" def __init__(self, trace: 'HarvestTrace', val: Value): self._trace = trace self.val = val @property def aval(self): return abstract_arrays.raise_to_shaped(jax_core.get_aval(self.val)) def full_lower(self): return self @dataclasses.dataclass(frozen=True) class HarvestSettings: """Contains the settings for a HarvestTrace.""" tag: str blocklist: FrozenSet[str] allowlist: Union[FrozenSet[str], None] exclusive: bool @dataclasses.dataclass class HarvestContext: """A context that handles `sow`s and `nest`s in a `HarvestTrace`.""" settings: HarvestSettings def process_sow(self, *values, name, tag, mode, tree): """Handles a `sow` primitive in a `HarvestTrace`.""" if mode not in {'strict', 'append', 'clobber'}: raise ValueError(f'Invalid mode: {mode}') if tag != self.settings.tag: if self.settings.exclusive: return values return sow_p.bind(*values, name=name, tag=tag, tree=tree, mode=mode) if (self.settings.allowlist is not None and name not in self.settings.allowlist): return values if name in self.settings.blocklist: return values return self.handle_sow(*values, name=name, tag=tag, tree=tree, mode=mode) def get_custom_rule(self, primitive): raise NotImplementedError def handle_sow(self, *values, name, tag, mode, tree): raise NotImplementedError def process_nest(self, trace, f, *tracers, scope, name): raise NotImplementedError def process_higher_order_primitive(self, trace: HarvestTrace, call_primitive: jax_core.Primitive, f: Any, tracers: List['HarvestTracer'], params: Dict[str, Any], is_map: bool): raise NotImplementedError reap_custom_rules = {} @dataclasses.dataclass class Reap: value: Any metadata: Dict[str, Any] @dataclasses.dataclass class ReapContext(HarvestContext): """Contains the settings and storage for the current trace in the stack.""" settings: HarvestSettings reaps: Dict[str, Reap] def get_custom_rule(self, primitive): return reap_custom_rules.get(primitive) def handle_sow(self, *values, name, tag, tree, mode): """Stores a sow in the reaps dictionary.""" del tag if name in self.reaps: raise ValueError(f'Variable has already been reaped: {name}') avals = tree_util.tree_unflatten( tree, [abstract_arrays.raise_to_shaped(jax_core.get_aval(v)) for v in values]) self.reaps[name] = Reap( tree_util.tree_unflatten(tree, values), dict(mode=mode, aval=avals)) return values def reap_higher_order_primitive(self, trace, call_primitive, f, tracers, params, is_map): """Wraps the inner function with a reap trace.""" name = jax_util.wrap_name(params.pop('name', f.__name__), 'reap') vals = [t.val for t in tracers] f, aux = reap_eval(f, trace, self.settings) if is_map: out_axes_thunk = params['out_axes_thunk'] @jax_util.as_hashable_function(closure=('harvest', out_axes_thunk)) def new_out_axes_thunk(): out_axes = out_axes_thunk() assert all(out_axis == 0 for out_axis in out_axes) out_tree, _ = aux() return (0,) * out_tree.num_leaves params = dict(params, 
          out_axes_thunk=new_out_axes_thunk)
    out_flat = call_primitive.bind(f, *vals, name=name, **params)
    out_tree, metadata = aux()
    out_vals, reaps = tree_util.tree_unflatten(out_tree, out_flat)
    out_tracers = jax_util.safe_map(trace.pure, out_vals)
    reap_tracers = tree_util.tree_map(trace.pure, reaps)
    return out_tracers, reap_tracers, metadata

  def process_nest(self, trace, f, *tracers, scope, name, **params):
    out_tracers, reap_tracers, _ = self.reap_higher_order_primitive(
        trace, nest_p, f, tracers, dict(params, name=name, scope=scope), False)
    tag = self.settings.tag
    if reap_tracers:
      flat_reap_tracers, reap_tree = tree_util.tree_flatten(reap_tracers)
      trace.process_primitive(
          sow_p, flat_reap_tracers,
          dict(name=scope, tag=tag, tree=reap_tree, mode='strict'))
    return out_tracers

  def process_higher_order_primitive(self, trace, call_primitive, f, tracers,
                                     params, is_map):
    out_tracers, reap_tracers, metadata = self.reap_higher_order_primitive(
        trace, call_primitive, f, tracers, params, is_map)
    tag = self.settings.tag
    for k, v in reap_tracers.items():
      flat_reap_tracers, reap_tree = tree_util.tree_flatten(v)
      trace.process_primitive(
          sow_p, flat_reap_tracers,
          dict(name=k, tag=tag, tree=reap_tree, mode=metadata[k]['mode']))
    return out_tracers


@lu.transformation
def reap_function(main: jax_core.MainTrace, settings: HarvestSettings,
                  return_metadata: bool, args: Iterable[Any]):
  """A function transformation that returns reap values."""
  trace = HarvestTrace(main, jax_core.cur_sublevel())
  in_tracers = jax_util.safe_map(trace.pure, args)
  context = ReapContext(settings, {})
  with trace_util.new_dynamic_context(main, context):
    ans = yield in_tracers, {}
    out_tracers = jax_util.safe_map(trace.full_raise, ans)
    reap_tracers = tree_util.tree_map(lambda x: trace.full_raise(x.value),
                                      context.reaps)
    reap_metadata = tree_util.tree_map(lambda x: x.metadata, context.reaps)
    del main
  out_values, reap_values = tree_util.tree_map(lambda x: x.val,
                                               (out_tracers, reap_tracers))
  if return_metadata:
    out = (out_values, reap_values, reap_metadata)
  else:
    out = (out_values, reap_values)
  yield out


def reap_eval(
    f: lu.WrappedFun, trace: HarvestTrace,
    settings: HarvestSettings) -> Tuple[lu.WrappedFun, Callable[[], Any]]:
  f = reap_function(f, trace.main, settings, True)
  return reap_wrapper(f, trace)


@lu.transformation_with_aux
def reap_wrapper(trace: HarvestTrace, *args):
  del trace
  out, reaps, metadata = yield (args,), {}
  out_flat, out_tree = tree_util.tree_flatten((out, reaps))
  yield out_flat, (out_tree, metadata)


def call_and_reap(f,
                  *,
                  tag: str,
                  allowlist: Optional[Iterable[str]] = None,
                  blocklist: Iterable[str] = frozenset(),
                  exclusive: bool = False):
  """Transforms a function into one that additionally returns its sown values.

  Args:
    f: a function to be transformed.
    tag: a string tag; only sown values with `tag` will be reaped.
    allowlist: an optional sequence of string names, which if provided will
      enforce that only sows with names in the allowlist will be reaped.
    blocklist: an optional sequence of string names, which if provided will
      enforce that no sows with names in the blocklist will be reaped.
    exclusive: determines whether or not to execute in "exclusive" mode where
      other tags are removed during execution.

  Returns:
    A new function that executes the original and returns its sown values as
    an additional return value.
""" blocklist = frozenset(blocklist) if allowlist is not None: allowlist = frozenset(allowlist) settings = HarvestSettings(tag, blocklist, allowlist, exclusive) def wrapped(*args, **kwargs): fun = lu.wrap_init(f, kwargs) flat_args, in_tree = tree_util.tree_flatten(args) flat_fun, out_tree = api_util.flatten_fun_nokwargs(fun, in_tree) with jax_core.new_main(HarvestTrace) as main: flat_fun = reap_function(flat_fun, main, settings, False) out_flat, reaps = flat_fun.call_wrapped(flat_args) del main return tree_util.tree_unflatten(out_tree(), out_flat), reaps return wrapped def reap(f, *, tag: str, allowlist: Optional[Iterable[str]] = None, blocklist: Iterable[str] = frozenset(), exclusive: bool = False): """Transforms a function into one that returns its sown values. Args: f: a function to be transformed. tag: a string tag; only sown values with `tag` will be reaped. allowlist: an optional sequence of string names, which if provided will enforce that only sows with names in the allowlist will be reaped. blocklist: an optional sequence of string names, which if provided will enforce that only no sows with names in the blocklist will be reaped. exclusive: determines whether or not to execute in "exclusive" mode where other tags are removed during execution. Returns: A new function that executes the original and returns its sown values. """ def wrapped(*args, **kwargs): return call_and_reap( f, tag=tag, allowlist=allowlist, blocklist=blocklist, exclusive=exclusive)(*args, **kwargs)[1] return wrapped @lu.transformation_with_aux def _reap_metadata_wrapper(*args): out, reaps, metadata = yield (args,), {} yield (out, reaps), metadata def _get_harvest_metadata(closed_jaxpr, settings, *args): """Probes a jaxpr for metadata like its sown values.""" fun = lu.wrap_init(jax_core.jaxpr_as_fun(closed_jaxpr)) with jax_core.new_main(HarvestTrace) as main: settings = HarvestSettings(settings.tag, settings.blocklist, settings.allowlist, True) fun = reap_function(fun, main, settings, True) fun, aux = _reap_metadata_wrapper(fun) flat_args, in_tree = tree_util.tree_flatten(args) flat_fun, out_tree = api_util.flatten_fun_nokwargs(fun, in_tree) in_avals = jax_util.safe_map( lambda a: abstract_arrays.raise_to_shaped(jax_core.get_aval(a)), flat_args) pe.trace_to_jaxpr_final(flat_fun, in_avals) metadata = aux() out_tree() return metadata def _reap_scan_rule(trace: HarvestTrace, *tracers, length, reverse, jaxpr, num_consts, num_carry, linear, unroll): """Reaps the body of a scan to pull out `clobber` and `append` sows.""" const_tracers, carry_tracers, xs_tracers = jax_util.split_list( tracers, [num_consts, num_carry]) _, carry_avals, xs_avals = tree_util.tree_map( lambda x: x.aval, (const_tracers, carry_tracers, xs_tracers)) const_vals, carry_vals, xs_vals = tree_util.tree_map( lambda x: x.val, (const_tracers, carry_tracers, xs_tracers)) context = trace_util.get_dynamic_context(trace) settings = context.settings x_tracers = [t[0] if hasattr(t, '_getitem') else t for t in xs_tracers] x_avals = [t.aval for t in x_tracers] x_vals = [t.val for t in x_tracers] metadata = _get_harvest_metadata(jaxpr, settings, *(const_vals + carry_vals + x_vals)) reap_modes = collections.defaultdict(set) reap_carry_avals = {} for name, meta in metadata.items(): mode = meta['mode'] aval = meta['aval'] if mode == 'strict': raise ValueError(f'Cannot use strict mode for \'{name}\' inside `scan`.') reap_modes[mode].add(name) if mode == 'clobber': reap_carry_avals[name] = aval body_fun = jax_core.jaxpr_as_fun(jaxpr) reap_carry_flat_avals, _ = 
tree_util.tree_flatten(reap_carry_avals) reap_carry_in_tree = tree_util.tree_structure( ((carry_avals, reap_carry_avals), xs_avals)) def new_body(carry, x): carry, _ = carry all_values = const_vals + tree_util.tree_leaves((carry, x)) out, reaps = call_and_reap( body_fun, tag=settings.tag, allowlist=settings.allowlist, blocklist=settings.blocklist, exclusive=settings.exclusive)(*all_values) carry_out, y = jax_util.split_list(out, [num_carry]) carry_reaps = { name: val for name, val in reaps.items() if name in reap_modes['clobber'] } xs_reaps = { name: val for name, val in reaps.items() if name in reap_modes['append'] } return (carry_out, carry_reaps), (y, xs_reaps) new_body_jaxpr, consts, out_tree = lcf._initial_style_jaxpr( # pylint: disable=protected-access new_body, reap_carry_in_tree, tuple(carry_avals + reap_carry_flat_avals + x_avals)) dummy_reap_carry_vals = tree_util.tree_map( lambda x: jnp.zeros(x.shape, x.dtype), reap_carry_flat_avals) out = lax.scan_p.bind( *(consts + carry_vals + dummy_reap_carry_vals + xs_vals), reverse=reverse, length=length, jaxpr=new_body_jaxpr, num_consts=len(consts), num_carry=len(carry_vals + dummy_reap_carry_vals), linear=(linear[:len(consts)] + (False,) * len(dummy_reap_carry_vals) + linear[len(consts):]), unroll=unroll) (carry_out, carry_reaps), (ys, ys_reaps) = tree_util.tree_unflatten(out_tree, out) (carry_out, carry_reaps), (ys, ys_reaps) = tree_util.tree_map( trace.pure, ((carry_out, carry_reaps), (ys, ys_reaps))) for k, v in {**carry_reaps, **ys_reaps}.items(): sow(v, tag=settings.tag, mode=metadata[k]['mode'], name=k) return carry_out + ys reap_custom_rules[lcf.scan_p] = _reap_scan_rule def _reap_while_rule(trace: HarvestTrace, *tracers, cond_jaxpr, body_jaxpr, cond_nconsts, body_nconsts): """Reaps the body of a while loop to get the reaps of the final iteration.""" cond_const_tracers, body_const_tracers, init_tracers = jax_util.split_list( tracers, [cond_nconsts, body_nconsts]) _, init_avals = tree_util.tree_map(lambda x: x.aval, (body_const_tracers, init_tracers)) cond_const_vals, body_const_vals, init_vals = tree_util.tree_map( lambda x: x.val, (cond_const_tracers, body_const_tracers, init_tracers)) context = trace_util.get_dynamic_context(trace) settings = context.settings body_metadata = _get_harvest_metadata(body_jaxpr, settings, *(body_const_tracers + init_tracers)) for k, meta in body_metadata.items(): mode = meta['mode'] if mode != 'clobber': raise ValueError( f'Must use clobber mode for \'{k}\' inside of a `while_loop`.') reap_avals = {k: v['aval'] for k, v in body_metadata.items()} cond_fun = jax_core.jaxpr_as_fun(cond_jaxpr) body_fun = jax_core.jaxpr_as_fun(body_jaxpr) reap_settings = dict( tag=settings.tag, allowlist=settings.allowlist, blocklist=settings.blocklist, exclusive=settings.exclusive) def new_cond(carry, _): return cond_fun(*(cond_const_vals + carry)) def new_body(carry, _): carry, reaps = call_and_reap(body_fun, **reap_settings)(*(body_const_vals + carry)) return (carry, reaps) new_in_avals, new_in_tree = tree_util.tree_flatten((init_avals, reap_avals)) new_cond_jaxpr, cond_consts, _ = lcf._initial_style_jaxpr( # pylint: disable=protected-access new_cond, new_in_tree, tuple(new_in_avals)) new_body_jaxpr, body_consts, out_tree = lcf._initial_style_jaxpr( # pylint: disable=protected-access new_body, new_in_tree, tuple(new_in_avals)) dummy_reap_vals = tree_util.tree_map(lambda x: jnp.zeros(x.shape, x.dtype), reap_avals) new_in_vals = tree_util.tree_leaves((init_vals, dummy_reap_vals)) out = lax.while_p.bind( *(cond_consts + 
body_consts + new_in_vals), cond_nconsts=len(cond_consts), body_nconsts=len(body_consts), cond_jaxpr=new_cond_jaxpr, body_jaxpr=new_body_jaxpr) out = jax_util.safe_map(trace.pure, out) out, reaps = tree_util.tree_unflatten(out_tree, out) for k, v in reaps.items(): sow(v, name=k, tag=settings.tag, mode=body_metadata[k]['mode']) return out reap_custom_rules[lcf.while_p] = _reap_while_rule def _check_branch_metadata(branch_metadatas): """Checks that a set of harvest metadata are consistent with each other.""" first_branch_meta = branch_metadatas[0] for branch_metadata in branch_metadatas[1:]: if len(branch_metadata) != len(first_branch_meta): raise ValueError('Mismatching number of `sow`s between branches.') for name, meta in branch_metadata.items(): if name not in first_branch_meta: raise ValueError(f'Missing sow in branch: \'{name}\'.') first_meta_aval = first_branch_meta[name]['aval'] if meta['aval'].shape != first_meta_aval.shape: raise ValueError(f'Mismatched shape between branches: \'{name}\'.') if meta['aval'].dtype != first_meta_aval.dtype: raise ValueError(f'Mismatched dtype between branches: \'{name}\'.') def _reap_cond_rule(trace, *tracers, branches, linear): """Reaps each path of the `cond`.""" index_tracer, ops_tracers = tracers[0], tracers[1:] index_val, ops_vals = tree_util.tree_map(lambda x: x.val, (index_tracer, ops_tracers)) _, ops_avals = tree_util.tree_map(lambda x: x.aval, (index_tracer, ops_tracers)) context = trace_util.get_dynamic_context(trace) settings = context.settings reap_settings = dict( tag=settings.tag, allowlist=settings.allowlist, blocklist=settings.blocklist, exclusive=settings.exclusive) branch_metadatas = tuple( _get_harvest_metadata(branch, settings, *ops_tracers) for branch in branches) _check_branch_metadata(branch_metadatas) branch_funs = tuple(map(jax_core.jaxpr_as_fun, branches)) reaped_branches = tuple( call_and_reap(f, **reap_settings) for f in branch_funs) in_tree = tree_util.tree_structure(ops_avals) new_branch_jaxprs, consts, out_trees = ( lcf._initial_style_jaxprs_with_common_consts( # pylint: disable=protected-access reaped_branches, in_tree, ops_avals, lax.cond_p.name)) out = lax.cond_p.bind( index_val, *(tuple(consts) + ops_vals), branches=tuple(new_branch_jaxprs), linear=(False,) * len(tuple(consts) + linear)) out = jax_util.safe_map(trace.pure, out) out, reaps = tree_util.tree_unflatten(out_trees[0], out) for k, v in reaps.items(): sow(v, name=k, tag=settings.tag, mode=branch_metadatas[0][k]['mode']) return out reap_custom_rules[lcf.cond_p] = _reap_cond_rule plant_custom_rules = {} @dataclasses.dataclass class PlantContext(HarvestContext): """Contains the settings and storage for the current trace in the stack.""" settings: HarvestSettings plants: Dict[str, Any] def __post_init__(self): self._already_planted = set() def get_custom_rule(self, primitive): return plant_custom_rules.get(primitive) def handle_sow(self, *values, name, tag, tree, mode): """Returns the value stored in the plants dictionary.""" if name in self._already_planted: raise ValueError(f'Variable has already been planted: {name}') if name in self.plants: self._already_planted.add(name) return tree_util.tree_leaves(self.plants[name]) return sow_p.bind(*values, name=name, tag=tag, mode=mode, tree=tree) def process_nest(self, trace, f, *tracers, scope, name, **params): return self.process_higher_order_primitive( trace, nest_p, f, tracers, dict(params, name=name, scope=scope), False) def process_higher_order_primitive(self, trace, call_primitive, f, tracers, params, is_map): 
    del is_map
    name = jax_util.wrap_name(params.pop('name', f.__name__), 'reap')
    context = trace_util.get_dynamic_context(trace)
    vals = [t.val for t in tracers]
    plants = context.plants
    if 'in_axes' in params:
      # TODO(b/199459308): figure out if invars are mapped or unmapped
      params = dict(
          params,
          in_axes=(0,) * len(tree_util.tree_leaves(plants)) + params['in_axes'])
    if 'donated_invars' in params:
      params = dict(params)
      params['donated_invars'] = (
          (False,) * len(tree_util.tree_leaves(plants)) +
          params['donated_invars'])
    elif call_primitive is nest_p:
      plants = plants.get(params['scope'], {})
    all_vals, all_tree = tree_util.tree_flatten((plants, vals))
    f = plant_eval(f, trace, self.settings, all_tree)
    out_vals = call_primitive.bind(f, *all_vals, name=name, **params)
    return jax_util.safe_map(trace.pure, out_vals)


@lu.transformation
def plant_function(main: jax_core.MainTrace, settings: HarvestSettings,
                   in_tree: Any, args: Iterable[Any]):
  """A function transformation that injects values in place of sows."""
  trace = HarvestTrace(main, jax_core.cur_sublevel())
  plants, args = tree_util.tree_unflatten(in_tree, args)
  args = jax_util.safe_map(trace.pure, args)
  context = PlantContext(settings, plants)
  with trace_util.new_dynamic_context(main, context):
    ans = yield args, {}
    out_tracers = jax_util.safe_map(trace.full_raise, ans)
    del main
  yield [t.val for t in out_tracers]


def plant_eval(f: lu.WrappedFun, trace: HarvestTrace, settings: HarvestSettings,
               all_tree: Any) -> Tuple[lu.WrappedFun, Callable[[], Any]]:
  f = plant_function(f, trace.main, settings, all_tree)
  return plant_wrapper(f)


@lu.transformation
def plant_wrapper(*args):
  out = yield (args,), {}
  yield out


def plant(f,
          *,
          tag: str,
          allowlist: Optional[Iterable[str]] = None,
          blocklist: Iterable[str] = frozenset(),
          exclusive: bool = False):
  """Transforms a function into one that injects values in place of sown ones.

  Args:
    f: a function to be transformed.
    tag: a string tag; only sown values with `tag` will be planted.
    allowlist: an optional sequence of string names, which if provided will
      enforce that only sows with names in the allowlist will be planted.
    blocklist: an optional sequence of string names, which if provided will
      enforce that no sows with names in the blocklist will be planted.
    exclusive: determines whether or not to execute in "exclusive" mode where
      other tags are removed during execution.

  Returns:
    A new function that takes in a dictionary of planted values in addition to
    the original function's inputs, and injects the planted values in place of
    sown values.
""" blocklist = frozenset(blocklist) if allowlist is not None: allowlist = frozenset(allowlist) settings = HarvestSettings(tag, blocklist, allowlist, exclusive) def wrapped(plants, *args, **kwargs): fun = lu.wrap_init(f, kwargs) flat_args, in_tree = tree_util.tree_flatten(args) flat_fun, out_tree = api_util.flatten_fun_nokwargs(fun, in_tree) all_args, all_tree = tree_util.tree_flatten((plants, flat_args)) with jax_core.new_main(HarvestTrace) as main: flat_fun = plant_function(flat_fun, main, settings, all_tree) out_flat = flat_fun.call_wrapped(all_args) del main return tree_util.tree_unflatten(out_tree(), out_flat) return wrapped def _plant_scan_rule(trace: HarvestTrace, *tracers, length, reverse, jaxpr, num_consts, num_carry, linear, unroll): """Injects values into a scan according to their sow mode.""" const_tracers, carry_tracers, xs_tracers = jax_util.split_list( tracers, [num_consts, num_carry]) carry_avals, xs_avals = tree_util.tree_map(lambda x: x.aval, (carry_tracers, xs_tracers)) const_vals, carry_vals, xs_vals = tree_util.tree_map( lambda x: x.val, (const_tracers, carry_tracers, xs_tracers)) context = trace_util.get_dynamic_context(trace) settings = context.settings x_tracers = [t[0] if hasattr(t, '_getitem') else t for t in xs_tracers] x_avals = [t.aval for t in x_tracers] metadata = _get_harvest_metadata(jaxpr, settings, *(const_tracers + carry_tracers + x_tracers)) plants = context.plants plant_modes = collections.defaultdict(set) plant_xs_avals = {} for name, meta in metadata.items(): mode = meta['mode'] aval = meta['aval'] if mode == 'strict': raise ValueError(f'Cannot use strict mode for \'{name}\' inside `scan`.') plant_modes[mode].add(name) if mode == 'append' and name in plants: plant_xs_avals[name] = aval body_fun = jax_core.jaxpr_as_fun(jaxpr) clobber_plants = { name: value for name, value in plants.items() if name in plant_modes['clobber'] } append_plants = { name: value for name, value in plants.items() if name in plant_modes['append'] } plant_xs_flat_avals, _ = tree_util.tree_flatten(plant_xs_avals) plant_xs_in_tree = tree_util.tree_structure( (carry_avals, (xs_avals, plant_xs_avals))) def new_body(carry, x): x, plants = x all_plants = {**plants, **clobber_plants} all_values = const_vals + tree_util.tree_leaves((carry, x)) out = plant( body_fun, tag=settings.tag, allowlist=settings.allowlist, blocklist=settings.blocklist, exclusive=settings.exclusive)(all_plants, *all_values) carry_out, y = jax_util.split_list(out, [num_carry]) return carry_out, y new_body_jaxpr, consts, _ = lcf._initial_style_jaxpr( # pylint: disable=protected-access new_body, plant_xs_in_tree, tuple(carry_avals + x_avals + plant_xs_flat_avals)) plant_vals = tree_util.tree_leaves(append_plants) out = lcf.scan_p.bind( *(consts + carry_vals + xs_vals + plant_vals), reverse=reverse, length=length, jaxpr=new_body_jaxpr, num_consts=len(consts), num_carry=num_carry, linear=linear + (False,) * len(plant_vals), unroll=unroll) return out plant_custom_rules[lcf.scan_p] = _plant_scan_rule def _plant_while_rule(trace: HarvestTrace, *tracers, cond_jaxpr, body_jaxpr, cond_nconsts, body_nconsts): """Injects values into a while loop, overriding values for all iterations.""" cond_const_tracers, body_const_tracers, init_tracers = jax_util.split_list( tracers, [cond_nconsts, body_nconsts]) init_avals = tree_util.tree_map(lambda x: x.aval, init_tracers) cond_const_vals, body_const_vals, init_vals = tree_util.tree_map( lambda x: x.val, (cond_const_tracers, body_const_tracers, init_tracers)) context = 
trace_util.get_dynamic_context(trace) settings = context.settings body_metadata = _get_harvest_metadata(body_jaxpr, settings, *(body_const_tracers + init_tracers)) for k, meta in body_metadata.items(): mode = meta['mode'] if mode != 'clobber': raise ValueError( f'Must use clobber mode for \'{k}\' inside of a `while_loop`.') body_fun = jax_core.jaxpr_as_fun(body_jaxpr) plant_settings = dict( tag=settings.tag, allowlist=settings.allowlist, blocklist=settings.blocklist, exclusive=settings.exclusive) plants = context.plants def new_body(*carry): carry = plant(body_fun, **plant_settings)(plants, *(tuple(body_const_vals) + carry)) return carry in_tree = tree_util.tree_structure(init_avals) new_body_jaxpr, new_body_consts, _ = lcf._initial_style_jaxpr( # pylint: disable=protected-access new_body, in_tree, tuple(init_avals)) out = lcf.while_p.bind( *(cond_const_vals + new_body_consts + init_vals), cond_nconsts=len(cond_const_vals), body_nconsts=len(new_body_consts), cond_jaxpr=cond_jaxpr, body_jaxpr=new_body_jaxpr) return jax_util.safe_map(trace.pure, out) plant_custom_rules[lcf.while_p] = _plant_while_rule def _plant_cond_rule(trace, *tracers, branches, linear): """Injects the same values into both branches of a conditional.""" index_tracer, ops_tracers = tracers[0], tracers[1:] index_val, ops_vals = tree_util.tree_map(lambda x: x.val, (index_tracer, ops_tracers)) ops_avals = tree_util.tree_map(lambda x: x.aval, ops_tracers) context = trace_util.get_dynamic_context(trace) settings = context.settings plant_settings = dict( tag=settings.tag, allowlist=settings.allowlist, blocklist=settings.blocklist, exclusive=settings.exclusive) branch_metadatas = tuple( _get_harvest_metadata(branch, settings, *ops_tracers) for branch in branches) _check_branch_metadata(branch_metadatas) plants = context.plants branch_funs = tuple(map(jax_core.jaxpr_as_fun, branches)) planted_branches = tuple( functools.partial(plant(f, **plant_settings), plants) for f in branch_funs) in_tree = tree_util.tree_structure(ops_avals) new_branch_jaxprs, consts, _ = ( lcf._initial_style_jaxprs_with_common_consts( # pylint: disable=protected-access planted_branches, in_tree, ops_avals, lax.cond_p.name)) out = lax.cond_p.bind( index_val, *(tuple(consts) + ops_vals), branches=tuple(new_branch_jaxprs), linear=(False,) * len(tuple(consts) + linear)) return jax_util.safe_map(trace.pure, out) plant_custom_rules[lcf.cond_p] = _plant_cond_rule def harvest(f, *, tag: str, allowlist: Optional[Iterable[str]] = None, blocklist: Iterable[str] = frozenset(), exclusive: bool = False): kwargs = dict( tag=tag, allowlist=allowlist, blocklist=blocklist, exclusive=exclusive) return call_and_reap(plant(f, **kwargs), **kwargs)
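# -----------------------------------------------------------------------------
# Illustrative usage sketch: how `sow`, `reap`, `call_and_reap`, `plant`, and
# `harvest` compose. The function `step` and the tag 'intermediates' are
# hypothetical names chosen for the example; the expected results follow the
# same convention as the example in `nest`'s docstring.
#
#   def step(x):
#     y = sow(x ** 2., tag='intermediates', name='y')
#     return y + 1.
#
#   reap(step, tag='intermediates')(2.)               # {'y': 4.}
#   call_and_reap(step, tag='intermediates')(2.)      # (5., {'y': 4.})
#   plant(step, tag='intermediates')({'y': 10.}, 2.)  # 11.
#   harvest(step, tag='intermediates')({}, 2.)        # (5., {'y': 4.})
# -----------------------------------------------------------------------------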
# Created by Ryan van Huuksloot # Current Version: Version 3 ############################################################ # This section is for imports. ############################################################ import os import json import urllib from pprint import pprint import contextlib import datetime import time ############################################################ # Insert API Key here between quotations api_key = "" ############################################################ ########################################################### # This section is for Object Oriented Programming. ########################################################### class Show: def __init__(self, id_tag, name, currentSeason, currentEpisode): """This function stores object variables.""" self.name = name self.id_tag = id_tag self.currentSeason = currentSeason self.currentEpisode = currentEpisode def retrieveActualStatistics(self): """This function retrieves the latest season and episode number from the movie database.""" url_TMDB = mergeURL_ID(self.id_tag) JSONText = readWebPage(url_TMDB) today = datetime.date.today().isoformat() self.actualSeason = JSONText["number_of_seasons"] i = 0 for item in JSONText["seasons"]: if item["air_date"] <= today and item["air_date"] != None and item["season_number"] != 0: i += 1 self.actualSeason = i if JSONText["status"] == "Returning Series": returning = True elif JSONText["status"] == "Canceled": returning = False print "Cancelled : " + JSONText["name"] else: returning = False url_TMDB = mergeURL_ID_Season(self.id_tag, self.actualSeason) JSONText = readWebPage(url_TMDB) self.actualEpisode = 1 for item in JSONText["episodes"]: if item["air_date"] <= today and item["air_date"] != None: self.actualEpisode = item["episode_number"] try: self.actualEpisode = JSONText["episodes"][self.actualEpisode]["episode_number"] except IndexError: if returning: self.actualEpisode += 1 def compareStatistics(self): """This function compares the statistics of the shows that have been added to data to the movie database and returns the episode that you have to watch.""" check = True while check: if int(self.currentSeason) == self.actualSeason: if int(self.currentEpisode) < self.actualEpisode: print self.displayToWatch() check = False #change checkEpisode elif int(self.currentSeason) < self.actualSeason: if checkEpisode(self.id_tag, self.currentSeason, self.currentEpisode): print self.displayToWatch() check = False else: self.currentSeason = int(self.currentSeason) + 1 self.currentEpisode = 1 ## url_TMDB = mergeURL_ID_Season(self.id_tag, self.currentSeason) ## JSONText = readWebPage(url_TMDB) ## today = datetime.date.today().isoformat() ## for item in JSONText["episodes"]: ## if item["air_date"] <= today and item["air_date"] != None: ## tempEpisode = item["episode_number"] ## if self.currentEpisode <= tempEpisode: ## print self.displayToWatch() ## check = False ## else: ## self.currentSeason = int(self.currentSeason) + 1 ## self.currentEpisode = 1 ## filename = os.path.abspath(os.path.join("data\shows", self.id_tag))+".json" ## updateJSONFile(filename, self.id_tag) else: check = False def watched(self): self.currentEpisode = int(self.currentEpisode) + 1 self.compareStatistics() filename = os.path.abspath(os.path.join("data\shows", self.id_tag))+".json" updateJSONFile(filename, self.id_tag) def displayToWatch(self): """This function displays the current season and episode you have to watch.""" return "ID : " + str(self.id_tag) + " Name : " + self.name + " Season : " + 
str(self.currentSeason) + " Episode : " + str(self.currentEpisode) def displayShowCurrent(self): """This function displays the current season and episode you are on.""" return self.name + " [" + str(self.currentSeason) + "|" + str(self.currentEpisode) + "]" def displayShowActual(self): """This function displays the actual season and episode that the show is on.""" return self.name + " [" + str(self.actualSeason) + "|" + str(self.actualEpisode) + "]" ########################################################### # End of Object Oriented Programming Section. ########################################################### ########################################################### # This section is for JSON File Manipulation. ########################################################### def updateJSONFile(filename, id_tag = 1): """This function updates the JSON files that stores all of the data.""" JSONFile = open(filename, "r") data = json.load(JSONFile) JSONFile.close() if id_tag != 1: data["currentEpisode"] = Shows[id_tag].currentEpisode data["currentSeason"] = Shows[id_tag].currentSeason JSONFile = open(filename, "w+") JSONFile.write(json.dumps(data)) JSONFile.close() def readJSONFile(filename): """This function reads the data from the JSON files.""" JSONFile = open(filename, "r") data = json.load(JSONFile) JSONFile.close() return data def writeJSONFile(id_tag, data): filename = os.path.abspath(os.path.join("data\shows", id_tag))+".json" jsonFile = open(filename, "w+") jsonFile.write(json.dumps(data)) jsonFile.close() ########################################################### # End of JSON File Manipulation Section. ########################################################### Shows = {} def main(): try: print "Updating your TV Shows. One moment please!" print "="*50 i = 0 for filename in os.listdir('data\shows'): if i == 20: time.sleep(5.5) i = 0 else: i+=1 filename = os.path.abspath(os.path.join("data\shows", filename)) data = readJSONFile(filename) id_tag = data["id_tag"] name = data["name"] currentSeason = data["currentSeason"] currentEpisode = data["currentEpisode"] Shows[id_tag] = Show(id_tag, name, currentSeason, currentEpisode) try: Shows[id_tag].retrieveActualStatistics() except KeyError: pass try: ## print Shows[id_tag].displayShowCurrent(), Shows[id_tag].displayShowActual() Shows[id_tag].compareStatistics() except AttributeError: pass print "All your shows are updated!" print "="*50 check = True print "Which episodes have you watched? Input the ID Number one at a time. Type Esc to escape." while check: checkWatched = raw_input("ID Number : ").title() if checkWatched == "Esc": check = False elif checkWatched.isdigit(): try: Shows[checkWatched].watched() except KeyError: print "You didn't enter a valid ID!" except IOError: print "You currently don't have internet. Try again once you have internet!" def RyanUpdate(): """This function takes the data from a txt file in a very specific format and creates a JSON structure for the data within data coding the files based on their id_tag. The function then updates and compares the statistics.""" file_name = "shows.txt" full_file = os.path.abspath(os.path.join("data", file_name)) content = readData(full_file) print "Updating your TV Shows. One moment please!" 
for item in content: id_tag = item[:item.index(":")-1] name = item[item.index(":")+2:item.index("[")-1] currentSeason = item[item.index("[")+1:item.index("|")] currentEpisode = item[item.index("|")+1:item.index("]")] data = {"id_tag":id_tag, "name":name, "currentSeason":currentSeason, "currentEpisode":currentEpisode} writeJSONFile(id_tag, data) Shows[id_tag] = Show(id_tag, name, currentSeason, currentEpisode) Shows[id_tag].retrieveActualStatistics() Shows[id_tag].compareStatistics() print "All your shows have been updated!" def readData(filename): """This function opens and read the file required to get all the data to make the bingo cards (the individual items that will be called later. These items are added to an allData database.""" fileIn = open(filename, 'r') allData = [] line = fileIn.readline().strip() while line != "": # Checks to make sure the line isn't empty if line != "": allData.append(line) line = fileIn.readline().strip() fileIn.close() return allData def writeData(filename, data): """This function opens and writes to the file specified.""" fileIn = open(filename, 'w') for item in data: fileIn.write(item + " : " + data[item]) fileIn.close() print "ids are written to target location" ########################################################### # This section is for merging URLS. ########################################################### def mergeURL_Search(name, page=1): """This function takes a TV show name and a page number if provided and returns the corresponding TMDB url.""" url_TMDB = "http://api.themoviedb.org/3/search/tv?api_key=" url_query = "&query=" url_page = "&page=" return (url_TMDB + str(api_key) + url_query + name + url_page + str(page)) def mergeURL_ID(id_tag): """This function takes a TV show id and returns the basic url.""" url_TMDB = "http://api.themoviedb.org/3/tv/" url_api = "?api_key=" return (url_TMDB + str(id_tag) + url_api + str(api_key)) def mergeURL_ID_Season(id_tag, season): """This function takes a TV show id and season and returns the basic url including season.""" url_TMDB = "http://api.themoviedb.org/3/tv/" url_season = "/season/" url_api = "?api_key=" return (url_TMDB + str(id_tag) + url_season + str(season) + url_api + str(api_key)) ############################################################ # End of merging URLs section. ############################################################ def readWebPage(url_TMDB): with contextlib.closing(urllib.urlopen(url_TMDB)) as htmlFileIn: JSONText = json.load(htmlFileIn) return JSONText def chooseShow(name): """This function allows the user to view all of the results of the url search and choose which TV show is the correct one based on the search parameter name. This function returns the name and the TMDB id tag.""" url_TMDB = mergeURL_Search(name) singlePage = False while not singlePage: JSONText = readWebPage(url_TMDB) i = 0 for item in JSONText["results"]: print "="*50 print item["name"] if item["overview"] != "": print "-"*len(item["name"]) print item["overview"] print "="*50 userInput = raw_input("Is this your show? 
(Y/N): ").upper() if userInput == "Y": return item["name"], item["id"] i += 1 if i == 20 and (int(JSONText["page"]) < int(JSONText["total_pages"])): url_TMDB = mergeURL_Search(name, int(JSONText["page"]) + 1) singlePage = False else: singlePage = True return "name", "id" def checkSeason(id_tag, season): url_TMDB = mergeURL_ID(id_tag) JSONText = readWebPage(url_TMDB) if JSONText["number_of_seasons"] >= int(season): return True, False elif (JSONText["number_of_seasons"] + 1) == int(season) and JSONText["status"] == "Returning Series": return True, True else: return False, False def checkEpisode(id_tag, season, episode): url_TMDB = mergeURL_ID(id_tag) JSONText = readWebPage(url_TMDB) try: if JSONText["seasons"][int(season)-1]["episode_count"] >= int(episode): return True else: return False except KeyError: pass def addShow(): """This function adds a TV show to your current list of TV shows.""" # Checks TV Show name nameOK = False while not nameOK: name, id_tag = chooseShow(str(raw_input("Name of the show: ")).title()) if name != "name" and id_tag != "id": nameOK = True else: print "You didn't enter a valid show name or we could not find your TV show." # Checks to make sure that the show has this valid season seasonOK = False while not seasonOK: season = raw_input("What season are you currently on: ") if season.isdigit() and int(season) > 0: seasonOK, newSeason = checkSeason(id_tag, season) if not seasonOK: print "You didn't enter a valid season silly!" else: print "You didn't enter a valid number." # Checks to make sure that the TV show has this valid episode in this season if newSeason: episode = 1 print "You are on a new season. You will start with episode 1 when it comes out!" else: episodeOK = False while not episodeOK: episode = raw_input("What episode are you currently on: ") if episode.isdigit(): episodeOK = checkEpisode(id_tag, season, episode) if not episodeOK: print "You didn't enter a valid episode number." else: print "You didn't enter a valid episode number." data = {"id_tag":str(id_tag), "name":name, "currentSeason":season, "currentEpisode":episode} writeJSONFile(str(id_tag), data) def removeShow(): nameOK = False while not nameOK: name, id_tag = chooseShow(str(raw_input("Name of the show: ")).title()) if name != "name" and id_tag != "id": nameOK = True else: print "You didn't enter a valid show name or we could not find your TV show." filename = str(id_tag) + ".json" filename = os.path.abspath(os.path.join("data\shows", filename)) os.remove(filename) main()
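# -----------------------------------------------------------------------------
# Note on the input format consumed by readData()/RyanUpdate() above: each line
# of data/shows.txt is expected to look like (the values here are made up)
#
#   12345 : Example Show [2|5]
#
# i.e. "<TMDB id> : <show name> [<current season>|<current episode>]", which is
# what the slicing on ":", "[", "|" and "]" in RyanUpdate() assumes.
# -----------------------------------------------------------------------------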
from __future__ import absolute_import

import six
import warnings
import time
import logging

from sentry import options
from django.core.exceptions import SuspiciousOperation
from collections import namedtuple
from django.conf import settings
from requests.exceptions import RequestException, Timeout, ReadTimeout
from six.moves.urllib.parse import urlparse

from sentry.models import EventError
from sentry.exceptions import RestrictedIPAddress
from sentry.utils.cache import cache
from sentry.utils.hashlib import md5_text
from sentry.utils.strings import truncatechars

# Importing for backwards compatible API
from sentry.net.socket import safe_socket_connect, is_valid_url, is_safe_hostname  # NOQA
from sentry.net.http import SafeSession

logger = logging.getLogger(__name__)

# TODO(dcramer): we want to change these to be constants so they are easier
# to translate/link again

# the maximum number of remote resources (i.e. source files) that should be
# fetched
MAX_URL_LENGTH = 150

# UrlResult.body **must** be bytes
UrlResult = namedtuple("UrlResult", ["url", "headers", "body", "status", "encoding"])

# In case SSL is unavailable (light builds) we can't import this here.
try:
    from OpenSSL.SSL import ZeroReturnError, Error as OpenSSLError
except ImportError:

    class ZeroReturnError(Exception):
        pass

    class OpenSSLError(Exception):
        pass


class BadSource(Exception):
    error_type = EventError.UNKNOWN_ERROR

    def __init__(self, data=None):
        if data is None:
            data = {}
        data.setdefault("type", self.error_type)
        super(BadSource, self).__init__(data["type"])
        self.data = data


class CannotFetch(BadSource):
    error_type = EventError.FETCH_GENERIC_ERROR


def get_server_hostname():
    return urlparse(options.get("system.url-prefix")).hostname


build_session = SafeSession


def safe_urlopen(
    url,
    method=None,
    params=None,
    data=None,
    json=None,
    headers=None,
    allow_redirects=False,
    timeout=30,
    verify_ssl=True,
    user_agent=None,
):
    """
    A slightly safer version of ``urllib2.urlopen`` which prevents redirection
    and ensures the URL isn't attempting to hit a blacklisted IP range.
    """
    if user_agent is not None:
        warnings.warn("user_agent is no longer used with safe_urlopen")

    session = SafeSession()

    kwargs = {}

    if json:
        kwargs["json"] = json
        if not headers:
            headers = {}
        headers.setdefault("Content-Type", "application/json")

    if data:
        kwargs["data"] = data

    if params:
        kwargs["params"] = params

    if headers:
        kwargs["headers"] = headers

    if method is None:
        method = "POST" if (data or json) else "GET"

    response = session.request(
        method=method,
        url=url,
        allow_redirects=allow_redirects,
        timeout=timeout,
        verify=verify_ssl,
        **kwargs
    )

    return response


def safe_urlread(response):
    return response.content


def expose_url(url):
    if url is None:
        return u"<unknown>"
    if url[:5] == "data:":
        return u"<data url>"
    url = truncatechars(url, MAX_URL_LENGTH)
    if isinstance(url, six.binary_type):
        url = url.decode("utf-8", "replace")
    return url


def fetch_file(
    url,
    domain_lock_enabled=True,
    outfile=None,
    headers=None,
    allow_redirects=True,
    verify_ssl=False,
    timeout=settings.SENTRY_SOURCE_FETCH_SOCKET_TIMEOUT,
    **kwargs
):
    """
    Pull down a URL, returning a UrlResult object.
""" # lock down domains that are problematic if domain_lock_enabled: domain = urlparse(url).netloc domain_key = "source:blacklist:v2:%s" % (md5_text(domain).hexdigest(),) domain_result = cache.get(domain_key) if domain_result: domain_result["url"] = url raise CannotFetch(domain_result) logger.debug("Fetching %r from the internet", url) http_session = SafeSession() response = None try: try: start = time.time() response = http_session.get( url, allow_redirects=allow_redirects, verify=verify_ssl, headers=headers, timeout=timeout, stream=True, **kwargs ) try: cl = int(response.headers["content-length"]) except (LookupError, ValueError): cl = 0 if cl > settings.SENTRY_SOURCE_FETCH_MAX_SIZE: raise OverflowError() return_body = False if outfile is None: outfile = six.BytesIO() return_body = True cl = 0 # Only need to even attempt to read the response body if we # got a 200 OK if response.status_code == 200: for chunk in response.iter_content(16 * 1024): if time.time() - start > settings.SENTRY_SOURCE_FETCH_TIMEOUT: raise Timeout() outfile.write(chunk) cl += len(chunk) if cl > settings.SENTRY_SOURCE_FETCH_MAX_SIZE: raise OverflowError() except Exception as exc: logger.debug("Unable to fetch %r", url, exc_info=True) if isinstance(exc, RestrictedIPAddress): error = {"type": EventError.RESTRICTED_IP, "url": expose_url(url)} elif isinstance(exc, SuspiciousOperation): error = {"type": EventError.SECURITY_VIOLATION, "url": expose_url(url)} elif isinstance(exc, (Timeout, ReadTimeout)): error = { "type": EventError.FETCH_TIMEOUT, "url": expose_url(url), "timeout": settings.SENTRY_SOURCE_FETCH_TIMEOUT, } elif isinstance(exc, OverflowError): error = { "type": EventError.FETCH_TOO_LARGE, "url": expose_url(url), # We want size in megabytes to format nicely "max_size": float(settings.SENTRY_SOURCE_FETCH_MAX_SIZE) / 1024 / 1024, } elif isinstance(exc, (RequestException, ZeroReturnError, OpenSSLError)): error = { "type": EventError.FETCH_GENERIC_ERROR, "value": six.text_type(type(exc)), "url": expose_url(url), } else: logger.exception(six.text_type(exc)) error = {"type": EventError.UNKNOWN_ERROR, "url": expose_url(url)} # TODO(dcramer): we want to be less aggressive on disabling domains if domain_lock_enabled: cache.set(domain_key, error or "", 300) logger.warning("source.disabled", extra=error) raise CannotFetch(error) headers = {k.lower(): v for k, v in response.headers.items()} encoding = response.encoding body = None if return_body: body = outfile.getvalue() outfile.close() # we only want to close StringIO result = (headers, body, response.status_code, encoding) finally: if response is not None: response.close() return UrlResult(url, result[0], result[1], result[2], result[3])
# Python test set -- built-in functions import test.support, unittest import sys import pickle import itertools # pure Python implementations (3 args only), for comparison def pyrange(start, stop, step): if (start - stop) // step < 0: # replace stop with next element in the sequence of integers # that are congruent to start modulo step. stop += (start - stop) % step while start != stop: yield start start += step def pyrange_reversed(start, stop, step): stop += (start - stop) % step return pyrange(stop - step, start - step, -step) class RangeTest(unittest.TestCase): def assert_iterators_equal(self, xs, ys, test_id, limit=None): # check that an iterator xs matches the expected results ys, # up to a given limit. if limit is not None: xs = itertools.islice(xs, limit) ys = itertools.islice(ys, limit) sentinel = object() pairs = itertools.zip_longest(xs, ys, fillvalue=sentinel) for i, (x, y) in enumerate(pairs): if x == y: continue elif x == sentinel: self.fail('{}: iterator ended unexpectedly ' 'at position {}; expected {}'.format(test_id, i, y)) elif y == sentinel: self.fail('{}: unexpected excess element {} at ' 'position {}'.format(test_id, x, i)) else: self.fail('{}: wrong element at position {};' 'expected {}, got {}'.format(test_id, i, y, x)) def test_range(self): self.assertEqual(list(range(3)), [0, 1, 2]) self.assertEqual(list(range(1, 5)), [1, 2, 3, 4]) self.assertEqual(list(range(0)), []) self.assertEqual(list(range(-3)), []) self.assertEqual(list(range(1, 10, 3)), [1, 4, 7]) self.assertEqual(list(range(5, -5, -3)), [5, 2, -1, -4]) a = 10 b = 100 c = 50 self.assertEqual(list(range(a, a+2)), [a, a+1]) self.assertEqual(list(range(a+2, a, -1)), [a+2, a+1]) self.assertEqual(list(range(a+4, a, -2)), [a+4, a+2]) seq = list(range(a, b, c)) self.assertIn(a, seq) self.assertNotIn(b, seq) self.assertEqual(len(seq), 2) seq = list(range(b, a, -c)) self.assertIn(b, seq) self.assertNotIn(a, seq) self.assertEqual(len(seq), 2) seq = list(range(-a, -b, -c)) self.assertIn(-a, seq) self.assertNotIn(-b, seq) self.assertEqual(len(seq), 2) self.assertRaises(TypeError, range) self.assertRaises(TypeError, range, 1, 2, 3, 4) self.assertRaises(ValueError, range, 1, 2, 0) self.assertRaises(TypeError, range, 0.0, 2, 1) self.assertRaises(TypeError, range, 1, 2.0, 1) self.assertRaises(TypeError, range, 1, 2, 1.0) self.assertRaises(TypeError, range, 1e100, 1e101, 1e101) self.assertRaises(TypeError, range, 0, "spam") self.assertRaises(TypeError, range, 0, 42, "spam") self.assertEqual(len(range(0, sys.maxsize, sys.maxsize-1)), 2) r = range(-sys.maxsize, sys.maxsize, 2) self.assertEqual(len(r), sys.maxsize) def test_large_operands(self): x = range(10**20, 10**20+10, 3) self.assertEqual(len(x), 4) self.assertEqual(len(list(x)), 4) x = range(10**20+10, 10**20, 3) self.assertEqual(len(x), 0) self.assertEqual(len(list(x)), 0) self.assertFalse(x) x = range(10**20, 10**20+10, -3) self.assertEqual(len(x), 0) self.assertEqual(len(list(x)), 0) self.assertFalse(x) x = range(10**20+10, 10**20, -3) self.assertEqual(len(x), 4) self.assertEqual(len(list(x)), 4) self.assertTrue(x) # Now test range() with longs for x in [range(-2**100), range(0, -2**100), range(0, 2**100, -1)]: self.assertEqual(list(x), []) self.assertFalse(x) a = int(10 * sys.maxsize) b = int(100 * sys.maxsize) c = int(50 * sys.maxsize) self.assertEqual(list(range(a, a+2)), [a, a+1]) self.assertEqual(list(range(a+2, a, -1)), [a+2, a+1]) self.assertEqual(list(range(a+4, a, -2)), [a+4, a+2]) seq = list(range(a, b, c)) self.assertIn(a, seq) self.assertNotIn(b, seq) 
self.assertEqual(len(seq), 2) self.assertEqual(seq[0], a) self.assertEqual(seq[-1], a+c) seq = list(range(b, a, -c)) self.assertIn(b, seq) self.assertNotIn(a, seq) self.assertEqual(len(seq), 2) self.assertEqual(seq[0], b) self.assertEqual(seq[-1], b-c) seq = list(range(-a, -b, -c)) self.assertIn(-a, seq) self.assertNotIn(-b, seq) self.assertEqual(len(seq), 2) self.assertEqual(seq[0], -a) self.assertEqual(seq[-1], -a-c) def test_large_range(self): # Check long ranges (len > sys.maxsize) # len() is expected to fail due to limitations of the __len__ protocol def _range_len(x): try: length = len(x) except OverflowError: step = x[1] - x[0] length = 1 + ((x[-1] - x[0]) // step) return length a = -sys.maxsize b = sys.maxsize expected_len = b - a x = range(a, b) self.assertIn(a, x) self.assertNotIn(b, x) self.assertRaises(OverflowError, len, x) self.assertTrue(x) self.assertEqual(_range_len(x), expected_len) self.assertEqual(x[0], a) idx = sys.maxsize+1 self.assertEqual(x[idx], a+idx) self.assertEqual(x[idx:idx+1][0], a+idx) with self.assertRaises(IndexError): x[-expected_len-1] with self.assertRaises(IndexError): x[expected_len] a = 0 b = 2 * sys.maxsize expected_len = b - a x = range(a, b) self.assertIn(a, x) self.assertNotIn(b, x) self.assertRaises(OverflowError, len, x) self.assertTrue(x) self.assertEqual(_range_len(x), expected_len) self.assertEqual(x[0], a) idx = sys.maxsize+1 self.assertEqual(x[idx], a+idx) self.assertEqual(x[idx:idx+1][0], a+idx) with self.assertRaises(IndexError): x[-expected_len-1] with self.assertRaises(IndexError): x[expected_len] a = 0 b = sys.maxsize**10 c = 2*sys.maxsize expected_len = 1 + (b - a) // c x = range(a, b, c) self.assertIn(a, x) self.assertNotIn(b, x) self.assertRaises(OverflowError, len, x) self.assertTrue(x) self.assertEqual(_range_len(x), expected_len) self.assertEqual(x[0], a) idx = sys.maxsize+1 self.assertEqual(x[idx], a+(idx*c)) self.assertEqual(x[idx:idx+1][0], a+(idx*c)) with self.assertRaises(IndexError): x[-expected_len-1] with self.assertRaises(IndexError): x[expected_len] a = sys.maxsize**10 b = 0 c = -2*sys.maxsize expected_len = 1 + (b - a) // c x = range(a, b, c) self.assertIn(a, x) self.assertNotIn(b, x) self.assertRaises(OverflowError, len, x) self.assertTrue(x) self.assertEqual(_range_len(x), expected_len) self.assertEqual(x[0], a) idx = sys.maxsize+1 self.assertEqual(x[idx], a+(idx*c)) self.assertEqual(x[idx:idx+1][0], a+(idx*c)) with self.assertRaises(IndexError): x[-expected_len-1] with self.assertRaises(IndexError): x[expected_len] def test_invalid_invocation(self): self.assertRaises(TypeError, range) self.assertRaises(TypeError, range, 1, 2, 3, 4) self.assertRaises(ValueError, range, 1, 2, 0) a = int(10 * sys.maxsize) self.assertRaises(ValueError, range, a, a + 1, int(0)) self.assertRaises(TypeError, range, 1., 1., 1.) 
self.assertRaises(TypeError, range, 1e100, 1e101, 1e101) self.assertRaises(TypeError, range, 0, "spam") self.assertRaises(TypeError, range, 0, 42, "spam") # Exercise various combinations of bad arguments, to check # refcounting logic self.assertRaises(TypeError, range, 0.0) self.assertRaises(TypeError, range, 0, 0.0) self.assertRaises(TypeError, range, 0.0, 0) self.assertRaises(TypeError, range, 0.0, 0.0) self.assertRaises(TypeError, range, 0, 0, 1.0) self.assertRaises(TypeError, range, 0, 0.0, 1) self.assertRaises(TypeError, range, 0, 0.0, 1.0) self.assertRaises(TypeError, range, 0.0, 0, 1) self.assertRaises(TypeError, range, 0.0, 0, 1.0) self.assertRaises(TypeError, range, 0.0, 0.0, 1) self.assertRaises(TypeError, range, 0.0, 0.0, 1.0) def test_index(self): u = range(2) self.assertEqual(u.index(0), 0) self.assertEqual(u.index(1), 1) self.assertRaises(ValueError, u.index, 2) u = range(-2, 3) self.assertEqual(u.count(0), 1) self.assertEqual(u.index(0), 2) self.assertRaises(TypeError, u.index) class BadExc(Exception): pass class BadCmp: def __eq__(self, other): if other == 2: raise BadExc() return False a = range(4) self.assertRaises(BadExc, a.index, BadCmp()) a = range(-2, 3) self.assertEqual(a.index(0), 2) self.assertEqual(range(1, 10, 3).index(4), 1) self.assertEqual(range(1, -10, -3).index(-5), 2) self.assertEqual(range(10**20).index(1), 1) self.assertEqual(range(10**20).index(10**20 - 1), 10**20 - 1) self.assertRaises(ValueError, range(1, 2**100, 2).index, 2**87) self.assertEqual(range(1, 2**100, 2).index(2**87+1), 2**86) class AlwaysEqual(object): def __eq__(self, other): return True always_equal = AlwaysEqual() self.assertEqual(range(10).index(always_equal), 0) def test_user_index_method(self): bignum = 2*sys.maxsize smallnum = 42 # User-defined class with an __index__ method class I: def __init__(self, n): self.n = int(n) def __index__(self): return self.n self.assertEqual(list(range(I(bignum), I(bignum + 1))), [bignum]) self.assertEqual(list(range(I(smallnum), I(smallnum + 1))), [smallnum]) # User-defined class with a failing __index__ method class IX: def __index__(self): raise RuntimeError self.assertRaises(RuntimeError, range, IX()) # User-defined class with an invalid __index__ method class IN: def __index__(self): return "not a number" self.assertRaises(TypeError, range, IN()) # Test use of user-defined classes in slice indices. 
self.assertEqual(range(10)[:I(5)], range(5)) with self.assertRaises(RuntimeError): range(0, 10)[:IX()] with self.assertRaises(TypeError): range(0, 10)[:IN()] def test_count(self): self.assertEqual(range(3).count(-1), 0) self.assertEqual(range(3).count(0), 1) self.assertEqual(range(3).count(1), 1) self.assertEqual(range(3).count(2), 1) self.assertEqual(range(3).count(3), 0) self.assertIs(type(range(3).count(-1)), int) self.assertIs(type(range(3).count(1)), int) self.assertEqual(range(10**20).count(1), 1) self.assertEqual(range(10**20).count(10**20), 0) self.assertEqual(range(3).index(1), 1) self.assertEqual(range(1, 2**100, 2).count(2**87), 0) self.assertEqual(range(1, 2**100, 2).count(2**87+1), 1) class AlwaysEqual(object): def __eq__(self, other): return True always_equal = AlwaysEqual() self.assertEqual(range(10).count(always_equal), 10) self.assertEqual(len(range(sys.maxsize, sys.maxsize+10)), 10) def test_repr(self): self.assertEqual(repr(range(1)), 'range(0, 1)') self.assertEqual(repr(range(1, 2)), 'range(1, 2)') self.assertEqual(repr(range(1, 2, 3)), 'range(1, 2, 3)') def test_pickling(self): testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1), (13, 21, 3), (-2, 2, 2), (2**65, 2**65+2)] for proto in range(pickle.HIGHEST_PROTOCOL + 1): for t in testcases: with self.subTest(proto=proto, test=t): r = range(*t) self.assertEqual(list(pickle.loads(pickle.dumps(r, proto))), list(r)) def test_iterator_pickling(self): testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1), (13, 21, 3), (-2, 2, 2), (2**65, 2**65+2)] for proto in range(pickle.HIGHEST_PROTOCOL + 1): for t in testcases: it = itorg = iter(range(*t)) data = list(range(*t)) d = pickle.dumps(it, proto) it = pickle.loads(d) self.assertEqual(type(itorg), type(it)) self.assertEqual(list(it), data) it = pickle.loads(d) try: next(it) except StopIteration: continue d = pickle.dumps(it, proto) it = pickle.loads(d) self.assertEqual(list(it), data[1:]) def test_exhausted_iterator_pickling(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): r = range(2**65, 2**65+2) i = iter(r) while True: r = next(i) if r == 2**65+1: break d = pickle.dumps(i, proto) i2 = pickle.loads(d) self.assertEqual(list(i), []) self.assertEqual(list(i2), []) def test_large_exhausted_iterator_pickling(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): r = range(20) i = iter(r) while True: r = next(i) if r == 19: break d = pickle.dumps(i, proto) i2 = pickle.loads(d) self.assertEqual(list(i), []) self.assertEqual(list(i2), []) def test_odd_bug(self): # This used to raise a "SystemError: NULL result without error" # because the range validation step was eating the exception # before NULL was returned. with self.assertRaises(TypeError): range([], 1, -1) def test_types(self): # Non-integer objects *equal* to any of the range's items are supposed # to be contained in the range. self.assertIn(1.0, range(3)) self.assertIn(True, range(3)) self.assertIn(1+0j, range(3)) class C1: def __eq__(self, other): return True self.assertIn(C1(), range(3)) # Objects are never coerced into other types for comparison. class C2: def __int__(self): return 1 def __index__(self): return 1 self.assertNotIn(C2(), range(3)) # ..except if explicitly told so. self.assertIn(int(C2()), range(3)) # Check that the range.__contains__ optimization is only # used for ints, not for instances of subclasses of int. 
class C3(int): def __eq__(self, other): return True self.assertIn(C3(11), range(10)) self.assertIn(C3(11), list(range(10))) def test_strided_limits(self): r = range(0, 101, 2) self.assertIn(0, r) self.assertNotIn(1, r) self.assertIn(2, r) self.assertNotIn(99, r) self.assertIn(100, r) self.assertNotIn(101, r) r = range(0, -20, -1) self.assertIn(0, r) self.assertIn(-1, r) self.assertIn(-19, r) self.assertNotIn(-20, r) r = range(0, -20, -2) self.assertIn(-18, r) self.assertNotIn(-19, r) self.assertNotIn(-20, r) def test_empty(self): r = range(0) self.assertNotIn(0, r) self.assertNotIn(1, r) r = range(0, -10) self.assertNotIn(0, r) self.assertNotIn(-1, r) self.assertNotIn(1, r) def test_range_iterators(self): # exercise 'fast' iterators, that use a rangeiterobject internally. # see issue 7298 limits = [base + jiggle for M in (2**32, 2**64) for base in (-M, -M//2, 0, M//2, M) for jiggle in (-2, -1, 0, 1, 2)] test_ranges = [(start, end, step) for start in limits for end in limits for step in (-2**63, -2**31, -2, -1, 1, 2)] for start, end, step in test_ranges: iter1 = range(start, end, step) iter2 = pyrange(start, end, step) test_id = "range({}, {}, {})".format(start, end, step) # check first 100 entries self.assert_iterators_equal(iter1, iter2, test_id, limit=100) iter1 = reversed(range(start, end, step)) iter2 = pyrange_reversed(start, end, step) test_id = "reversed(range({}, {}, {}))".format(start, end, step) self.assert_iterators_equal(iter1, iter2, test_id, limit=100) @test.support.cpython_only def test_range_iterator_invocation(self): import _testcapi rangeiter_type = type(iter(range(0))) # rangeiter_new doesn't take keyword arguments with self.assertRaises(TypeError): rangeiter_type(a=1) # rangeiter_new takes exactly 3 arguments self.assertRaises(TypeError, rangeiter_type) self.assertRaises(TypeError, rangeiter_type, 1) self.assertRaises(TypeError, rangeiter_type, 1, 1) self.assertRaises(TypeError, rangeiter_type, 1, 1, 1, 1) # start, stop and stop must fit in C long for good_val in [_testcapi.LONG_MAX, _testcapi.LONG_MIN]: rangeiter_type(good_val, good_val, good_val) for bad_val in [_testcapi.LONG_MAX + 1, _testcapi.LONG_MIN - 1]: self.assertRaises(OverflowError, rangeiter_type, bad_val, 1, 1) self.assertRaises(OverflowError, rangeiter_type, 1, bad_val, 1) self.assertRaises(OverflowError, rangeiter_type, 1, 1, bad_val) # step mustn't be zero self.assertRaises(ValueError, rangeiter_type, 1, 1, 0) def test_slice(self): def check(start, stop, step=None): i = slice(start, stop, step) self.assertEqual(list(r[i]), list(r)[i]) self.assertEqual(len(r[i]), len(list(r)[i])) for r in [range(10), range(0), range(1, 9, 3), range(8, 0, -3), range(sys.maxsize+1, sys.maxsize+10), ]: check(0, 2) check(0, 20) check(1, 2) check(20, 30) check(-30, -20) check(-1, 100, 2) check(0, -1) check(-1, -3, -1) def test_contains(self): r = range(10) self.assertIn(0, r) self.assertIn(1, r) self.assertIn(5.0, r) self.assertNotIn(5.1, r) self.assertNotIn(-1, r) self.assertNotIn(10, r) self.assertNotIn("", r) r = range(9, -1, -1) self.assertIn(0, r) self.assertIn(1, r) self.assertIn(5.0, r) self.assertNotIn(5.1, r) self.assertNotIn(-1, r) self.assertNotIn(10, r) self.assertNotIn("", r) r = range(0, 10, 2) self.assertIn(0, r) self.assertNotIn(1, r) self.assertNotIn(5.0, r) self.assertNotIn(5.1, r) self.assertNotIn(-1, r) self.assertNotIn(10, r) self.assertNotIn("", r) r = range(9, -1, -2) self.assertNotIn(0, r) self.assertIn(1, r) self.assertIn(5.0, r) self.assertNotIn(5.1, r) self.assertNotIn(-1, r) self.assertNotIn(10, r) 
self.assertNotIn("", r) def test_reverse_iteration(self): for r in [range(10), range(0), range(1, 9, 3), range(8, 0, -3), range(sys.maxsize+1, sys.maxsize+10), ]: self.assertEqual(list(reversed(r)), list(r)[::-1]) def test_issue11845(self): r = range(*slice(1, 18, 2).indices(20)) values = {None, 0, 1, -1, 2, -2, 5, -5, 19, -19, 20, -20, 21, -21, 30, -30, 99, -99} for i in values: for j in values: for k in values - {0}: r[i:j:k] def test_comparison(self): test_ranges = [range(0), range(0, -1), range(1, 1, 3), range(1), range(5, 6), range(5, 6, 2), range(5, 7, 2), range(2), range(0, 4, 2), range(0, 5, 2), range(0, 6, 2)] test_tuples = list(map(tuple, test_ranges)) # Check that equality of ranges matches equality of the corresponding # tuples for each pair from the test lists above. ranges_eq = [a == b for a in test_ranges for b in test_ranges] tuples_eq = [a == b for a in test_tuples for b in test_tuples] self.assertEqual(ranges_eq, tuples_eq) # Check that != correctly gives the logical negation of == ranges_ne = [a != b for a in test_ranges for b in test_ranges] self.assertEqual(ranges_ne, [not x for x in ranges_eq]) # Equal ranges should have equal hashes. for a in test_ranges: for b in test_ranges: if a == b: self.assertEqual(hash(a), hash(b)) # Ranges are unequal to other types (even sequence types) self.assertIs(range(0) == (), False) self.assertIs(() == range(0), False) self.assertIs(range(2) == [0, 1], False) # Huge integers aren't a problem. self.assertEqual(range(0, 2**100 - 1, 2), range(0, 2**100, 2)) self.assertEqual(hash(range(0, 2**100 - 1, 2)), hash(range(0, 2**100, 2))) self.assertNotEqual(range(0, 2**100, 2), range(0, 2**100 + 1, 2)) self.assertEqual(range(2**200, 2**201 - 2**99, 2**100), range(2**200, 2**201, 2**100)) self.assertEqual(hash(range(2**200, 2**201 - 2**99, 2**100)), hash(range(2**200, 2**201, 2**100))) self.assertNotEqual(range(2**200, 2**201, 2**100), range(2**200, 2**201 + 1, 2**100)) # Order comparisons are not implemented for ranges. with self.assertRaises(TypeError): range(0) < range(0) with self.assertRaises(TypeError): range(0) > range(0) with self.assertRaises(TypeError): range(0) <= range(0) with self.assertRaises(TypeError): range(0) >= range(0) def test_attributes(self): # test the start, stop and step attributes of range objects self.assert_attrs(range(0), 0, 0, 1) self.assert_attrs(range(10), 0, 10, 1) self.assert_attrs(range(-10), 0, -10, 1) self.assert_attrs(range(0, 10, 1), 0, 10, 1) self.assert_attrs(range(0, 10, 3), 0, 10, 3) self.assert_attrs(range(10, 0, -1), 10, 0, -1) self.assert_attrs(range(10, 0, -3), 10, 0, -3) def assert_attrs(self, rangeobj, start, stop, step): self.assertEqual(rangeobj.start, start) self.assertEqual(rangeobj.stop, stop) self.assertEqual(rangeobj.step, step) with self.assertRaises(AttributeError): rangeobj.start = 0 with self.assertRaises(AttributeError): rangeobj.stop = 10 with self.assertRaises(AttributeError): rangeobj.step = 1 with self.assertRaises(AttributeError): del rangeobj.start with self.assertRaises(AttributeError): del rangeobj.stop with self.assertRaises(AttributeError): del rangeobj.step if __name__ == "__main__": unittest.main()
from pytest import raises from .env import NInt, get_session, key from .env import session from sider.types import Set from sider.transaction import Transaction from sider.exceptions import CommitError S = frozenset IntSet = Set(NInt) def test_iterate(session): set_ = session.set(key('test_set_iterate'), S('abc'), Set) assert S(['a', 'b', 'c']) == S(set_) setx = session.set(key('test_setx_iterate'), S([1, 2, 3]), IntSet) assert S([1, 2, 3]) == S(setx) def test_length(session): set_ = session.set(key('test_set_length'), S('abc'), Set) assert len(set_) == 3 setx = session.set(key('test_setx_length'), S([1, 2, 3]), IntSet) assert len(setx) == 3 def test_contains(session): set_ = session.set(key('test_set_contains'), S('abc'), Set) assert 'a' in set_ assert 'd' not in set_ setx = session.set(key('test_setx_contains'), S([1, 2, 3]), IntSet) assert 1 in setx assert 4 not in setx assert '1' not in setx assert '4' not in setx def test_equals(session): set_ = session.set(key('test_set_equals'), S('abc'), Set) set2 = session.set(key('test_set_equals2'), S('abc'), Set) set3 = session.set(key('test_set_equals3'), S('abcd'), Set) set4 = session.set(key('test_set_equals4'), S([1, 2, 3]), IntSet) emptyset = session.set(key('test_set_equals5'), S(), Set) emptyset2 = session.set(key('test_set_equals5'), S(), IntSet) assert set_ == set('abc') assert set_ != set('abcd') assert set_ == S('abc') assert set_ != S('abcd') assert set_ == set2 and set2 == set_ assert set_ != set3 and set3 != set_ assert set_ != set4 and set4 != set_ assert emptyset == emptyset2 and emptyset2 == emptyset def test_isdisjoint(session): set_ = session.set(key('test_set_isdisjoint'), S('abc'), Set) setj = session.set(key('test_set_isdisjoint2'), S('cde'), Set) setd = session.set(key('test_set_isdisjoint3'), S('def1'), Set) assert not set_.isdisjoint('cde') assert set_.isdisjoint('def') assert not set_.isdisjoint(S('cde')) assert set_.isdisjoint(S('def')) assert not set_.isdisjoint(setj) assert set_.isdisjoint(setd) assert not setj.isdisjoint(set_) assert setd.isdisjoint(set_) setx = session.set(key('test_setx_isdisjoint'), S([1, 2, 3]), IntSet) setxj = session.set(key('test_setx_isdisjoint2'), S([3, 4, 5]), IntSet) setxd = session.set(key('test_setx_isdisjoint3'), S([4, 5, 6]), IntSet) assert not setx.isdisjoint([3, 4, 5]) assert setx.isdisjoint([4, 5, 6]) assert not setx.isdisjoint(S([3, 4, 5])) assert setx.isdisjoint(S([4, 5, 6])) assert not setx.isdisjoint(setxj) assert setx.isdisjoint(setxd) assert not setxj.isdisjoint(setx) assert setxd.isdisjoint(setx) # mismatched value_type NInt vs. 
Bulk: assert setd.isdisjoint(setx) assert setx.isdisjoint(setd) assert setd.isdisjoint(setxj) assert setd.isdisjoint(setxd) assert setxj.isdisjoint(setd) assert setxd.isdisjoint(setd) def test_issubset(session): test_sets = {Set(): 'abcdefg', Set(NInt): range(1, 8)} fixtures = {} for value_type, members in test_sets.items(): typeid = str(hash(value_type)) d = list(members) e = d[1:-1] f = e[1:-1] g = S(d) h = S(e) i = S(f) a = session.set(key('test_set_issubset_a' + typeid), g, value_type) b = session.set(key('test_set_issubset_b' + typeid), h, value_type) c = session.set(key('test_set_issubset_c' + typeid), i, value_type) fixtures[value_type] = a, b, c assert c.issubset(a) and c <= a and c < a assert c.issubset(b) and c <= b and c < b assert c.issubset(c) and c <= c and not (c < c) assert c.issubset(d) assert c.issubset(e) assert c.issubset(f) assert c.issubset(g) and c <= g and c < g assert c.issubset(h) and c <= h and c < h assert c.issubset(i) and c <= i and not (c < i) assert b.issubset(a) and b <= a and b < a assert b.issubset(b) and b <= b and not (b < b) assert not b.issubset(c) and not (a <= c) and not (a < c) assert b.issubset(d) assert b.issubset(e) assert not b.issubset(f) assert b.issubset(g) and b <= g and b < g assert b.issubset(h) and b <= h and not (b < h) assert not b.issubset(i) and not (b <= i) and not (b < i) assert a.issubset(a) and a <= a and not (a < a) assert not a.issubset(b) and not (a <= b) and not (a < b) assert not a.issubset(c) and not (a <= c) and not (a < c) assert a.issubset(d) assert not a.issubset(e) assert not a.issubset(f) assert a.issubset(g) and a <= g and not (a < g) assert not a.issubset(h) and not (a <= h) and not (a < h) assert not a.issubset(i) and not (a <= i) and not (a < i) with raises(TypeError): a <= d with raises(TypeError): a <= e with raises(TypeError): a <= f with raises(TypeError): a < d with raises(TypeError): a < e with raises(TypeError): a < f assert not fixtures[Set()][0].issubset(fixtures[Set(NInt)][0]) assert not fixtures[Set()][0].issubset(fixtures[Set(NInt)][1]) assert not fixtures[Set()][0].issubset(fixtures[Set(NInt)][2]) def test_issuperset(session): test_sets = {Set(): 'abcdefg', Set(NInt): range(1, 8)} fixtures = {} for value_type, members in test_sets.items(): typeid = str(hash(value_type)) f = list(members) e = f[1:-1] d = e[1:-1] g = S(d) h = S(e) i = S(f) a = session.set(key('test_set_issuperset_a' + typeid), g, value_type) b = session.set(key('test_set_issuperset_b' + typeid), h, value_type) c = session.set(key('test_set_issuperset_c' + typeid), i, value_type) fixtures[value_type] = a, b, c assert c.issuperset(a) and c >= a and c > a assert c.issuperset(b) and c >= b and c > b assert c.issuperset(c) and c >= c and not (c > c) assert c.issuperset(d) assert c.issuperset(e) assert c.issuperset(f) assert c.issuperset(g) and c >= g and c > g assert c.issuperset(h) and c >= h and c > h assert c.issuperset(i) and c >= i and not (c > i) assert b.issuperset(a) and b >= a and b > a assert b.issuperset(b) and b >= b and not (b > b) assert not b.issuperset(c) and not (a >= c) and not (a > c) assert b.issuperset(d) assert b.issuperset(e) assert not b.issuperset(f) assert b.issuperset(g) and b >= g and b > g assert b.issuperset(h) and b >= h and not (b > h) assert not b.issuperset(i) and not (b >= i) and not (b > i) assert a.issuperset(a) and a >= a and not (a > a) assert not a.issuperset(b) and not (a >= b) and not (a > b) assert not a.issuperset(c) and not (a >= c) and not (a > c) assert a.issuperset(d) assert not a.issuperset(e) 
assert not a.issuperset(f) assert a.issuperset(g) and a >= g and not (a > g) assert not a.issuperset(h) and not (a >= h) and not (a > h) assert not a.issuperset(i) and not (a >= i) and not (a > i) with raises(TypeError): a >= d with raises(TypeError): a >= e with raises(TypeError): a >= f with raises(TypeError): a > d with raises(TypeError): a > e with raises(TypeError): a > f assert not fixtures[Set()][0].issuperset(fixtures[Set(NInt)][0]) assert not fixtures[Set()][0].issuperset(fixtures[Set(NInt)][1]) assert not fixtures[Set()][0].issuperset(fixtures[Set(NInt)][2]) def test_difference(session): set_ = session.set(key('test_set_difference'), S('abcd'), Set) set2 = session.set(key('test_set_difference2'), S('bde1'), Set) set3 = session.set(key('test_set_difference3'), S('az'), Set) assert set_.difference() == S('abcd') assert set_.difference(set2) == S('ac') assert set_.difference(set2, set3) == S('c') assert set_.difference(set2, 'az') == S('c') assert set_.difference(set2, S('az')) == S('c') assert set_.difference('bdef') == S('ac') assert set_.difference('bdef', set3) == S('c') assert set_.difference('bdef', 'az') == S('c') assert set_.difference('bdef', S('az')) == S('c') assert set_.difference(S('bdef')) == S('ac') assert set_.difference(S('bdef'), set3) == S('c') assert set_.difference(S('bdef'), 'az') == S('c') assert set_.difference(S('bdef'), S('az')) == S('c') assert set_ - set2 == S('ac') assert set_ - S('bdef') == S('ac') assert S('bdef') - set_ == S('ef') with raises(TypeError): set_ - 'bdef' setx = session.set(key('test_setx_difference'), S([1, 2, 3, 4]), IntSet) sety = session.set(key('test_setx_difference2'), S([2, 4, 5, 6]), IntSet) setz = session.set(key('test_setx_difference3'), S([1, 7]), IntSet) assert setx.difference() == S([1, 2, 3, 4]) assert setx.difference(sety) == S([1, 3]) assert setx.difference(sety, setz) == S([3]) assert setx.difference(sety, [1, 7]) == S([3]) assert setx.difference(sety, S([1, 7])) == S([3]) assert setx.difference([2, 4, 5, 6]) == S([1, 3]) assert setx.difference([2, 4, 5, 6], setz) == S([3]) assert setx.difference([2, 4, 5, 6], [1, 7]) == S([3]) assert setx.difference([2, 4, 5, 6], S([1, 7])) == S([3]) assert setx.difference(S([2, 4, 5, 6])) == S([1, 3]) assert setx.difference(S([2, 4, 5, 6]), [1, 7]) == S([3]) assert setx.difference(S([2, 4, 5, 6]), S([1, 7])) == S([3]) assert setx - sety == S([1, 3]) assert setx - S([2, 4, 5, 6]) == S([1, 3]) assert S([2, 4, 5, 6]) - setx == S([5, 6]) with raises(TypeError): setx - [2, 4, 5, 6] # mismatched value_type NInt vs. 
Bulk: assert set2 == set2.difference(setx) assert set2 == set2.difference(setx, setz) assert setx == setx.difference(set2) assert setx == setx.difference(set2, set3) def test_symmetric_difference(session): set_ = session.set(key('test_set_symmdiff'), S('abcd'), Set) set2 = session.set(key('test_set_symmdiff2'), S('bde1'), Set) assert set_.symmetric_difference(set2) == S('ace1') assert set_.symmetric_difference('bdef') == S('acef') assert set_.symmetric_difference(S('bdef')) == S('acef') assert set_ ^ set2 == S('ace1') assert set_ ^ S('bdef') == S('acef') assert S('bdef') ^ set_ == S('acef') with raises(TypeError): set_ ^ 'bdef' setx = session.set(key('test_setx_symmdiff'), S([1, 2, 3, 4]), IntSet) sety = session.set(key('test_setx_symmdiff2'), S([2, 4, 5, 6]), IntSet) assert setx.symmetric_difference(sety) == S([1, 3, 5, 6]) assert setx.symmetric_difference([2, 4, 5, 6]) == S([1, 3, 5, 6]) assert setx.symmetric_difference(S([2, 4, 5, 6])) == S([1, 3, 5, 6]) assert setx ^ sety == S([1, 3, 5, 6]) assert setx ^ S([2, 4, 5, 6]) == S([1, 3, 5, 6]) assert S([2, 4, 5, 6]) ^ setx == S([1, 3, 5, 6]) with raises(TypeError): setx ^ [2, 4, 5, 6] # mismatched value_type NInt vs. Bulk: assert setx.union(set2) == setx.symmetric_difference(set2) assert set2.union(setx) == set2.symmetric_difference(setx) def test_union(session): set_ = session.set(key('test_set_union'), S('abc'), Set) set2 = session.set(key('test_set_union2'), S('cde'), Set) set3 = session.set(key('test_set_union3'), S('def'), Set) assert set_.union('cde') == S('abcde') assert set_.union('cde', 'def') == S('abcdef') assert set_.union(S('cde')) == S('abcde') assert set_.union(S('cde'), 'def') == S('abcdef') assert set_.union(S('cde'), S('def')) == S('abcdef') assert set_.union(set2) == S('abcde') assert set_.union(set2, set3) == S('abcdef') assert set_.union(set2, set3, 'adfg') == S('abcdefg') assert set_ | S('cde') == S('abcde') assert set_ | set2 == S('abcde') assert S('cde') | set_ == S('abcde') with raises(TypeError): set_ | 'cde' setx = session.set(key('test_setx_union'), S([1, 2, 3]), IntSet) sety = session.set(key('test_setx_union2'), S([3, 4, 5]), IntSet) setz = session.set(key('test_setx_union3'), S([4, 5, 6]), IntSet) assert setx.union([3, 4, 5]) == S([1, 2, 3, 4, 5]) assert setx.union([3, 4, 5], [4, 5, 6]) == S([1, 2, 3, 4, 5, 6]) assert setx.union(S([3, 4, 5])) == S([1, 2, 3, 4, 5]) assert setx.union(S([3, 4, 5]), [4, 5, 6]) == S([1, 2, 3, 4, 5, 6]) assert setx.union(S([3, 4, 5]), S([4, 5, 6])) == S([1, 2, 3, 4, 5, 6]) assert setx.union(sety) == S([1, 2, 3, 4, 5]) assert setx.union(sety, setz) == S([1, 2, 3, 4, 5, 6]) assert setx.union(sety, setz, [1, 4, 6, 7]) == S([1, 2, 3, 4, 5, 6, 7]) assert setx | S([3, 4, 5]) == S([1, 2, 3, 4, 5]) assert setx | sety == S([1, 2, 3, 4, 5]) assert S([3, 4, 5]) | setx == S([1, 2, 3, 4, 5]) with raises(TypeError): setx | [3, 4, 5] assert set_.union(setx) == S(['a', 'b', 'c', 1, 2, 3]) assert set_.union(setx, sety) == S(['a', 'b', 'c', 1, 2, 3, 4, 5]) assert (set_.union(set2, setx, sety) == S(['a', 'b', 'c', 'd', 'e', 1, 2, 3, 4, 5])) assert set_ | setx == S(['a', 'b', 'c', 1, 2, 3]) def test_intersection(session): set_ = session.set(key('test_set_intersection'), S('abc'), Set) set2 = session.set(key('test_set_intersection2'), S('bcd'), Set) set3 = session.set(key('test_set_intersection3'), S('bef'), Set) assert set_.intersection('bcde') == S('bc') assert set_.intersection('bcde', 'cdef') == S('c') assert set_.intersection(S('bcde')) == S('bc') assert set_.intersection(S('bcde'), 'cdef') == 
S('c') assert set_.intersection(S('bcde'), S('cdef')) == S('c') assert set_.intersection(set2) == S('bc') assert set_.intersection(set2, set3) == S('b') assert set_.intersection(set2, set3, 'bcfg') == S('b') assert set_.intersection(set2, set3, 'acfg') == S() assert set_ & S('bcd') == S('bc') assert set_ & set2 == S('bc') assert S('bcd') & set_ == S('bc') with raises(TypeError): set_ & 'cde' setx = session.set(key('test_setx_intersection'), S([1, 2, 3]), IntSet) sety = session.set(key('test_setx_intersection2'), S([2, 3, 4]), IntSet) setz = session.set(key('test_setx_intersection3'), S([1, 2, 5]), IntSet) assert setx.intersection([2, 3, 4]) == S([2, 3]) assert setx.intersection([2, 3, 4], [1, 2, 5]) == S([2]) assert setx.intersection(S([2, 3, 4])) == S([2, 3]) assert setx.intersection(S([2, 3, 4]), [1, 2, 5]) == S([2]) assert setx.intersection(S([2, 3, 4]), S([1, 2, 5])) == S([2]) assert setx.intersection(sety) == S([2, 3]) assert setx.intersection(sety, setz) == S([2]) assert setx.intersection(sety, setz, [1, 2, 5]) == S([2]) assert setx & S([2, 3, 4]) == S([2, 3]) assert setx & sety == S([2, 3]) assert S([2, 3, 4]) & setx == S([2, 3]) with raises(TypeError): setx & [3, 4, 5] assert set_.intersection(setx) == S([]) assert set_.intersection(setx, sety) == S([]) assert set_.intersection(set2, setx, sety) == S([]) assert set_ & setx == S([]) def test_add(session): set_ = session.set(key('test_set_add'), S('abc'), Set) set_.add('d') assert set_ == S('abcd') set_.add('d') assert set_ == S('abcd') with raises(TypeError): set_.add(1) setx = session.set(key('test_setx_add'), S([1, 2, 3]), IntSet) setx.add(4) assert setx == S([1, 2, 3, 4]) setx.add(4) assert setx == S([1, 2, 3, 4]) with raises(TypeError): setx.add('a') def test_discard(session): set_ = session.set(key('test_set_discard'), S('abc'), Set) set_.discard('a') assert set_ == S('bc') set_.discard('a') assert set_ == S('bc') set_.discard(1) assert set_ == S('bc') setx = session.set(key('test_setx_discard'), S([1, 2, 3]), IntSet) setx.discard(1) assert setx == S([2, 3]) setx.discard(1) assert setx == S([2, 3]) setx.discard('a') assert setx == S([2, 3]) def test_pop(session): expected = set('abc') set_ = session.set(key('test_set_pop'), expected, Set) popped = set_.pop() assert popped in expected expected.remove(popped) assert set_ == expected popped = set_.pop() assert popped in expected expected.remove(popped) assert set_ == expected popped = set_.pop() assert popped in expected assert len(set_) == 0 expected.remove(popped) assert len(expected) == 0 with raises(KeyError): set_.pop() def test_pop_t(session): session2 = get_session() expected = set('abc') keyid = key('test_set_pop_t') set_ = session.set(keyid, expected, Set) setx = session2.get(keyid, Set) with Transaction(session, [keyid]): card = len(set_) assert card == 3 popped = set_.pop() assert setx == expected assert popped in expected expected.remove(popped) assert set_ == set(setx) == expected with Transaction(session, [keyid]): card = len(set_) assert card == 2 popped = set_.pop() assert setx == expected assert popped in expected expected.remove(popped) assert set_ == set(setx) == expected with Transaction(session, [keyid]): card = len(set_) assert card == 1 popped = set_.pop() assert setx == expected assert popped in expected assert len(set_) == len(setx) == 0 expected.remove(popped) assert len(expected) == 0 with Transaction(session, [keyid]): with raises(KeyError): set_.pop() def test_clear(session): set_ = session.set(key('test_set_clear'), S('abc'), Set) set_.clear() assert 
len(set_) == 0 def test_update(session): def reset(): return session.set(key('test_set_update'), S('abc'), Set) set_ = reset() set2 = session.set(key('test_set_update2'), S('cde'), Set) set3 = session.set(key('test_set_update3'), S('def'), Set) set_.update('cde') assert set_ == S('abcde') reset() set_.update('cde', 'def') assert set_ == S('abcdef') reset() set_.update(S('cde')) assert set_ == S('abcde') reset() set_.update(S('cde'), 'def') assert set_ == S('abcdef') reset() set_.update(S('cde'), S('def')) assert set_ == S('abcdef') reset() set_.update(set2) assert set_ == S('abcde') reset() set_.update(set2, set3) assert set_ == S('abcdef') reset() set_.update(set2, set3, 'adfg') assert set_ == S('abcdefg') reset() set_ |= S('cde') assert set_ == S('abcde') reset() set_ |= set2 assert set_ == S('abcde') with raises(TypeError): set_ |= 'cde' def resetx(): return session.set(key('test_setx_union'), S([1, 2, 3]), IntSet) setx = resetx() sety = session.set(key('test_setx_union2'), S([3, 4, 5]), IntSet) setz = session.set(key('test_setx_union3'), S([4, 5, 6]), IntSet) setx.update([3, 4, 5]) assert setx == S([1, 2, 3, 4, 5]) resetx() setx.update([3, 4, 5], [4, 5, 6]) assert setx == S([1, 2, 3, 4, 5, 6]) resetx() setx.update(S([3, 4, 5])) assert setx == S([1, 2, 3, 4, 5]) resetx() setx.update(S([3, 4, 5]), [4, 5, 6]) assert setx == S([1, 2, 3, 4, 5, 6]) resetx() setx.update(S([3, 4, 5]), S([4, 5, 6])) assert setx == S([1, 2, 3, 4, 5, 6]) resetx() setx.update(sety) assert setx == S([1, 2, 3, 4, 5]) resetx() setx.update(sety, setz) assert setx == S([1, 2, 3, 4, 5, 6]) resetx() setx.update(sety, setz, [1, 4, 6, 7]) assert setx == S([1, 2, 3, 4, 5, 6, 7]) resetx() setx |= S([3, 4, 5]) assert setx == S([1, 2, 3, 4, 5]) resetx() setx |= sety assert setx == S([1, 2, 3, 4, 5]) with raises(TypeError): setx |= [3, 4, 5] with raises(TypeError): set_.update(setx) with raises(TypeError): set_ |= setx == S(['a', 'b', 'c', 1, 2, 3]) def test_massive_update(session): huge_data = set('{0}'.format(i) for i in range(1010)) set_ = session.get(key('test_set_massive_update'), Set) set_.update(huge_data) assert set(set_) == huge_data def test_update_t(session): session2 = get_session() keyid = key('test_set_update_t') keyid2 = key('test_set_update_t2') def reset(): return session.set(keyid, S('abc'), Set) set_ = reset() set2 = session.set(keyid2, S('cde'), Set) setx = session2.get(keyid, Set) with Transaction(session, [keyid]): card = len(set_) assert card == 3 set_.update('cde') assert setx == S('abc') with raises(CommitError): len(set_) assert set_ == S(setx) == S('abcde') set_ = reset() with Transaction(session, [keyid]): card = len(set_) assert card == 3 set_.update(set2) assert setx == S('abc') with raises(CommitError): len(set_) assert set_ == S(setx) == S('abcde') set_ = reset() with Transaction(session, [keyid]): card = len(set_) assert card == 3 set_.update(set2, 'adfg') assert setx == S('abc') with raises(CommitError): len(set_) assert set_ == S(setx) == S('abcdefg') def test_intersection_update(session): def reset(): return session.set(key('test_set_intersection_update'), S('abc'), Set) set_ = reset() set2 = session.set(key('test_set_intersection_update2'), S('bcd'), Set) set3 = session.set(key('test_set_intersection_update3'), S('bef'), Set) set_.intersection_update('bcde') assert set_ == S('bc') reset() set_.intersection_update('bcde', 'cdef') assert set_ == S('c') reset() set_.intersection_update(S('bcde')) assert set_ == S('bc') reset() set_.intersection_update(S('bcde'), 'cdef') assert set_ == S('c') 
reset() set_.intersection_update(S('bcde'), S('cdef')) assert set_ == S('c') reset() set_.intersection_update(set2) assert set_ == S('bc') reset() set_.intersection_update(set2, set3) assert set_ == S('b') reset() set_.intersection_update(set2, set3, 'bcfg') assert set_ == S('b') reset() set_.intersection_update(set2, set3, 'acfg') assert set_ == S() reset() set_ &= S('bcd') assert set_ == S('bc') reset() set_ &= set2 assert set_ == S('bc') reset() with raises(TypeError): set_ &= 'cde' def resetx(): return session.set(key('test_setx_intersection_update'), S([1, 2, 3]), IntSet) setx = resetx() sety = session.set(key('test_setx_intersection_update2'), S([2, 3, 4]), IntSet) setz = session.set(key('test_setx_intersection_update3'), S([1, 2, 5]), IntSet) setx.intersection_update([2, 3, 4]) assert setx == S([2, 3]) resetx() setx.intersection_update([2, 3, 4], [1, 2, 5]) assert setx == S([2]) resetx() setx.intersection_update(S([2, 3, 4])) assert setx == S([2, 3]) resetx() setx.intersection_update(S([2, 3, 4]), [1, 2, 5]) assert setx == S([2]) resetx() setx.intersection_update(S([2, 3, 4]), S([1, 2, 5])) assert setx == S([2]) resetx() setx.intersection_update(sety) assert setx == S([2, 3]) resetx() setx.intersection_update(sety, setz) assert setx == S([2]) resetx() setx.intersection_update(sety, setz, [1, 2, 5]) assert setx == S([2]) resetx() setx &= S([2, 3, 4]) assert setx == S([2, 3]) resetx() setx &= sety assert setx == S([2, 3]) resetx() with raises(TypeError): setx &= [3, 4, 5] resetx() set_.intersection_update(setx) assert set_ == S([]) resetx() set_.intersection_update(setx, sety) assert set_ == S([]) resetx() set_.intersection_update(set2, setx, sety) assert set_ == S([]) def test_intersection_update_t(session): session2 = get_session() keyid = key('test_set_intersection_update_t') keyid2 = key('test_set_intersection_update_t2') set_ = session.set(keyid, S('abc'), Set) set2 = session.set(keyid2, S('bcd'), Set) setx = session2.get(keyid, Set) with Transaction(session, [keyid, keyid2]): card = len(set_) assert card == 3 set_.intersection_update(set2) assert setx == S('abc') assert set_ == S(setx) == S('bc') with Transaction(session, [keyid, keyid2]): set_.intersection_update(set2) with raises(CommitError): len(set_) def test_difference_update(session): def reset(): return session.set(key('test_set_difference_update'), S('abcd'), Set) set_ = reset() set2 = session.set(key('test_set_difference_update2'), S('bde1'), Set) set3 = session.set(key('test_set_difference_update3'), S('az'), Set) set_.difference_update() assert set_ == S('abcd') reset() set_.difference_update(set2) assert set_ == S('ac') reset() set_.difference_update(set2, set3) assert set_ == S('c') reset() set_.difference_update(set2, 'az') assert set_ == S('c') reset() set_.difference_update(set2, S('az')) assert set_ == S('c') reset() set_.difference_update('bdef') assert set_ == S('ac') reset() set_.difference_update('bdef', set3) assert set_ == S('c') reset() set_.difference_update('bdef', 'az') assert set_ == S('c') reset() set_.difference_update('bdef', S('az')) assert set_ == S('c') reset() set_.difference_update(S('bdef')) assert set_ == S('ac') reset() set_.difference_update(S('bdef'), set3) assert set_ == S('c') reset() set_.difference_update(S('bdef'), 'az') assert set_ == S('c') reset() set_.difference_update(S('bdef'), S('az')) assert set_ == S('c') reset() set_ -= set2 assert set_ == S('ac') reset() set_ -= S('bdef') assert set_ == S('ac') reset() with raises(TypeError): set_ -= 'bdef' def resetx(): return 
session.set(key('test_setx_difference_update'), S([1, 2, 3, 4]), IntSet) setx = resetx() sety = session.set(key('test_setx_difference_update2'), S([2, 4, 5, 6]), IntSet) setz = session.set(key('test_setx_difference_update3'), S([1, 7]), IntSet) setx.difference_update() assert setx == S([1, 2, 3, 4]) resetx() setx.difference_update(sety) assert setx == S([1, 3]) resetx() setx.difference_update(sety, setz) assert setx == S([3]) resetx() setx.difference_update(sety, [1, 7]) assert setx == S([3]) resetx() setx.difference_update(sety, S([1, 7])) assert setx == S([3]) resetx() setx.difference_update([2, 4, 5, 6]) assert setx == S([1, 3]) resetx() setx.difference_update([2, 4, 5, 6], setz) assert setx == S([3]) resetx() setx.difference_update([2, 4, 5, 6], [1, 7]) assert setx == S([3]) resetx() setx.difference_update([2, 4, 5, 6], S([1, 7])) assert setx == S([3]) resetx() setx.difference_update(S([2, 4, 5, 6])) assert setx == S([1, 3]) resetx() setx.difference_update(S([2, 4, 5, 6]), [1, 7]) assert setx == S([3]) resetx() setx.difference_update(S([2, 4, 5, 6]), S([1, 7])) assert setx == S([3]) resetx() setx.difference_update(['1', '2', 3]) assert setx == S([1, 2, 4]) resetx() setx -= sety assert setx == S([1, 3]) resetx() setx -= S([2, 4, 5, 6]) assert setx == S([1, 3]) resetx() with raises(TypeError): setx - [2, 4, 5, 6] # mismatched value_type NInt vs. Bulk: reset() resetx() set2.difference_update(setx) assert set2 == S('bde1') reset() set2.difference_update(setx, setz) assert set2 == S('bde1') reset() resetx() setx.difference_update(set2) assert setx == S([1, 2, 3, 4]) resetx() setx.difference_update(set2, set3) assert setx == S([1, 2, 3, 4]) def test_difference_update_t(session): session2 = get_session() keyid = key('test_set_difference_update_t') keyid2 = key('test_set_difference_update_t2') set_ = session.set(keyid, S('abcd'), Set) set2 = session.set(keyid2, S('bde1'), Set) setx = session2.get(keyid, Set) with Transaction(session, [keyid, keyid2]): card = len(set_) assert card == 4 set_.difference_update(set2) assert setx == S('abcd') assert set_ == S(setx) == S('ac') with Transaction(session, [keyid, keyid2]): set_.difference_update(set2) with raises(CommitError): len(set_) def test_symmetric_difference_update(session): def reset(): return session.set(key('test_set_symmdiff'), S('abcd'), Set) set_ = reset() set2 = session.set(key('test_set_symmdiff2'), S('bde1'), Set) set_.symmetric_difference_update(set2) assert set_ == S('ace1') reset() set_.symmetric_difference_update('bdef') assert set_ == S('acef') reset() set_.symmetric_difference_update(S('bdef')) assert set_ == S('acef') reset() set_ ^= set2 assert set_ == S('ace1') reset() set_ ^= S('bdef') assert set_ == S('acef') reset() with raises(TypeError): set_ ^= 'bdef' def resetx(): return session.set(key('test_setx_symmdiff'), S([1, 2, 3, 4]), IntSet) setx = resetx() sety = session.set(key('test_setx_symmdiff2'), S([2, 4, 5, 6]), IntSet) setx.symmetric_difference_update(sety) assert setx == S([1, 3, 5, 6]) resetx() setx.symmetric_difference_update([2, 4, 5, 6]) assert setx == S([1, 3, 5, 6]) resetx() setx.symmetric_difference_update(S([2, 4, 5, 6])) assert setx == S([1, 3, 5, 6]) resetx() setx ^= sety assert setx == S([1, 3, 5, 6]) resetx() setx ^= S([2, 4, 5, 6]) assert setx == S([1, 3, 5, 6]) with raises(TypeError): setx ^= [2, 4, 5, 6] # mismatched value_type NInt vs. 
Bulk: resetx() with raises(TypeError): setx.symmetric_difference_update(set2) reset() with raises(TypeError): set2.symmetric_difference_update(setx) def test_symmetric_difference_update_t(session): session2 = get_session() keyid = key('test_set_symmdiff_t') keyid2 = key('test_set_symmdiff_t2') set_ = session.set(keyid, S('abcd'), Set) set2 = session.set(keyid2, S('bde1'), Set) setx = session2.get(keyid, Set) with Transaction(session, [keyid, keyid2]): card = len(set_) assert card == 4 set_.symmetric_difference_update(set2) assert setx == S('abcd') assert set_ == S(setx) == S('ace1') with Transaction(session, [keyid, keyid2]): set_.symmetric_difference_update(set2) with raises(CommitError): len(set_) def test_repr(session): keyid = key('test_set_repr') set_ = session.set(keyid, set([1, 2, 3]), IntSet) assert '<sider.set.Set (' + repr(keyid) + ') {1, 2, 3}>' == repr(set_)
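# ---------------------------------------------------------------------------
# Illustrative sketch using plain built-in sets (no Redis, no sider): the
# method/operator asymmetry the tests above rely on.  Like sider.set.Set,
# Python's set accepts arbitrary iterables in its named methods (issubset,
# union, difference, ...) but restricts the operator forms (<=, |, -, ^) to
# other sets, raising TypeError otherwise.
a = set('abc')
assert a.issubset('abcd')                 # method form: any iterable works
assert a.union('cde') == set('abcde')
assert a.difference('bdef') == set('ac')
try:
    a <= 'abcd'                           # operator form: non-set operand
except TypeError:
    pass
else:
    raise AssertionError('expected TypeError for set <= str')
try:
    a | 'cde'
except TypeError:
    pass
else:
    raise AssertionError('expected TypeError for set | str')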
import theano from theano import tensor as T from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams from theano.tensor.signal import pool from theano.tensor.nnet import conv3d2d from theano.printing import Print try: import theano.sparse as th_sparse_module except ImportError: th_sparse_module = None try: from theano.tensor.nnet.nnet import softsign as T_softsign except ImportError: from theano.sandbox.softsign import softsign as T_softsign import inspect import numpy as np from .common import _FLOATX, _EPSILON, _IMAGE_DIM_ORDERING py_all = all # INTERNAL UTILS theano.config.floatX = _FLOATX _LEARNING_PHASE = T.scalar(dtype='uint8', name='keras_learning_phase') # 0 = test, 1 = train def learning_phase(): # False = test, True = train return _LEARNING_PHASE def set_learning_phase(value): global _LEARNING_PHASE if value not in {0, 1}: raise ValueError('Expected learning phase to be ' '0 or 1.') _LEARNING_PHASE = value # VARIABLE MANIPULATION def _assert_sparse_module(): if not th_sparse_module: raise ImportError("Failed to import theano.sparse\n" "You probably need to pip install nose-parameterized") def is_sparse(tensor): return th_sparse_module and isinstance(tensor.type, th_sparse_module.SparseType) def to_dense(tensor): if is_sparse(tensor): return th_sparse_module.dense_from_sparse(tensor) else: return tensor def variable(value, dtype=_FLOATX, name=None): '''Instantiate a tensor variable. ''' if hasattr(value, 'tocoo'): _assert_sparse_module() return th_sparse_module.as_sparse_variable(value) else: value = np.asarray(value, dtype=dtype) return theano.shared(value=value, name=name, strict=False) def placeholder(shape=None, ndim=None, dtype=_FLOATX, sparse=False, name=None): '''Instantiate an input data placeholder variable. ''' if shape is None and ndim is None: raise Exception('Specify either a shape or ndim value.') if shape is not None: ndim = len(shape) else: shape = tuple([None for _ in range(ndim)]) broadcast = (False,) * ndim if sparse: _assert_sparse_module() x = th_sparse_module.csr_matrix(name=name, dtype=dtype) else: x = T.TensorType(dtype, broadcast)(name) x._keras_shape = shape x._uses_learning_phase = False return x def shape(x): '''Return the shape of a tensor. Warning: type returned will be different for Theano backend (Theano tensor type) and TF backend (TF TensorShape). ''' return x.shape def ndim(x): return x.ndim def dtype(x): return x.dtype def eval(x): '''Run a graph. ''' return to_dense(x).eval() def zeros(shape, dtype=_FLOATX, name=None): '''Instantiate an all-zeros variable. ''' return variable(np.zeros(shape), dtype, name) def ones(shape, dtype=_FLOATX, name=None): '''Instantiate an all-ones variable. ''' return variable(np.ones(shape), dtype, name) def eye(size, dtype=_FLOATX, name=None): '''Instantiate an identity matrix. ''' return variable(np.eye(size), dtype, name) def ones_like(x): return T.ones_like(x) def zeros_like(x): return T.zeros_like(x) def random_uniform_variable(shape, low, high, dtype=_FLOATX, name=None): return variable(np.random.uniform(low=low, high=high, size=shape), dtype=dtype, name=name) def random_normal_variable(shape, mean, scale, dtype=_FLOATX, name=None): return variable(np.random.normal(loc=0.0, scale=scale, size=shape), dtype=dtype, name=name) def count_params(x): '''Return number of scalars in a tensor. Return: numpy integer. 
''' return np.prod(x.shape.eval()) def cast(x, dtype): return T.cast(x, dtype) # UPDATES OPS def update(x, new_x): return (x, new_x) def update_add(x, increment): return (x, x + increment) def update_sub(x, decrement): return (x, x - decrement) def moving_average_update(variable, value, momentum): return (variable, variable * momentum + value * (1. - momentum)) # LINEAR ALGEBRA ''' Assumed overridden: +, -, /, *, +=, -=, *=, /= ''' def dot(x, y): if is_sparse(x): return th_sparse_module.basic.structured_dot(x, y) else: return T.dot(x, y) def batch_dot(x, y, axes=None): '''Batchwise dot product. batch_dot results in a tensor with less dimensions than the input. If the number of dimensions is reduced to 1, we use `expand_dims` to make sure that ndim is at least 2. # Arguments x, y: tensors with ndim >= 2 axes: list (or single) int with target dimensions # Returns A tensor with shape equal to the concatenation of x's shape (less the dimension that was summed over) and y's shape (less the batch dimension and the dimension that was summed over). If the final rank is 1, we reshape it to (batch_size, 1). # Examples Assume x = [[1, 2], [3, 4]] and y = [[5, 6], [7, 8]] batch_dot(x, y, axes=1) = [[17, 53]] which is the main diagonal of x.dot(y.T), although we never have to calculate the off-diagonal elements. Shape inference: Let x's shape be (100, 20) and y's shape be (100, 30, 20). If dot_axes is (1, 2), to find the output shape of resultant tensor, loop through each dimension in x's shape and y's shape: x.shape[0] : 100 : append to output shape x.shape[1] : 20 : do not append to output shape, dimension 1 of x has been summed over. (dot_axes[0] = 1) y.shape[0] : 100 : do not append to output shape, always ignore first dimension of y y.shape[1] : 30 : append to output shape y.shape[2] : 20 : do not append to output shape, dimension 2 of y has been summed over. (dot_axes[1] = 2) output_shape = (100, 30) ''' if type(axes) == int: axes = (axes, axes) if axes is None: # behaves like tf.batch_matmul as default axes = [x.ndim - 1, y.ndim - 2] out = T.batched_tensordot(x, y, axes=axes) if ndim(out) == 1: out = expand_dims(out, 1) return out def transpose(x): return T.transpose(x) def gather(reference, indices): '''reference: a tensor. indices: an int tensor of indices. Return: a tensor of same type as reference. ''' return reference[indices] # ELEMENT-WISE OPERATIONS def max(x, axis=None, keepdims=False): return T.max(x, axis=axis, keepdims=keepdims) def min(x, axis=None, keepdims=False): return T.min(x, axis=axis, keepdims=keepdims) def sum(x, axis=None, keepdims=False): '''Sum of the values in a tensor, alongside the specified axis. ''' return T.sum(x, axis=axis, keepdims=keepdims) def prod(x, axis=None, keepdims=False): '''Multiply the values in a tensor, alongside the specified axis. ''' return T.prod(x, axis=axis, keepdims=keepdims) def mean(x, axis=None, keepdims=False): dtype = None if 'int' in x.dtype: dtype = _FLOATX return T.mean(x, axis=axis, keepdims=keepdims, dtype=dtype) def std(x, axis=None, keepdims=False): return T.std(x, axis=axis, keepdims=keepdims) def var(x, axis=None, keepdims=False): return T.var(x, axis=axis, keepdims=keepdims) def any(x, axis=None, keepdims=False): '''Bitwise reduction (logical OR). ''' return T.any(x, axis=axis, keepdims=keepdims) def all(x, axis=None, keepdims=False): '''Bitwise reduction (logical AND). 
''' return T.all(x, axis=axis, keepdims=keepdims) def argmax(x, axis=-1): return T.argmax(x, axis=axis, keepdims=False) def argmin(x, axis=-1): return T.argmin(x, axis=axis, keepdims=False) def square(x): return T.sqr(x) def abs(x): return T.abs_(x) def sqrt(x): x = T.clip(x, 0., np.inf) return T.sqrt(x) def exp(x): return T.exp(x) def log(x): return T.log(x) def round(x): return T.round(x) def sign(x): return T.sgn(x) def pow(x, a): return T.pow(x, a) def clip(x, min_value, max_value): if max_value < min_value: max_value = min_value return T.clip(x, min_value, max_value) def equal(x, y): return T.eq(x, y) def not_equal(x, y): return T.neq(x, y) def greater(x, y): return T.gt(x, y) def greater_equal(x, y): return T.ge(x, y) def lesser(x, y): return T.lt(x, y) def lesser_equal(x, y): return T.le(x, y) def maximum(x, y): return T.maximum(x, y) def minimum(x, y): return T.minimum(x, y) def sin(x): return T.sin(x) def cos(x): return T.cos(x) def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.0001): '''Compute mean and std for batch then apply batch_normalization on batch. ''' var = x.var(reduction_axes) mean = x.mean(reduction_axes) target_shape = [] for axis in range(ndim(x)): if axis in reduction_axes: target_shape.append(1) else: target_shape.append(x.shape[axis]) target_shape = T.stack(*target_shape) broadcast_mean = T.reshape(mean, target_shape) broadcast_var = T.reshape(var, target_shape) broadcast_beta = T.reshape(beta, target_shape) broadcast_gamma = T.reshape(gamma, target_shape) normed = batch_normalization(x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon) return normed, mean, var def batch_normalization(x, mean, var, beta, gamma, epsilon=0.0001): '''Apply batch normalization on x given mean, var, beta and gamma. ''' ndim = x.ndim dev = theano.config.device use_cudnn = ndim < 5 and (dev.startswith('cuda') or dev.startswith('gpu')) if use_cudnn: try: return theano.sandbox.cuda.dnn.dnn_batch_normalization_test(x, gamma, beta, mean, var, 'spatial', epsilon) except AttributeError: pass return T.nnet.bn.batch_normalization(x, gamma, beta, mean, sqrt(var + epsilon), mode='high_mem') # SHAPE OPERATIONS def concatenate(tensors, axis=-1): if py_all([is_sparse(x) for x in tensors]): axis = axis % ndim(tensors[0]) if axis == 0: return th_sparse_module.basic.vstack(tensors, format='csr') elif axis == 1: return th_sparse_module.basic.hstack(tensors, format='csr') else: raise Exception('Invalid concat axis for sparse matrix: ' + axis) else: return T.concatenate([to_dense(x) for x in tensors], axis=axis) def reshape(x, shape): return T.reshape(x, shape) def permute_dimensions(x, pattern): '''Transpose dimensions. pattern should be a tuple or list of dimension indices, e.g. [0, 2, 1]. ''' pattern = tuple(pattern) return x.dimshuffle(pattern) def repeat_elements(x, rep, axis): '''Repeat the elements of a tensor along an axis, like np.repeat. If x has shape (s1, s2, s3) and axis=1, the output will have shape (s1, s2 * rep, s3). ''' return T.repeat(x, rep, axis=axis) def resize_images(X, height_factor, width_factor, dim_ordering): '''Resize the images contained in a 4D tensor of shape - [batch, channels, height, width] (for 'th' dim_ordering) - [batch, height, width, channels] (for 'tf' dim_ordering) by a factor of (height_factor, width_factor). Both factors should be positive integers. 
''' if dim_ordering == 'th': output = repeat_elements(X, height_factor, axis=2) output = repeat_elements(output, width_factor, axis=3) return output elif dim_ordering == 'tf': output = repeat_elements(X, height_factor, axis=1) output = repeat_elements(output, width_factor, axis=2) return output else: raise Exception('Invalid dim_ordering: ' + dim_ordering) def resize_volumes(X, depth_factor, height_factor, width_factor, dim_ordering): '''Resize the volume contained in a 5D tensor of shape - [batch, channels, depth, height, width] (for 'th' dim_ordering) - [batch, depth, height, width, channels] (for 'tf' dim_ordering) by a factor of (depth_factor, height_factor, width_factor). Both factors should be positive integers. ''' if dim_ordering == 'th': output = repeat_elements(X, depth_factor, axis=2) output = repeat_elements(output, height_factor, axis=3) output = repeat_elements(output, width_factor, axis=4) return output elif dim_ordering == 'tf': output = repeat_elements(X, depth_factor, axis=1) output = repeat_elements(output, height_factor, axis=2) output = repeat_elements(output, width_factor, axis=3) return output else: raise Exception('Invalid dim_ordering: ' + dim_ordering) def repeat(x, n): '''Repeat a 2D tensor. If x has shape (samples, dim) and n=2, the output will have shape (samples, 2, dim). ''' assert x.ndim == 2 x = x.dimshuffle((0, 'x', 1)) return T.extra_ops.repeat(x, n, axis=1) def tile(x, n): return T.tile(x, n) def flatten(x): return T.flatten(x) def batch_flatten(x): '''Turn a n-D tensor into a 2D tensor where the first dimension is conserved. ''' x = T.reshape(x, (x.shape[0], T.prod(x.shape) // x.shape[0])) return x def expand_dims(x, dim=-1): '''Add a 1-sized dimension at index "dim". ''' pattern = [i for i in range(x.type.ndim)] if dim < 0: if x.type.ndim == 0: dim = 0 else: dim = dim % x.type.ndim + 1 pattern.insert(dim, 'x') return x.dimshuffle(pattern) def squeeze(x, axis): '''Remove a 1-dimension from the tensor at index "axis". ''' shape = list(x.shape) shape.pop(axis) return T.reshape(x, tuple(shape)) def temporal_padding(x, padding=1): '''Pad the middle dimension of a 3D tensor with "padding" zeros left and right. Apologies for the inane API, but Theano makes this really hard. ''' input_shape = x.shape output_shape = (input_shape[0], input_shape[1] + 2 * padding, input_shape[2]) output = T.zeros(output_shape) return T.set_subtensor(output[:, padding:x.shape[1] + padding, :], x) def spatial_2d_padding(x, padding=(1, 1), dim_ordering='th'): '''Pad the 2nd and 3rd dimensions of a 4D tensor with "padding[0]" and "padding[1]" (resp.) zeros left and right. ''' input_shape = x.shape if dim_ordering == 'th': output_shape = (input_shape[0], input_shape[1], input_shape[2] + 2 * padding[0], input_shape[3] + 2 * padding[1]) output = T.zeros(output_shape) indices = (slice(None), slice(None), slice(padding[0], input_shape[2] + padding[0]), slice(padding[1], input_shape[3] + padding[1])) elif dim_ordering == 'tf': output_shape = (input_shape[0], input_shape[1] + 2 * padding[0], input_shape[2] + 2 * padding[1], input_shape[3]) output = T.zeros(output_shape) indices = (slice(None), slice(padding[0], input_shape[1] + padding[0]), slice(padding[1], input_shape[2] + padding[1]), slice(None)) else: raise Exception('Invalid dim_ordering: ' + dim_ordering) return T.set_subtensor(output[indices], x) def spatial_3d_padding(x, padding=(1, 1, 1), dim_ordering='th'): '''Pad the 2nd, 3rd and 4th dimensions of a 5D tensor with "padding[0]", "padding[1]" and "padding[2]" (resp.) 
zeros left and right. ''' input_shape = x.shape if dim_ordering == 'th': output_shape = (input_shape[0], input_shape[1], input_shape[2] + 2 * padding[0], input_shape[3] + 2 * padding[1], input_shape[4] + 2 * padding[2]) output = T.zeros(output_shape) indices = (slice(None), slice(None), slice(padding[0], input_shape[2] + padding[0]), slice(padding[1], input_shape[3] + padding[1]), slice(padding[2], input_shape[4] + padding[2])) elif dim_ordering == 'tf': output_shape = (input_shape[0], input_shape[1] + 2 * padding[0], input_shape[2] + 2 * padding[1], input_shape[3] + 2 * padding[2], input_shape[4]) output = T.zeros(output_shape) indices = (slice(None), slice(padding[0], input_shape[1] + padding[0]), slice(padding[1], input_shape[2] + padding[1]), slice(padding[2], input_shape[3] + padding[2]), slice(None)) else: raise Exception('Invalid dim_ordering: ' + dim_ordering) return T.set_subtensor(output[indices], x) def pack(x): return T.stack(*x) def one_hot(indices, nb_classes): '''Input: nD integer tensor of shape (batch_size, dim1, dim2, ... dim(n-1)) Output: (n + 1)D one hot representation of the input with shape (batch_size, dim1, dim2, ... dim(n-1), nb_classes) ''' input_shape = tuple((indices.shape[i] for i in range(indices.ndim))) indices = T.flatten(indices) oh = T.extra_ops.to_one_hot(indices, nb_classes) oh = T.reshape(oh, input_shape + (nb_classes,)) return oh def reverse(x, axes): '''Reverse a tensor along the the specified axes ''' if type(axes) == int: axes = [axes] slices = [slice(None, None, -1) if i in axes else slice(None, None, None) for i in range(x.ndim)] return x[slices] # VALUE MANIPULATION def get_value(x): if not hasattr(x, 'get_value'): raise Exception("'get_value() can only be called on a variable. " + "If you have an expression instead, use eval().") return x.get_value() def batch_get_value(xs): '''Returns the value of more than one tensor variable, as a list of Numpy arrays. ''' return [get_value(x) for x in xs] def set_value(x, value): x.set_value(np.asarray(value, dtype=x.dtype)) def batch_set_value(tuples): for x, value in tuples: x.set_value(np.asarray(value, dtype=x.dtype)) def get_variable_shape(x): return x.get_value(borrow=True, return_internal_type=True).shape def print_tensor(x, message=''): '''Print the message and the tensor when evaluated and return the same tensor. ''' p_op = Print(message) return p_op(x) # GRAPH MANIPULATION class Function(object): def __init__(self, inputs, outputs, updates=[], **kwargs): self.function = theano.function(inputs, outputs, updates=updates, allow_input_downcast=True, on_unused_input='ignore', **kwargs) def __call__(self, inputs): assert type(inputs) in {list, tuple} return self.function(*inputs) def function(inputs, outputs, updates=[], **kwargs): if len(kwargs) > 0: function_args = inspect.getargspec(theano.function)[0] for key in kwargs.keys(): if key not in function_args: msg = "Invalid argument '%s' passed to K.function" % key raise ValueError(msg) return Function(inputs, outputs, updates=updates, **kwargs) def gradients(loss, variables): return T.grad(loss, variables) def stop_gradient(variables): '''Returns `variables` but with zero gradient with respect to every other variables. ''' return theano.gradient.disconnected_grad(variables) # CONTROL FLOW def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None): '''Iterates over the time dimension of a tensor. # Arguments inputs: tensor of temporal data of shape (samples, time, ...) (at least 3D). 
step_function: Parameters: input: tensor with shape (samples, ...) (no time dimension), representing input for the batch of samples at a certain time step. states: list of tensors. Returns: output: tensor with shape (samples, ...) (no time dimension), new_states: list of tensors, same length and shapes as 'states'. initial_states: tensor with shape (samples, ...) (no time dimension), containing the initial values for the states used in the step function. go_backwards: boolean. If True, do the iteration over the time dimension in reverse order. mask: binary tensor with shape (samples, time), with a zero for every element that is masked. constants: a list of constant values passed at each step. unroll: whether to unroll the RNN or to use a symbolic loop (`scan`). input_length: must be specified if using `unroll`. # Returns A tuple (last_output, outputs, new_states). last_output: the latest output of the rnn, of shape (samples, ...) outputs: tensor with shape (samples, time, ...) where each entry outputs[s, t] is the output of the step function at time t for sample s. new_states: list of tensors, latest states returned by the step function, of shape (samples, ...). ''' ndim = inputs.ndim assert ndim >= 3, 'Input should be at least 3D.' if unroll: if input_length is None: raise Exception('When specifying `unroll=True`, an `input_length` ' 'must be provided to `rnn`.') axes = [1, 0] + list(range(2, ndim)) inputs = inputs.dimshuffle(axes) if constants is None: constants = [] if mask is not None: if mask.ndim == ndim-1: mask = expand_dims(mask) assert mask.ndim == ndim mask = mask.dimshuffle(axes) if unroll: indices = list(range(input_length)) if go_backwards: indices = indices[::-1] successive_outputs = [] successive_states = [] states = initial_states for i in indices: output, new_states = step_function(inputs[i], states + constants) if len(successive_outputs) == 0: prev_output = zeros_like(output) else: prev_output = successive_outputs[-1] output = T.switch(mask[i], output, prev_output) kept_states = [] for state, new_state in zip(states, new_states): kept_states.append(T.switch(mask[i], new_state, state)) states = kept_states successive_outputs.append(output) successive_states.append(states) outputs = T.stack(*successive_outputs) states = [] for i in range(len(successive_states[-1])): states.append(T.stack(*[states_at_step[i] for states_at_step in successive_states])) else: # build an all-zero tensor of shape (samples, output_dim) initial_output = step_function(inputs[0], initial_states + constants)[0] * 0 # Theano gets confused by broadcasting patterns in the scan op initial_output = T.unbroadcast(initial_output, 0, 1) def _step(input, mask, output_tm1, *states): output, new_states = step_function(input, states) # output previous output if masked. 
output = T.switch(mask, output, output_tm1) return_states = [] for state, new_state in zip(states, new_states): return_states.append(T.switch(mask, new_state, state)) return [output] + return_states results, _ = theano.scan( _step, sequences=[inputs, mask], outputs_info=[initial_output] + initial_states, non_sequences=constants, go_backwards=go_backwards) # deal with Theano API inconsistency if type(results) is list: outputs = results[0] states = results[1:] else: outputs = results states = [] else: if unroll: indices = list(range(input_length)) if go_backwards: indices = indices[::-1] successive_outputs = [] successive_states = [] states = initial_states for i in indices: output, states = step_function(inputs[i], states + constants) successive_outputs.append(output) successive_states.append(states) outputs = T.stack(*successive_outputs) states = [] for i in range(len(successive_states[-1])): states.append(T.stack(*[states_at_step[i] for states_at_step in successive_states])) else: def _step(input, *states): output, new_states = step_function(input, states) return [output] + new_states results, _ = theano.scan( _step, sequences=inputs, outputs_info=[None] + initial_states, non_sequences=constants, go_backwards=go_backwards) # deal with Theano API inconsistency if type(results) is list: outputs = results[0] states = results[1:] else: outputs = results states = [] outputs = T.squeeze(outputs) last_output = outputs[-1] axes = [1, 0] + list(range(2, outputs.ndim)) outputs = outputs.dimshuffle(axes) states = [T.squeeze(state[-1]) for state in states] return last_output, outputs, states def switch(condition, then_expression, else_expression): '''condition: scalar tensor. ''' return T.switch(condition, then_expression, else_expression) def in_train_phase(x, alt): if _LEARNING_PHASE is 1: return x elif _LEARNING_PHASE is 0: return alt x = T.switch(_LEARNING_PHASE, x, alt) x._uses_learning_phase = True return x def in_test_phase(x, alt): if _LEARNING_PHASE is 1: return alt elif _LEARNING_PHASE is 0: return x x = T.switch(_LEARNING_PHASE, alt, x) x._uses_learning_phase = True return x # NN OPERATIONS def relu(x, alpha=0., max_value=None): assert hasattr(T.nnet, 'relu'), ('It looks like like your version of ' 'Theano is out of date. 
' 'Install the latest version with:\n' 'pip install git+git://github.com/Theano/Theano.git --upgrade --no-deps') x = T.nnet.relu(x, alpha) if max_value is not None: x = T.minimum(x, max_value) return x def softmax(x): return T.nnet.softmax(x) def softplus(x): return T.nnet.softplus(x) def softsign(x): return T_softsign(x) def categorical_crossentropy(output, target, from_logits=False): if from_logits: output = T.nnet.softmax(output) else: # scale preds so that the class probas of each sample sum to 1 output /= output.sum(axis=-1, keepdims=True) # avoid numerical instability with _EPSILON clipping output = T.clip(output, _EPSILON, 1.0 - _EPSILON) return T.nnet.categorical_crossentropy(output, target) def sparse_categorical_crossentropy(output, target, from_logits=False): target = T.cast(T.flatten(target), 'int32') target = T.extra_ops.to_one_hot(target, nb_class=output.shape[-1]) target = reshape(target, shape(output)) return categorical_crossentropy(output, target, from_logits) def binary_crossentropy(output, target, from_logits=False): if from_logits: output = T.nnet.sigmoid(output) # avoid numerical instability with _EPSILON clipping output = T.clip(output, _EPSILON, 1.0 - _EPSILON) return T.nnet.binary_crossentropy(output, target) def sigmoid(x): return T.nnet.sigmoid(x) def hard_sigmoid(x): return T.nnet.hard_sigmoid(x) def tanh(x): return T.tanh(x) def dropout(x, level, noise_shape=None, seed=None): '''Sets entries in `x` to zero at random, while scaling the entire tensor. # Arguments x: tensor level: fraction of the entries in the tensor that will be set to 0. noise_shape: shape for randomly generated keep/drop flags, must be broadcastable to the shape of `x` seed: random seed to ensure determinism. ''' if level < 0. or level >= 1: raise Exception('Dropout level must be in interval [0, 1[.') if seed is None: seed = np.random.randint(1, 10e6) rng = RandomStreams(seed=seed) retain_prob = 1. - level if noise_shape is None: random_tensor = rng.binomial(x.shape, p=retain_prob, dtype=x.dtype) else: random_tensor = rng.binomial(noise_shape, p=retain_prob, dtype=x.dtype) random_tensor = T.patternbroadcast(random_tensor, [dim == 1 for dim in noise_shape]) x *= random_tensor x /= retain_prob return x def l2_normalize(x, axis): norm = T.sqrt(T.sum(T.square(x), axis=axis, keepdims=True)) return x / norm # CONVOLUTIONS def _preprocess_conv2d_input(x, dim_ordering): if dim_ordering == 'tf': # TF uses the last dimension as channel dimension, # instead of the 2nd one. # TH input shape: (samples, input_depth, rows, cols) # TF input shape: (samples, rows, cols, input_depth) x = x.dimshuffle((0, 3, 1, 2)) return x def _preprocess_conv2d_kernel(kernel, dim_ordering): if dim_ordering == 'tf': # TF uses the last dimension as channel dimension, # instead of the 2nd one. 
# TH kernel shape: (depth, input_depth, rows, cols) # TF kernel shape: (rows, cols, input_depth, depth) kernel = kernel.dimshuffle((3, 2, 0, 1)) return kernel def _preprocess_border_mode(border_mode): if border_mode == 'same': th_border_mode = 'half' elif border_mode == 'valid': th_border_mode = 'valid' else: raise Exception('Border mode not supported: ' + str(border_mode)) return th_border_mode def _preprocess_image_shape(dim_ordering, image_shape): # Theano might not accept long type def int_or_none(value): try: return int(value) except TypeError: return None if dim_ordering == 'tf': if image_shape: image_shape = (image_shape[0], image_shape[3], image_shape[1], image_shape[2]) if image_shape is not None: image_shape = tuple(int_or_none(v) for v in image_shape) return image_shape def _preprocess_filter_shape(dim_ordering, filter_shape): # Theano might not accept long type def int_or_none(value): try: return int(value) except TypeError: return None if dim_ordering == 'tf': if filter_shape: filter_shape = (filter_shape[3], filter_shape[2], filter_shape[0], filter_shape[1]) if filter_shape is not None: filter_shape = tuple(int_or_none(v) for v in filter_shape) return filter_shape def _postprocess_conv2d_output(conv_out, x, border_mode, np_kernel, strides, dim_ordering): if border_mode == 'same': if np_kernel.shape[2] % 2 == 0: conv_out = conv_out[:, :, :(x.shape[2] + strides[0] - 1) // strides[0], :] if np_kernel.shape[3] % 2 == 0: conv_out = conv_out[:, :, :, :(x.shape[3] + strides[1] - 1) // strides[1]] if dim_ordering == 'tf': conv_out = conv_out.dimshuffle((0, 2, 3, 1)) return conv_out def conv2d(x, kernel, strides=(1, 1), border_mode='valid', dim_ordering=_IMAGE_DIM_ORDERING, image_shape=None, filter_shape=None, filter_dilation=(1, 1)): '''2D convolution. # Arguments kernel: kernel tensor. strides: strides tuple. border_mode: string, "same" or "valid". dim_ordering: "tf" or "th". Whether to use Theano or TensorFlow dimension ordering in inputs/kernels/ouputs. ''' if dim_ordering not in {'th', 'tf'}: raise Exception('Unknown dim_ordering ' + str(dim_ordering)) x = _preprocess_conv2d_input(x, dim_ordering) kernel = _preprocess_conv2d_kernel(kernel, dim_ordering) th_border_mode = _preprocess_border_mode(border_mode) np_kernel = kernel.eval() image_shape = _preprocess_image_shape(dim_ordering, image_shape) filter_shape = _preprocess_filter_shape(dim_ordering, filter_shape) # TODO: remove the if statement when theano with no filter dilation is deprecated. if filter_dilation == (1, 1): conv_out = T.nnet.conv2d(x, kernel, border_mode=th_border_mode, subsample=strides, input_shape=image_shape, filter_shape=filter_shape) else: conv_out = T.nnet.conv2d(x, kernel, border_mode=th_border_mode, subsample=strides, input_shape=image_shape, filter_shape=filter_shape, filter_dilation=filter_dilation) conv_out = _postprocess_conv2d_output(conv_out, x, border_mode, np_kernel, strides, dim_ordering) return conv_out def deconv2d(x, kernel, output_shape, strides=(1, 1), border_mode='valid', dim_ordering=_IMAGE_DIM_ORDERING, image_shape=None, filter_shape=None): '''2D deconvolution (transposed convolution). # Arguments kernel: kernel tensor. output_shape: desired dimensions of output. strides: strides tuple. border_mode: string, "same" or "valid". dim_ordering: "tf" or "th". Whether to use Theano or TensorFlow dimension ordering in inputs/kernels/ouputs. 
''' flip_filters = False if dim_ordering not in {'th', 'tf'}: raise Exception('Unknown dim_ordering ' + str(dim_ordering)) x = _preprocess_conv2d_input(x, dim_ordering) kernel = _preprocess_conv2d_kernel(kernel, dim_ordering) kernel = kernel.dimshuffle((1, 0, 2, 3)) th_border_mode = _preprocess_border_mode(border_mode) np_kernel = kernel.eval() filter_shape = _preprocess_filter_shape(dim_ordering, filter_shape) op = T.nnet.abstract_conv.AbstractConv2d_gradInputs(imshp=output_shape, kshp=filter_shape, subsample=strides, border_mode=th_border_mode, filter_flip=not flip_filters) conv_out = op(kernel, x, output_shape[2:]) conv_out = _postprocess_conv2d_output(conv_out, x, border_mode, np_kernel, strides, dim_ordering) return conv_out def atrous_conv2d(x, kernel, rate=1, border_mode='valid', dim_ordering=_IMAGE_DIM_ORDERING, image_shape=None, filter_shape=None): raise NotImplementedError def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), border_mode='valid', dim_ordering=_IMAGE_DIM_ORDERING): raise NotImplementedError def conv3d(x, kernel, strides=(1, 1, 1), border_mode='valid', dim_ordering='th', volume_shape=None, filter_shape=None): ''' Run on cuDNN if available. border_mode: string, "same" or "valid". ''' if dim_ordering not in {'th', 'tf'}: raise Exception('Unknown dim_ordering ' + str(dim_ordering)) if border_mode not in {'same', 'valid'}: raise Exception('Invalid border mode: ' + str(border_mode)) if dim_ordering == 'tf': # TF uses the last dimension as channel dimension, # instead of the 2nd one. # TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3) # TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, input_depth) # TH kernel shape: (out_depth, input_depth, kernel_dim1, kernel_dim2, kernel_dim3) # TF kernel shape: (kernel_dim1, kernel_dim2, kernel_dim3, input_depth, out_depth) x = x.dimshuffle((0, 4, 1, 2, 3)) kernel = kernel.dimshuffle((4, 3, 0, 1, 2)) if volume_shape: volume_shape = (volume_shape[0], volume_shape[4], volume_shape[1], volume_shape[2], volume_shape[3]) if filter_shape: filter_shape = (filter_shape[4], filter_shape[3], filter_shape[0], filter_shape[1], filter_shape[2]) if border_mode == 'same': assert(strides == (1, 1, 1)) pad_dim1 = (kernel.shape[2] - 1) pad_dim2 = (kernel.shape[3] - 1) pad_dim3 = (kernel.shape[4] - 1) output_shape = (x.shape[0], x.shape[1], x.shape[2] + pad_dim1, x.shape[3] + pad_dim2, x.shape[4] + pad_dim3) output = T.zeros(output_shape) indices = (slice(None), slice(None), slice(pad_dim1 // 2, x.shape[2] + pad_dim1 // 2), slice(pad_dim2 // 2, x.shape[3] + pad_dim2 // 2), slice(pad_dim3 // 2, x.shape[4] + pad_dim3 // 2)) x = T.set_subtensor(output[indices], x) border_mode = 'valid' border_mode_3d = (border_mode, border_mode, border_mode) conv_out = conv3d2d.conv3d(signals=x.dimshuffle(0, 2, 1, 3, 4), filters=kernel.dimshuffle(0, 2, 1, 3, 4), border_mode=border_mode_3d) conv_out = conv_out.dimshuffle(0, 2, 1, 3, 4) # support strides by manually slicing the output if strides != (1, 1, 1): conv_out = conv_out[:, :, ::strides[0], ::strides[1], ::strides[2]] if dim_ordering == 'tf': conv_out = conv_out.dimshuffle((0, 2, 3, 4, 1)) return conv_out def pool2d(x, pool_size, strides=(1, 1), border_mode='valid', dim_ordering='th', pool_mode='max'): if border_mode == 'same': w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1 h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1 padding = (w_pad, h_pad) elif border_mode == 'valid': padding = (0, 0) else: raise 
Exception('Invalid border mode: ' + str(border_mode)) if dim_ordering not in {'th', 'tf'}: raise Exception('Unknown dim_ordering ' + str(dim_ordering)) if dim_ordering == 'tf': x = x.dimshuffle((0, 3, 1, 2)) if pool_mode == 'max': pool_out = pool.pool_2d(x, ds=pool_size, st=strides, ignore_border=True, padding=padding, mode='max') elif pool_mode == 'avg': pool_out = pool.pool_2d(x, ds=pool_size, st=strides, ignore_border=True, padding=padding, mode='average_exc_pad') else: raise Exception('Invalid pooling mode: ' + str(pool_mode)) if border_mode == 'same': expected_width = (x.shape[2] + strides[0] - 1) // strides[0] expected_height = (x.shape[3] + strides[1] - 1) // strides[1] pool_out = pool_out[:, :, : expected_width, : expected_height] if dim_ordering == 'tf': pool_out = pool_out.dimshuffle((0, 2, 3, 1)) return pool_out def pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid', dim_ordering='th', pool_mode='max'): if border_mode == 'same': # TODO: add implementation for border_mode="same" raise Exception('border_mode="same" not supported with Theano.') elif border_mode == 'valid': ignore_border = True padding = (0, 0) else: raise Exception('Invalid border mode: ' + str(border_mode)) if dim_ordering not in {'th', 'tf'}: raise Exception('Unknown dim_ordering ' + str(dim_ordering)) if dim_ordering == 'tf': x = x.dimshuffle((0, 4, 1, 2, 3)) if pool_mode == 'max': # pooling over conv_dim2, conv_dim1 (last two channels) output = pool.pool_2d(input=x.dimshuffle(0, 1, 4, 3, 2), ds=(pool_size[1], pool_size[0]), st=(strides[1], strides[0]), ignore_border=ignore_border, padding=padding, mode='max') # pooling over conv_dim3 pool_out = pool.pool_2d(input=output.dimshuffle(0, 1, 4, 3, 2), ds=(1, pool_size[2]), st=(1, strides[2]), ignore_border=ignore_border, padding=padding, mode='max') elif pool_mode == 'avg': # pooling over conv_dim2, conv_dim1 (last two channels) output = pool.pool_2d(input=x.dimshuffle(0, 1, 4, 3, 2), ds=(pool_size[1], pool_size[0]), st=(strides[1], strides[0]), ignore_border=ignore_border, padding=padding, mode='average_exc_pad') # pooling over conv_dim3 pool_out = pool.pool_2d(input=output.dimshuffle(0, 1, 4, 3, 2), ds=(1, pool_size[2]), st=(1, strides[2]), ignore_border=ignore_border, padding=padding, mode='average_exc_pad') else: raise Exception('Invalid pooling mode: ' + str(pool_mode)) if dim_ordering == 'tf': pool_out = pool_out.dimshuffle((0, 2, 3, 4, 1)) return pool_out # RANDOMNESS def random_normal(shape, mean=0.0, std=1.0, dtype=_FLOATX, seed=None): if seed is None: seed = np.random.randint(1, 10e6) rng = RandomStreams(seed=seed) return rng.normal(size=shape, avg=mean, std=std, dtype=dtype) def random_uniform(shape, low=0.0, high=1.0, dtype=_FLOATX, seed=None): if seed is None: seed = np.random.randint(1, 10e6) rng = RandomStreams(seed=seed) return rng.uniform(shape, low=low, high=high, dtype=dtype) def random_binomial(shape, p=0.0, dtype=_FLOATX, seed=None): if seed is None: seed = np.random.randint(1, 10e6) rng = RandomStreams(seed=seed) return rng.binomial(shape, p=p, dtype=dtype) # Theano implementation of CTC # Used with permission from Shawn Tan # https://github.com/shawntan/ # Note that tensorflow's native CTC code is significantly # faster than this def ctc_interleave_blanks(Y): Y_ = T.alloc(-1, Y.shape[0] * 2 + 1) Y_ = T.set_subtensor(Y_[T.arange(Y.shape[0]) * 2 + 1], Y) return Y_ def ctc_create_skip_idxs(Y): skip_idxs = T.arange((Y.shape[0] - 3) // 2) * 2 + 1 non_repeats = T.neq(Y[skip_idxs], Y[skip_idxs + 2]) return skip_idxs[non_repeats.nonzero()] 
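# Illustration (not part of the original backend): ctc_interleave_blanks pads a
# label sequence with the CTC blank symbol (-1) before and after every label,
# the standard CTC label augmentation. Assuming an int32 label vector [1, 2, 2]:
#
#     y = T.as_tensor_variable(np.array([1, 2, 2], dtype='int32'))
#     ctc_interleave_blanks(y).eval()   # -> [-1, 1, -1, 2, -1, 2, -1]
#
# ctc_create_skip_idxs then yields the positions from which a skip transition
# over a blank is allowed, i.e. only where two consecutive labels differ.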
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev): active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()] active_next = T.cast(T.minimum( T.maximum( active + 1, T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1 ), log_p_curr.shape[0]), 'int32') common_factor = T.max(log_p_prev[:active]) p_prev = T.exp(log_p_prev[:active] - common_factor) _p_prev = zeros[:active_next] # copy over _p_prev = T.set_subtensor(_p_prev[:active], p_prev) # previous transitions _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1]) # skip transitions _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs]) updated_log_p_prev = T.log(_p_prev) + common_factor log_p_next = T.set_subtensor( zeros[:active_next], log_p_curr[:active_next] + updated_log_p_prev ) return active_next, log_p_next def ctc_path_probs(predict, Y, alpha=1e-4): smoothed_predict = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0] L = T.log(smoothed_predict) zeros = T.zeros_like(L[0]) base = T.set_subtensor(zeros[:1], np.float32(1)) log_first = zeros f_skip_idxs = ctc_create_skip_idxs(Y) b_skip_idxs = ctc_create_skip_idxs(Y[::-1]) # there should be a shortcut to calculating this def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev): f_active_next, log_f_next = ctc_update_log_p(f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev) b_active_next, log_b_next = ctc_update_log_p(b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev) return f_active_next, log_f_next, b_active_next, log_b_next [f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan( step, sequences=[L, L[::-1, ::-1]], outputs_info=[np.int32(1), log_first, np.int32(1), log_first]) idxs = T.arange(L.shape[1]).dimshuffle('x', 0) mask = (idxs < f_active.dimshuffle(0, 'x')) & (idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1] log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L return log_probs, mask def ctc_cost(predict, Y): log_probs, mask = ctc_path_probs(predict, ctc_interleave_blanks(Y)) common_factor = T.max(log_probs) total_log_prob = T.log(T.sum(T.exp(log_probs - common_factor)[mask.nonzero()])) + common_factor return -total_log_prob # batchifies original CTC code def ctc_batch_cost(y_true, y_pred, input_length, label_length): '''Runs CTC loss algorithm on each batch element. # Arguments y_true: tensor (samples, max_string_length) containing the truth labels y_pred: tensor (samples, time_steps, num_categories) containing the prediction, or output of the softmax input_length: tensor (samples,1) containing the sequence length for each batch item in y_pred label_length: tensor (samples,1) containing the sequence length for each batch item in y_true # Returns Tensor with shape (samples,1) containing the CTC loss of each element ''' def ctc_step(y_true_step, y_pred_step, input_length_step, label_length_step): y_pred_step = y_pred_step[0: input_length_step[0]] y_true_step = y_true_step[0:label_length_step[0]] return ctc_cost(y_pred_step, y_true_step) ret, _ = theano.scan( fn = ctc_step, outputs_info=None, sequences=[y_true, y_pred, input_length, label_length] ) ret = ret.dimshuffle('x', 0) return ret
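# --- Usage sketch (not part of the original backend) ----------------------------
# A minimal, illustrative wiring of ctc_batch_cost; the symbolic variable names
# below are assumptions for this sketch, and the shapes follow the docstring
# above.
def _ctc_batch_cost_usage_sketch():
    # softmax output: (samples, time_steps, num_categories)
    y_pred = T.tensor3('y_pred')
    # integer labels: (samples, max_string_length)
    y_true = T.imatrix('y_true')
    # per-sample lengths, each of shape (samples, 1)
    input_length = T.imatrix('input_length')
    label_length = T.imatrix('label_length')
    # one CTC loss value per batch element
    return ctc_batch_cost(y_true, y_pred, input_length, label_length)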
from decimal import Decimal as D import json import base64 from django import test from django.test.client import Client from django.contrib.auth.models import User from django.core.urlresolvers import reverse from accounts import models USERNAME, PASSWORD = 'client', 'password' def get_headers(): # Create a user to authenticate as try: User.objects.get(username=USERNAME) except User.DoesNotExist: User.objects.create_user(USERNAME, None, PASSWORD) auth = "%s:%s" % (USERNAME, PASSWORD) auth_headers = { 'HTTP_AUTHORIZATION': 'Basic %s' % base64.b64encode(auth) } return auth_headers def get(url): return Client().get(url, **get_headers()) def post(url, payload): """ POST a JSON-encoded payload """ return Client().post( url, json.dumps(payload), content_type="application/json", **get_headers()) class TestCreatingAnAccountErrors(test.TestCase): def setUp(self): self.payload = { 'start_date': '2012-01-01T09:00:00+03:00', 'end_date': '2013-06-01T09:00:00+03:00', 'amount': '400.00', } def test_missing_dates(self): payload = self.payload.copy() del payload['start_date'] response = post(reverse('accounts'), payload) self.assertEqual(400, response.status_code) self.assertTrue('message' in json.loads(response.content)) def test_timezone_naive_start_date(self): payload = self.payload.copy() payload['start_date'] = '2013-01-01T09:00:00' response = post(reverse('accounts'), payload) self.assertEqual(400, response.status_code) self.assertTrue('message' in json.loads(response.content)) def test_timezone_naive_end_date(self): payload = self.payload.copy() payload['end_date'] = '2013-06-01T09:00:00' response = post(reverse('accounts'), payload) self.assertEqual(400, response.status_code) self.assertTrue('message' in json.loads(response.content)) def test_dates_in_wrong_order(self): payload = self.payload.copy() payload['start_date'] = '2013-06-01T09:00:00+03:00' payload['end_date'] = '2013-01-01T09:00:00+03:00' response = post(reverse('accounts'), payload) self.assertEqual(400, response.status_code) self.assertTrue('message' in json.loads(response.content)) def test_invalid_amount(self): payload = self.payload.copy() payload['amount'] = 'silly' response = post(reverse('accounts'), payload) self.assertEqual(400, response.status_code) self.assertTrue('message' in json.loads(response.content)) def test_negative_amount(self): payload = self.payload.copy() payload['amount'] = '-100' response = post(reverse('accounts'), payload) self.assertEqual(400, response.status_code) self.assertTrue('message' in json.loads(response.content)) def test_amount_too_low(self): payload = self.payload.copy() payload['amount'] = '1.00' with self.settings(ACCOUNTS_MIN_LOAD_VALUE=D('25.00')): response = post(reverse('accounts'), payload) self.assertEqual(403, response.status_code) data = json.loads(response.content) self.assertEqual('C101', data['code']) def test_amount_too_high(self): payload = self.payload.copy() payload['amount'] = '5000.00' with self.settings(ACCOUNTS_MAX_ACCOUNT_VALUE=D('500.00')): response = post(reverse('accounts'), payload) self.assertEqual(403, response.status_code) data = json.loads(response.content) self.assertEqual('C102', data['code']) class TestSuccessfullyCreatingAnAccount(test.TestCase): def setUp(self): self.payload = { 'start_date': '2013-01-01T09:00:00+03:00', 'end_date': '2013-06-01T09:00:00+03:00', 'amount': '400.00', 'account_type': 'Test accounts', } # Submit request to create a new account, then fetch the detail # page that is returned. 
self.create_response = post(reverse('accounts'), self.payload) if 'Location' in self.create_response: self.detail_response = get( self.create_response['Location']) self.payload = json.loads(self.detail_response.content) self.account = models.Account.objects.get( code=self.payload['code']) def test_returns_201(self): self.assertEqual(201, self.create_response.status_code) def test_returns_a_valid_location(self): self.assertEqual(200, self.detail_response.status_code) def test_detail_view_returns_correct_keys(self): keys = ['code', 'start_date', 'end_date', 'balance'] for key in keys: self.assertTrue(key in self.payload) def test_returns_dates_in_utc(self): self.assertEqual('2013-01-01T06:00:00+00:00', self.payload['start_date']) self.assertEqual('2013-06-01T06:00:00+00:00', self.payload['end_date']) def test_loads_the_account_with_the_right_amount(self): self.assertEqual('400.00', self.payload['balance']) def test_detail_view_returns_redemptions_url(self): self.assertTrue('redemptions_url' in self.payload) def test_detail_view_returns_refunds_url(self): self.assertTrue('refunds_url' in self.payload) class TestMakingARedemption(test.TestCase): def setUp(self): self.create_payload = { 'start_date': '2012-01-01T09:00:00+03:00', 'end_date': '2099-06-01T09:00:00+03:00', 'amount': '400.00', 'account_type': 'Test accounts', } self.create_response = post(reverse('accounts'), self.create_payload) self.assertEqual(201, self.create_response.status_code) self.detail_response = get(self.create_response['Location']) redemption_url = json.loads(self.detail_response.content)['redemptions_url'] self.redeem_payload = { 'amount': '50.00', 'merchant_reference': '1234' } self.redeem_response = post(redemption_url, self.redeem_payload) transfer_url = self.redeem_response['Location'] self.transfer_response = get( transfer_url) def test_returns_201_for_the_redeem_request(self): self.assertEqual(201, self.redeem_response.status_code) def test_returns_valid_transfer_url(self): url = self.redeem_response['Location'] response = get(url) self.assertEqual(200, response.status_code) def test_returns_the_correct_data_in_the_transfer_request(self): data = json.loads(self.transfer_response.content) keys = ['source_code', 'source_name', 'destination_code', 'destination_name', 'amount', 'datetime', 'merchant_reference', 'description'] for key in keys: self.assertTrue(key in data, "Key '%s' not found in payload" % key) self.assertEqual('50.00', data['amount']) self.assertIsNone(data['destination_code']) def test_works_without_merchant_reference(self): self.redeem_payload = { 'amount': '10.00', } redemption_url = json.loads(self.detail_response.content)['redemptions_url'] response = post(redemption_url, self.redeem_payload) self.assertEqual(201, response.status_code) class TestTransferView(test.TestCase): def test_returns_404_for_missing_transfer(self): url = reverse('transfer', kwargs={'reference': '12345678123456781234567812345678'}) response = get(url) self.assertEqual(404, response.status_code) class TestMakingARedemptionThenRefund(test.TestCase): def setUp(self): self.create_payload = { 'start_date': '2012-01-01T09:00:00+03:00', 'end_date': '2099-06-01T09:00:00+03:00', 'amount': '400.00', 'account_type': 'Test accounts', } self.create_response = post( reverse('accounts'), self.create_payload) self.detail_response = get(self.create_response['Location']) self.redeem_payload = { 'amount': '50.00', 'merchant_reference': '1234' } account_dict = json.loads(self.detail_response.content) redemption_url = 
account_dict['redemptions_url'] self.redeem_response = post(redemption_url, self.redeem_payload) self.refund_payload = { 'amount': '25.00', 'merchant_reference': '1234', } refund_url = account_dict['refunds_url'] self.refund_response = post(refund_url, self.refund_payload) def test_returns_201_for_the_refund_request(self): self.assertEqual(201, self.refund_response.status_code) def test_works_without_a_merchant_reference(self): self.refund_payload = { 'amount': '25.00', } account_dict = json.loads(self.detail_response.content) refund_url = account_dict['refunds_url'] self.refund_response = post(refund_url, self.refund_payload) self.assertEqual(201, self.refund_response.status_code) class TestMakingARedemptionThenReverse(test.TestCase): def setUp(self): self.create_payload = { 'start_date': '2012-01-01T09:00:00+03:00', 'end_date': '2099-06-01T09:00:00+03:00', 'amount': '400.00', 'account_type': 'Test accounts', } self.create_response = post(reverse('accounts'), self.create_payload) self.detail_response = get(self.create_response['Location']) account_dict = json.loads(self.detail_response.content) self.redeem_payload = { 'amount': '50.00', 'merchant_reference': '1234' } redemption_url = account_dict['redemptions_url'] self.redeem_response = post(redemption_url, self.redeem_payload) transfer_response = get(self.redeem_response['Location']) transfer_dict = json.loads(transfer_response.content) self.reverse_payload = {} reverse_url = transfer_dict['reverse_url'] self.reverse_response = post(reverse_url, self.reverse_payload) def test_returns_201_for_the_reverse_request(self): self.assertEqual(201, self.reverse_response.status_code) class TestMakingARedemptionThenTransferRefund(test.TestCase): def setUp(self): self.create_payload = { 'start_date': '2012-01-01T09:00:00+03:00', 'end_date': '2099-06-01T09:00:00+03:00', 'amount': '1000.00', 'account_type': 'Test accounts', } self.create_response = post( reverse('accounts'), self.create_payload) self.detail_response = get(self.create_response['Location']) account_dict = json.loads(self.detail_response.content) self.redeem_payload = {'amount': '300.00'} redemption_url = account_dict['redemptions_url'] self.redeem_response = post(redemption_url, self.redeem_payload) self.transfer_response = get(self.redeem_response['Location']) transfer_dict = json.loads(self.transfer_response.content) self.refund_payload = { 'amount': '25.00', } refund_url = transfer_dict['refunds_url'] self.refund_response = post(refund_url, self.refund_payload) def test_returns_201_for_the_refund_request(self): self.assertEqual(201, self.refund_response.status_code) def test_refunds_are_capped_at_value_of_redemption(self): # Make another redemption to ensure the redemptions account has enough # funds to attemp the below refund self.redeem_payload = {'amount': '300.00'} account_dict = json.loads(self.detail_response.content) redemption_url = account_dict['redemptions_url'] post(redemption_url, self.redeem_payload) self.refund_payload = { 'amount': '280.00', } transfer_dict = json.loads(self.transfer_response.content) refund_url = transfer_dict['refunds_url'] response = post(refund_url, self.refund_payload) self.assertEqual(403, response.status_code)
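# --- Sketch (not part of the original test suite) -------------------------------
# The setUp methods above repeat the create-account / fetch-detail dance; a small
# helper along these lines could factor that out. The payload values mirror the
# illustrative ones already used in this module.
def create_account_detail(amount='400.00'):
    payload = {
        'start_date': '2012-01-01T09:00:00+03:00',
        'end_date': '2099-06-01T09:00:00+03:00',
        'amount': amount,
        'account_type': 'Test accounts',
    }
    create_response = post(reverse('accounts'), payload)
    detail_response = get(create_response['Location'])
    return json.loads(detail_response.content)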
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gradients for operators defined in nn_ops.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_nn_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops @ops.RegisterGradient("Conv2DBackpropInput") def _Conv2DBackpropInputGrad(op, grad): """The derivatives for deconvolution. Args: op: the Deconvolution op. grad: the tensor representing the gradient w.r.t. the output Returns: the gradients w.r.t. the input and the filter """ # We call the gen_nn_ops backprop functions instead of nn_ops backprop # functions for performance reasons in Eager mode. See _Conv2DGrad. return [ None, gen_nn_ops.conv2d_backprop_filter( grad, array_ops.shape(op.inputs[1]), op.inputs[2], dilations=op.get_attr("dilations"), strides=op.get_attr("strides"), padding=op.get_attr("padding"), explicit_paddings=op.get_attr("explicit_paddings"), use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"), data_format=op.get_attr("data_format").decode()), gen_nn_ops.conv2d( grad, op.inputs[1], dilations=op.get_attr("dilations"), strides=op.get_attr("strides"), padding=op.get_attr("padding"), explicit_paddings=op.get_attr("explicit_paddings"), use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"), data_format=op.get_attr("data_format").decode()) ] @ops.RegisterGradient("Conv2DBackpropFilter") def _Conv2DBackpropFilterGrad(op, grad): # We call the gen_nn_ops backprop functions instead of nn_ops backprop # functions for performance reasons in Eager mode. See _Conv2DGrad. return [ gen_nn_ops.conv2d_backprop_input( array_ops.shape(op.inputs[0]), grad, op.inputs[2], dilations=op.get_attr("dilations"), strides=op.get_attr("strides"), padding=op.get_attr("padding"), explicit_paddings=op.get_attr("explicit_paddings"), use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"), data_format=op.get_attr("data_format").decode()), None, gen_nn_ops.conv2d( op.inputs[0], grad, dilations=op.get_attr("dilations"), strides=op.get_attr("strides"), padding=op.get_attr("padding"), explicit_paddings=op.get_attr("explicit_paddings"), use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"), data_format=op.get_attr("data_format").decode()) ] @ops.RegisterGradient("DepthwiseConv2dNativeBackpropInput") def _DepthwiseConv2dNativeBackpropInputGrad(op, grad): """The derivatives for deconvolution. Args: op: the Deconvolution op. grad: the tensor representing the gradient w.r.t. the output Returns: the gradients w.r.t. 
the input and the filter """ return [ None, nn_ops.depthwise_conv2d_native_backprop_filter( grad, array_ops.shape(op.inputs[1]), op.inputs[2], dilations=op.get_attr("dilations"), strides=op.get_attr("strides"), padding=op.get_attr("padding"), data_format=op.get_attr("data_format")), nn_ops.depthwise_conv2d_native( grad, op.inputs[1], dilations=op.get_attr("dilations"), strides=op.get_attr("strides"), padding=op.get_attr("padding"), data_format=op.get_attr("data_format")) ] @ops.RegisterGradient("DepthwiseConv2dNativeBackpropFilter") def _DepthwiseConv2dNativeBackpropFilterGrad(op, grad): return [ nn_ops.depthwise_conv2d_native_backprop_input( array_ops.shape(op.inputs[0]), grad, op.inputs[2], dilations=op.get_attr("dilations"), strides=op.get_attr("strides"), padding=op.get_attr("padding"), data_format=op.get_attr("data_format")), None, nn_ops.depthwise_conv2d_native( op.inputs[0], grad, dilations=op.get_attr("dilations"), strides=op.get_attr("strides"), padding=op.get_attr("padding"), data_format=op.get_attr("data_format")) ] @ops.RegisterGradient("Conv3D") def _Conv3DGrad(op, grad): data_format = op.get_attr("data_format").decode() return [ nn_ops.conv3d_backprop_input_v2( array_ops.shape(op.inputs[0]), op.inputs[1], grad, dilations=op.get_attr("dilations"), strides=op.get_attr("strides"), padding=op.get_attr("padding"), data_format=data_format), nn_ops.conv3d_backprop_filter_v2( op.inputs[0], array_ops.shape(op.inputs[1]), grad, dilations=op.get_attr("dilations"), strides=op.get_attr("strides"), padding=op.get_attr("padding"), data_format=data_format) ] @ops.RegisterGradient("Conv3DBackpropInputV2") def _Conv3DBackpropInputGrad(op, grad): data_format = op.get_attr("data_format").decode() return [ None, nn_ops.conv3d_backprop_filter_v2( grad, array_ops.shape(op.inputs[1]), op.inputs[2], dilations=op.get_attr("dilations"), strides=op.get_attr("strides"), padding=op.get_attr("padding"), data_format=data_format), nn_ops.conv3d( grad, op.inputs[1], dilations=op.get_attr("dilations"), strides=op.get_attr("strides"), padding=op.get_attr("padding"), data_format=data_format) ] @ops.RegisterGradient("Conv3DBackpropFilterV2") def _Conv3DBackpropFilterGrad(op, grad): data_format = op.get_attr("data_format").decode() return [ nn_ops.conv3d_backprop_input_v2( array_ops.shape(op.inputs[0]), grad, op.inputs[2], dilations=op.get_attr("dilations"), strides=op.get_attr("strides"), padding=op.get_attr("padding"), data_format=data_format), None, nn_ops.conv3d( op.inputs[0], grad, dilations=op.get_attr("dilations"), strides=op.get_attr("strides"), padding=op.get_attr("padding"), data_format=data_format) ] @ops.RegisterGradient("AvgPool3D") def _AvgPool3DGrad(op, grad): return gen_nn_ops.avg_pool3d_grad( array_ops.shape(op.inputs[0]), grad, ksize=op.get_attr("ksize"), strides=op.get_attr("strides"), padding=op.get_attr("padding"), data_format=op.get_attr("data_format").decode()) @ops.RegisterGradient("AvgPool3DGrad") def _AvgPool3DGradGrad(op, grad): return (array_ops.stop_gradient(op.inputs[0]), gen_nn_ops.avg_pool3d( grad, op.get_attr("ksize"), op.get_attr("strides"), op.get_attr("padding"), data_format=op.get_attr("data_format").decode())) @ops.RegisterGradient("MaxPool3D") def _MaxPool3DGrad(op, grad): return gen_nn_ops.max_pool3d_grad( op.inputs[0], op.outputs[0], grad, ksize=op.get_attr("ksize"), strides=op.get_attr("strides"), padding=op.get_attr("padding"), data_format=op.get_attr("data_format").decode()) @ops.RegisterGradient("MaxPool3DGrad") def _MaxPool3DGradGrad(op, grad): return (array_ops.zeros( 
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype), array_ops.zeros( shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype), gen_nn_ops.max_pool3d_grad_grad( op.inputs[0], op.inputs[1], grad, op.get_attr("ksize"), op.get_attr("strides"), padding=op.get_attr("padding"), data_format=op.get_attr("data_format").decode())) @ops.RegisterGradient("MaxPool3DGradGrad") def _MaxPool3DGradGradGrad(op, grad): return (array_ops.zeros( shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype), array_ops.zeros( shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype), gen_nn_ops.max_pool3d_grad( op.inputs[0], op.inputs[1], grad, op.get_attr("ksize"), op.get_attr("strides"), padding=op.get_attr("padding"), data_format=op.get_attr("data_format").decode())) @ops.RegisterGradient("Softmax") def _SoftmaxGrad(op, grad_softmax): """The derivative of the softmax nonlinearity. We assume that probs is of shape [batch_size * dim] The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax'). This matrix is diagonal minus a rank one matrix, so it is easy to implement as follows: grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax Args: op: the Softmax op. grad_softmax: the tensor representing the gradient w.r.t. the softmax output. Returns: gradient w.r.t the input to the softmax """ softmax = op.outputs[0] sum_channels = math_ops.reduce_sum(grad_softmax * softmax, -1, keepdims=True) return (grad_softmax - sum_channels) * softmax @ops.RegisterGradient("LogSoftmax") def _LogSoftmaxGrad(op, grad): """The gradient for log_softmax. log_softmax = input - log(sum(exp(input)) dlog_softmax/dinput = diag - softmax(input) Args: op: The log softmax op. grad: The tensor representing the gradient w.r.t. the output. Returns: The gradients w.r.t. the input. """ softmax = math_ops.exp(op.outputs[0]) return grad - math_ops.reduce_sum(grad, -1, keepdims=True) * softmax @ops.RegisterGradient("BiasAdd") def _BiasAddGrad(op, received_grad): """Return the gradients for the 2 inputs of bias_op. The first input of unused_bias_op is the tensor t, and its gradient is just the gradient the unused_bias_op received. The second input of unused_bias_op is the bias vector which has one fewer dimension than "received_grad" (the batch dimension.) Its gradient is the received gradient Summed on the batch dimension, which is the first dimension. Args: op: The BiasOp for which we need to generate gradients. received_grad: Tensor. The gradients passed to the BiasOp. Returns: Two tensors, the first one for the "tensor" input of the BiasOp, the second one for the "bias" input of the BiasOp. """ try: data_format = op.get_attr("data_format") except ValueError: data_format = None return (received_grad, gen_nn_ops.bias_add_grad( out_backprop=received_grad, data_format=data_format)) @ops.RegisterGradient("BiasAddGrad") def _BiasAddGradGrad(op, received_grad): """Gradient for the BiasAddGrad op. Args: op: BiasAddGrad op for which we are calculating gradients. received_grad: The gradients passed to the BiasAddGrad op. 
Returns: A single gradient Tensor for the input to BiasAddGrad (which is the gradient of the bias term in BiasAdd) """ try: data_format = op.get_attr("data_format") except ValueError: data_format = None shape = array_ops.shape(op.inputs[0]) bias_shape = array_ops.shape(received_grad) if data_format == b"NCHW": expanded_shape = array_ops.concat([ array_ops.ones_like(shape[:1]), bias_shape, array_ops.ones_like(shape[2:]) ], 0) tile_mults = array_ops.concat([shape[:1], [1], shape[2:]], 0) else: expanded_shape = array_ops.concat( [array_ops.ones_like(shape[:-1]), bias_shape], 0) tile_mults = array_ops.concat([shape[:-1], [1]], 0) expanded_grad = array_ops.reshape(received_grad, expanded_shape) return array_ops.tile(expanded_grad, tile_mults) @ops.RegisterGradient("BiasAddV1") def _BiasAddGradV1(unused_bias_op, received_grad): """Return the gradients for the 2 inputs of bias_op. The first input of unused_bias_op is the tensor t, and its gradient is just the gradient the unused_bias_op received. The second input of unused_bias_op is the bias vector which has one fewer dimension than "received_grad" (the batch dimension.) Its gradient is the received gradient Summed on the batch dimension, which is the first dimension. Args: unused_bias_op: The BiasOp for which we need to generate gradients. received_grad: Tensor. The gradients passed to the BiasOp. Returns: Two tensors, the first one for the "tensor" input of the BiasOp, the second one for the "bias" input of the BiasOp. """ reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1) return (received_grad, math_ops.reduce_sum(received_grad, reduction_dim_tensor)) @ops.RegisterGradient("Relu") def _ReluGrad(op, grad): return gen_nn_ops.relu_grad(grad, op.outputs[0]) @ops.RegisterGradient("EluGrad") def _EluGradGrad(op, grad): elu_x = op.inputs[1] return (gen_nn_ops.elu_grad(grad, elu_x), array_ops.where( elu_x < 0, grad * op.inputs[0], array_ops.zeros_like(elu_x))) @ops.RegisterGradient("SeluGrad") def _SeluGradGrad(op, grad): selu_x = op.inputs[1] return (gen_nn_ops.selu_grad(grad, selu_x), array_ops.where( selu_x < 0., grad * op.inputs[0], array_ops.zeros_like(selu_x))) @ops.RegisterGradient("Relu6") def _Relu6Grad(op, grad): return gen_nn_ops.relu6_grad(grad, op.outputs[0]) @ops.RegisterGradient("Relu6Grad") def _Relu6GradGrad(op, grad): x = op.inputs[1] return (gen_nn_ops.relu6_grad(grad, x), array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype)) @ops.RegisterGradient("LeakyRelu") def _LeakyReluGrad(op, grad): x = op.inputs[0] alpha = op.get_attr("alpha") return gen_nn_ops.leaky_relu_grad(grad, x, alpha=alpha) @ops.RegisterGradient("LeakyReluGrad") def _LeakyReluGradGrad(op, grad): x = op.inputs[1] alpha = op.get_attr("alpha") return (gen_nn_ops.leaky_relu_grad(grad, x, alpha=alpha), array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype)) @ops.RegisterGradient("Elu") def _EluGrad(op, grad): return gen_nn_ops.elu_grad(grad, op.outputs[0]) @ops.RegisterGradient("Selu") def _SeluGrad(op, grad): return gen_nn_ops.selu_grad(grad, op.outputs[0]) @ops.RegisterGradient("Softplus") def _SoftplusGrad(op, grad): return grad * math_ops.sigmoid(op.inputs[0]) @ops.RegisterGradient("SoftplusGrad") def _SoftplusGradGrad(op, grad): # Let: # y = tf.nn.softplus(x) # dx = gen_nn_ops.softplus_grad(dy, x) = dy / (1 + exp(-x)) # This op computes (ddy, d2x) from op.inputs == [dy, x] and grad == ddx. 
dy, x = op.inputs with ops.control_dependencies([grad]): ddy = gen_nn_ops.softplus_grad(grad, x) d2x = grad * dy / (math_ops.exp(-x) + 2.0 + math_ops.exp(x)) return (ddy, d2x) @ops.RegisterGradient("Softsign") def _SoftsignGrad(op, grad): return gen_nn_ops.softsign_grad(grad, op.inputs[0]) @ops.RegisterGradient("ReluGrad") def _ReluGradGrad(op, grad): x = op.inputs[1] return (gen_nn_ops.relu_grad(grad, x), array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype)) def _BroadcastMul(vec, mat): """Multiply after broadcasting vec to match dimensions of mat. Args: vec: A 1-D tensor of dimension [D0] mat: A 2-D tensor of dimension [D0, D1] Returns: A tensor of dimension [D0, D1], the result of vec * mat """ # Reshape vec to [D0, 1] vec = array_ops.expand_dims(vec, -1) return vec * mat @ops.RegisterGradient("SoftmaxCrossEntropyWithLogits") def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_loss, grad_grad): """Gradient function for SoftmaxCrossEntropyWithLogits.""" # grad_loss is the backprop for cost, and we multiply it with the gradients # (which is output[1]) # grad_grad is the backprop for softmax gradient. # # Second derivative is just softmax derivative w.r.t. logits. softmax_grad = op.outputs[1] grad = _BroadcastMul(grad_loss, softmax_grad) def IsZero(g): # Some introspection to check if the gradient is feeding zeros if context.executing_eagerly(): # TODO(apassos) add an efficient way to detect eager zeros here. return False if g.op.type in ("ZerosLike", "Zeros"): return True const_fill_value = tensor_util.constant_value(g) return const_fill_value is not None and (const_fill_value == 0).all() logits = op.inputs[0] if grad_grad is not None and not IsZero(grad_grad): softmax = nn_ops.softmax(logits) grad += ((grad_grad - array_ops.squeeze( math_ops.matmul( array_ops.expand_dims(grad_grad, 1), array_ops.expand_dims(softmax, 2)), axis=1)) * softmax) return grad, _BroadcastMul(grad_loss, -nn_ops.log_softmax(logits)) @ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits") def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _): """Gradient function for SparseSoftmaxCrossEntropyWithLogits.""" # grad_0 is the backprop for cost, and we multiply it with the gradients # (which is output[1]) # There is no gradient for the labels # # Currently there is no way to take the second derivative of this op # due to the fused implementation's interaction with tf.gradients(), # so we make sure we prevent silently incorrect results by raising # an error if the second derivative is requested via prevent_gradient. sparse_softmax_grad_without_gradient = array_ops.prevent_gradient( op.outputs[1], message="Currently there is no way to take the second " "derivative of sparse_softmax_cross_entropy_with_logits due to the fused " "implementation's interaction with tf.gradients()") return _BroadcastMul(grad_0, sparse_softmax_grad_without_gradient), None @ops.RegisterGradient("Conv2D") def _Conv2DGrad(op, grad): """Gradient function for Conv2D.""" dilations = op.get_attr("dilations") strides = op.get_attr("strides") padding = op.get_attr("padding") explicit_paddings = op.get_attr("explicit_paddings") use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu") data_format = op.get_attr("data_format") shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]]) # We call the gen_nn_ops backprop functions instead of nn_ops backprop # functions for performance reasons in Eager mode. gen_nn_ops functions take a # `explicit_paddings` parameter, but nn_ops functions do not. 
So if were were # to use the nn_ops functions, we would have to convert `padding` and # `explicit_paddings` into a single `padding` parameter, increasing overhead # in Eager mode. return [ gen_nn_ops.conv2d_backprop_input( shape_0, op.inputs[1], grad, dilations=dilations, strides=strides, padding=padding, explicit_paddings=explicit_paddings, use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format), gen_nn_ops.conv2d_backprop_filter( op.inputs[0], shape_1, grad, dilations=dilations, strides=strides, padding=padding, explicit_paddings=explicit_paddings, use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format) ] @ops.RegisterGradient("DepthwiseConv2dNative") def _DepthwiseConv2dNativeGrad(op, grad): return [ nn_ops.depthwise_conv2d_native_backprop_input( array_ops.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr("strides"), op.get_attr("padding"), data_format=op.get_attr("data_format")), nn_ops.depthwise_conv2d_native_backprop_filter( op.inputs[0], array_ops.shape(op.inputs[1]), grad, op.get_attr("strides"), op.get_attr("padding"), data_format=op.get_attr("data_format")) ] @ops.RegisterGradient("Dilation2D") def _Dilation2DGrad(op, grad): return [ nn_ops.dilation2d_backprop_input(op.inputs[0], op.inputs[1], grad, op.get_attr("strides"), op.get_attr("rates"), op.get_attr("padding")), nn_ops.dilation2d_backprop_filter(op.inputs[0], op.inputs[1], grad, op.get_attr("strides"), op.get_attr("rates"), op.get_attr("padding")) ] @ops.RegisterGradient("LRN") def _LRNGrad(op, grad): depth_radius = op.get_attr("depth_radius") bias = op.get_attr("bias") alpha = op.get_attr("alpha") beta = op.get_attr("beta") return [ gen_nn_ops.lrn_grad(grad, op.inputs[0], op.outputs[0], depth_radius, bias, alpha, beta) ] @ops.RegisterGradient("AvgPool") def _AvgPoolGrad(op, grad): return gen_nn_ops.avg_pool_grad( array_ops.shape(op.inputs[0]), grad, op.get_attr("ksize"), op.get_attr("strides"), op.get_attr("padding"), data_format=op.get_attr("data_format")) @ops.RegisterGradient("AvgPoolGrad") def _AvgPoolGradGrad(op, grad): return (array_ops.stop_gradient(op.inputs[0]), gen_nn_ops.avg_pool( grad, op.get_attr("ksize"), op.get_attr("strides"), op.get_attr("padding"), data_format=op.get_attr("data_format"))) @ops.RegisterGradient("MaxPool") def _MaxPoolGrad(op, grad): return gen_nn_ops.max_pool_grad( op.inputs[0], op.outputs[0], grad, op.get_attr("ksize"), op.get_attr("strides"), padding=op.get_attr("padding"), data_format=op.get_attr("data_format")) @ops.RegisterGradient("MaxPoolV2") def _MaxPoolGradV2(op, grad): ksize = op.inputs[1] strides = op.inputs[2] return gen_nn_ops.max_pool_grad_v2( op.inputs[0], op.outputs[0], grad, ksize, strides, padding=op.get_attr("padding"), data_format=op.get_attr("data_format")), None, None @ops.RegisterGradient("MaxPoolWithArgmax") def _MaxPoolGradWithArgmax(op, grad, unused_argmax_grad): del unused_argmax_grad return gen_nn_ops.max_pool_grad_with_argmax( op.inputs[0], grad, op.outputs[1], op.get_attr("ksize"), op.get_attr("strides"), padding=op.get_attr("padding"), include_batch_in_index=op.get_attr("include_batch_in_index")) @ops.RegisterGradient("MaxPoolGrad") def _MaxPoolGradGrad(op, grad): return (array_ops.zeros( shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype), array_ops.zeros( shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype), gen_nn_ops.max_pool_grad_grad( op.inputs[0], op.inputs[1], grad, op.get_attr("ksize"), op.get_attr("strides"), padding=op.get_attr("padding"), data_format=op.get_attr("data_format"))) 
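# --- Sketch (not part of the original file) --------------------------------------
# The registered functions in this module (e.g. _Conv2DGrad above) are dispatched
# automatically when gradients are requested; user code never calls them directly.
# A minimal illustration through the public API, assuming TF graph mode and
# illustrative NHWC shapes:
def _conv2d_grad_sketch():
    import tensorflow as tf  # assumed importable alongside this module
    x = tf.ones([1, 8, 8, 3])       # input:  (batch, height, width, in_channels)
    w = tf.ones([3, 3, 3, 4])       # filter: (kh, kw, in_channels, out_channels)
    y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
    # tf.gradients resolves to the Conv2D gradient registered in this module.
    d_input, d_filter = tf.gradients(y, [x, w])
    return d_input, d_filter   # same shapes as x and w respectively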
@ops.RegisterGradient("MaxPoolGradV2") def _MaxPoolGradGradV2(op, grad): ksize = op.inputs[3] strides = op.inputs[4] return (array_ops.zeros( shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype), array_ops.zeros( shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype), gen_nn_ops.max_pool_grad_grad_v2( op.inputs[0], op.inputs[1], grad, ksize, strides, padding=op.get_attr("padding"), data_format=op.get_attr("data_format")), None, None) @ops.RegisterGradient("MaxPoolGradGrad") def _MaxPoolGradGradGrad(op, grad): return (array_ops.zeros( shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype), array_ops.zeros( shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype), gen_nn_ops.max_pool_grad( op.inputs[0], op.inputs[1], grad, op.get_attr("ksize"), op.get_attr("strides"), padding=op.get_attr("padding"), data_format=op.get_attr("data_format"))) @ops.RegisterGradient("FractionalMaxPool") def _FractionalMaxPoolGrad(op, grad_0, unused_grad_1, unused_grad_2): """Returns gradient for FractionalMaxPool. Since FractionalMaxPool has three outputs, there are three gradients passed in for each of the outputs. Only the first one is useful, the other two gradients are empty. Args: op: The FractionalMaxPoolOp. grad_0: Gradient with respect to op.outputs[0] unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty. unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty. Returns: Input backprop for FractionalMaxPool op. """ return gen_nn_ops.fractional_max_pool_grad( op.inputs[0], op.outputs[0], grad_0, op.outputs[1], op.outputs[2], op.get_attr("overlapping")) @ops.RegisterGradient("FractionalAvgPool") def _FractionalAvgPoolGrad(op, grad_0, unused_grad_1, unused_grad_2): """Returns gradient for FractionalAvgPool. Since FractionalAvgPool has three outputs, there are three gradients passed in for each of the outputs. Only the first one is useful, the other two gradients are empty. Args: op: The FractionalAvgPoolOp. grad_0: Gradient with respect to op.outputs[0] unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty. unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty. Returns: Input backprop for FractionalAvgPool op. """ return gen_nn_ops.fractional_avg_pool_grad(op.inputs[0].get_shape(), grad_0, op.outputs[1], op.outputs[2], op.get_attr("overlapping")) @ops.RegisterGradient("BatchNormWithGlobalNormalization") def _BatchNormWithGlobalNormalizationGrad(op, grad): """Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization. We do not backprop anything for the mean and var intentionally as they are not being trained with backprop in the operation. Args: op: The BatchNormOp for which we need to generate gradients. grad: Tensor. The gradients passed to the BatchNormOp. Returns: dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon))) dm: Backprop for mean, which is sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon)) dv: Backprop for variance, which is sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2) db: Backprop for beta, which is grad reduced in all except the last dimension. 
dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon))) """ dx, dm, dv, db, dg = gen_nn_ops.batch_norm_with_global_normalization_grad( op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad, op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization")) return dx, dm, dv, db, dg def _BaseFusedBatchNormGrad(op, version, *grad): """Return the gradients for the 3 inputs of BatchNorm. Args: op: The BatchNormOp for which we need to compute gradients. version: Integer indicating which version to use of the fused batch norm gradient. *grad: An argument list for tensors of gradients wrt the outputs with grad[0] as grad_y. Returns: grad_x: gradient for x, which is scale * rsqrt(variance + epsilon) * [grad_y - mean(grad_y) - (x - mean(x)) * mean(grad_y * (x - mean(x))) / (variance + epsilon)] in training mode; grad_y * scale * rsqrt(pop_variance + epsilon) in freeze mode. grad_scale: gradient for scale, which is sum(grad_y * (x - mean(x)) * rsqrt(variance + epsilon)) in training mode; sum(grad_y * (x - pop_mean) * rsqrt(pop_variance + epsilon)) in freeze mode. grad_offset: gradient for offset, which is sum(grad_y) in training mode; sum(grad_y) in freeze mode. """ x = op.inputs[0] grad_y = grad[0] scale = op.inputs[1] epsilon = op.get_attr("epsilon") data_format = op.get_attr("data_format") is_training = op.get_attr("is_training") if version == 2: grad_fun = gen_nn_ops.fused_batch_norm_grad_v3 elif version == 1: grad_fun = gen_nn_ops.fused_batch_norm_grad_v2 else: grad_fun = gen_nn_ops.fused_batch_norm_grad if is_training: args = { "y_backprop": grad_y, "x": x, "scale": scale, "reserve_space_1": op.outputs[3], "reserve_space_2": op.outputs[4], "epsilon": epsilon, "data_format": data_format, "is_training": is_training } if version == 2: args["reserve_space_3"] = op.outputs[5] return grad_fun(**args) else: pop_mean = op.inputs[3] pop_var = op.inputs[4] if data_format == b"NCHW": x = array_ops.transpose(x, [0, 2, 3, 1]) grad_y = array_ops.transpose(grad_y, [0, 2, 3, 1]) args = { "y_backprop": grad_y, "x": x, "scale": scale, "reserve_space_1": pop_mean, "reserve_space_2": pop_var, "epsilon": epsilon, "data_format": "NHWC", "is_training": is_training } if version == 2: args["reserve_space_3"] = op.outputs[5] dx, dscale, doffset, _, _ = grad_fun(**args) if data_format == b"NCHW": dx = array_ops.transpose(dx, [0, 3, 1, 2]) return dx, dscale, doffset, None, None @ops.RegisterGradient("FusedBatchNorm") def _FusedBatchNormGrad(op, *grad): return _BaseFusedBatchNormGrad(op, 0, *grad) @ops.RegisterGradient("FusedBatchNormV2") def _FusedBatchNormV2Grad(op, *grad): return _BaseFusedBatchNormGrad(op, 1, *grad) @ops.RegisterGradient("FusedBatchNormV3") def _FusedBatchNormV3Grad(op, *grad): return _BaseFusedBatchNormGrad(op, 2, *grad) def _BatchNormGrad(grad_y, x, scale, pop_mean, pop_var, epsilon, data_format, is_training=True): """Returns the gradients for the 3 inputs of BatchNorm. Args: grad_y: A `Tensor` of 4 dimensions for gradient for y. x: A `Tensor` of 4 dimensions for x. scale: A `Tensor` of 1 dimension for scaling. pop_mean: A `Tensor` of 1 dimension for the population mean. Only used when is_training=False. pop_var: A `Tensor` of 1 dimension for the population variance. Only used when is_training=False. epsilon: A small float number added to the variance of x. data_format: The data format for input. Either b"NHWC" or b"NCHW". is_training: A bool value to indicate the operation is for training (default) or inference. 
Returns: A tuple (grad_x, grad_scale, grad_offset), where grad_x is the gradient for x, grad_scale the gradient for scale, and grad_offset the gradient for offset. """ x_dtype = x.dtype.base_dtype if x_dtype == dtypes.float16: # float16 math is too imprecise, so we do the batch norm gradient # computations in float32. x = math_ops.cast(x, dtypes.float32) grad_y = math_ops.cast(grad_y, dtypes.float32) if is_training: if data_format == b"NHWC": keepdims = False reduce_axis = [0, 1, 2] else: keepdims = True reduce_axis = [0, 2, 3] shape = [1, array_ops.size(scale), 1, 1] scale = array_ops.reshape(scale, shape) mean_grad_y = math_ops.reduce_mean(grad_y, reduce_axis, keepdims=keepdims) mean_x = math_ops.reduce_mean(x, reduce_axis, keepdims=keepdims) var_x = math_ops.reduce_mean( math_ops.squared_difference(x, array_ops.stop_gradient(mean_x)), reduce_axis, keepdims=keepdims) grad_y_offset = grad_y - mean_grad_y x_offset = x - mean_x mean = math_ops.reduce_mean( grad_y * x_offset, axis=reduce_axis, keepdims=keepdims) grad_x = scale * math_ops.rsqrt(var_x + epsilon) * ( grad_y_offset - math_ops.reciprocal(var_x + epsilon) * mean * x_offset) grad_scale = math_ops.rsqrt(var_x + epsilon) * math_ops.reduce_sum( grad_y * x_offset, axis=reduce_axis, keepdims=keepdims) if data_format == b"NCHW": grad_scale = array_ops.squeeze(grad_scale) grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis) return math_ops.cast(grad_x, x_dtype), grad_scale, grad_offset else: if data_format == b"NHWC": reduce_axis = [0, 1, 2] else: reduce_axis = [0, 2, 3] shape = [1, array_ops.size(pop_mean), 1, 1] pop_mean = array_ops.reshape(pop_mean, shape) pop_var = array_ops.reshape(pop_var, shape) scale = array_ops.reshape(scale, shape) grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis) var_rsqrt = math_ops.rsqrt(pop_var + epsilon) grad_scale = math_ops.reduce_sum( grad_y * (x - pop_mean) * var_rsqrt, axis=reduce_axis) grad_x = grad_y * scale * var_rsqrt return math_ops.cast(grad_x, x_dtype), grad_scale, grad_offset @ops.RegisterGradient("FusedBatchNormGrad") def _FusedBatchNormGradGrad(op, *grad): """Returns the gradients for the 3 inputs of FusedBatchNormGrad. Args: op: The FusedBatchNormGradOp for which we need to compute gradients. *grad: An argument list for tensors of gradients wrt the outputs with grad[0] as grad_grad_x, grad[1] as grad_grad_scale, grad[2] as grad_grad_offset. Returns: A tuple (grad_grad_y, grad_x, grad_scale, None, None), where grad_grad_y is the gradient for grad_y, grad_x the gradient for x, grad_scale the gradient for scale. 
""" data_format = op.get_attr("data_format") epsilon = op.get_attr("epsilon") is_training = op.get_attr("is_training") grad_y = op.inputs[0] x = op.inputs[1] scale = op.inputs[2] pop_mean = op.inputs[3] pop_var = op.inputs[4] grad_grad_x = grad[0] grad_grad_scale = grad[1] grad_grad_offset = grad[2] with backprop.GradientTape() as tape: tape.watch(grad_y) tape.watch(x) tape.watch(scale) grad_x, grad_scale, grad_offset = _BatchNormGrad( grad_y, x, scale, pop_mean, pop_var, epsilon, data_format, is_training) grad_initial = [grad_grad_x, grad_grad_scale, grad_grad_offset] grad_grad_y, grad_x, grad_scale = tape.gradient( [grad_x, grad_scale, grad_offset], [grad_y, x, scale], grad_initial) return grad_grad_y, grad_x, grad_scale, None, None @ops.RegisterGradient("FusedBatchNormGradV2") def _FusedBatchNormGradGradV2(op, *grad): return _FusedBatchNormGradGrad(op, *grad) @ops.RegisterGradient("FusedBatchNormGradV3") def _FusedBatchNormGradGradV3(op, *grad): grad_grad_y, grad_x, grad_scale, _, _ = _FusedBatchNormGradGrad(op, *grad) return grad_grad_y, grad_x, grad_scale, None, None, None @ops.RegisterGradient("L2Loss") def _L2LossGrad(op, grad): """Return the gradients for L2Loss. Args: op: The L2LossOp for which we need to generate gradients. grad: Tensor containing a single number. Returns: The gradient, which is (x * grad). """ return op.inputs[0] * grad @ops.RegisterGradient("TopK") @ops.RegisterGradient("TopKV2") def _TopKGrad(op, grad, _): """Return the gradients for TopK. Args: op: The TopKOp for which we need to generate gradients. grad: Tensor. The gradients passed to the TopKOp. Returns: A list of two tensors, the first being the gradient w.r.t to the input and TopK, and the second being the gradient w.r.t. to the indices (all zero). """ in_shape = array_ops.shape(op.inputs[0]) ind_shape = array_ops.shape(op.outputs[1]) # int32 is not supported on GPU hence up-casting ind_lastdim = array_ops.gather( math_ops.cast(ind_shape, dtypes.int64), array_ops.size(ind_shape) - 1) # Flatten indices to 2D. ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack([-1, ind_lastdim])) in_lastdim = array_ops.gather( math_ops.cast(in_shape, dtypes.int64), array_ops.size(in_shape) - 1) outerdim = array_ops.shape(ind_2d)[0] # Compute linear indices (flattened to 1D). ind = array_ops.reshape( ind_2d + math_ops.cast( array_ops.expand_dims( math_ops.range(0, math_ops.cast(outerdim, dtypes.int64) * in_lastdim, in_lastdim), -1), dtypes.int32), [-1]) # Substitute grad to appropriate locations and fill the rest with zeros, # finally reshaping it to the original input shape. return [ array_ops.reshape( array_ops.scatter_nd( array_ops.expand_dims(ind, -1), array_ops.reshape(grad, [-1]), [math_ops.reduce_prod(in_shape)]), in_shape), array_ops.zeros([], dtype=dtypes.int32) ] @ops.RegisterGradient("NthElement") def _NthElementGrad(op, grad): """Return the gradients for NthElement. Args: op: The NthElementOp for which we need to generate gradients. grad: Tensor. The gradients passed to the NthElementOp Returns: A list of two tensors, the first being the gradient w.r.t. the input, the second being the gradient w.r.t. the N (None). """ input = op.inputs[0] # pylint: disable=redefined-builtin output = op.outputs[0] # Compute the number of elements which equal to output in each reduction # dimension. If there are multiple elements then the gradient will be # divided between them. 
  indicators = math_ops.cast(
      math_ops.equal(array_ops.expand_dims(output, -1), input), grad.dtype)
  grad = array_ops.expand_dims(grad, -1)
  num_selected = array_ops.expand_dims(math_ops.reduce_sum(indicators, -1), -1)
  return [math_ops.div(indicators, num_selected) * grad, None]
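# --- Sketch (not part of the original file) --------------------------------------
# A small numerical cross-check of the softmax rule implemented in _SoftmaxGrad,
#     grad_x = (grad_softmax - sum(grad_softmax * softmax, -1)) * softmax,
# using the public API; assumes TF graph mode, and the tensor values are
# illustrative only.
def _softmax_grad_sketch():
    import tensorflow as tf  # assumed importable alongside this module
    logits = tf.constant([[1.0, 2.0, 3.0]])
    softmax = tf.nn.softmax(logits)
    upstream = tf.constant([[0.1, 0.2, 0.7]])
    # Dispatches to _SoftmaxGrad registered above.
    (auto_grad,) = tf.gradients(softmax, logits, grad_ys=upstream)
    manual = (upstream - tf.reduce_sum(upstream * softmax, -1, keepdims=True)) * softmax
    # Evaluated in a session, auto_grad and manual agree elementwise.
    return auto_grad, manual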
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function import binascii import pretend import pytest import six from cryptography import utils from cryptography.exceptions import ( AlreadyFinalized, InvalidSignature, _Reasons ) from cryptography.hazmat.backends.interfaces import CMACBackend from cryptography.hazmat.primitives.ciphers.algorithms import ( AES, ARC4, TripleDES ) from cryptography.hazmat.primitives.cmac import CMAC from tests.utils import ( load_nist_vectors, load_vectors_from_file, raises_unsupported_algorithm ) vectors_aes128 = load_vectors_from_file( "CMAC/nist-800-38b-aes128.txt", load_nist_vectors) vectors_aes192 = load_vectors_from_file( "CMAC/nist-800-38b-aes192.txt", load_nist_vectors) vectors_aes256 = load_vectors_from_file( "CMAC/nist-800-38b-aes256.txt", load_nist_vectors) vectors_aes = vectors_aes128 + vectors_aes192 + vectors_aes256 vectors_3des = load_vectors_from_file( "CMAC/nist-800-38b-3des.txt", load_nist_vectors) fake_key = b"\x00" * 16 @pytest.mark.cmac class TestCMAC(object): @pytest.mark.supported( only_if=lambda backend: backend.cmac_algorithm_supported( AES(fake_key)), skip_message="Does not support CMAC." ) @pytest.mark.parametrize("params", vectors_aes) def test_aes_generate(self, backend, params): key = params["key"] message = params["message"] output = params["output"] cmac = CMAC(AES(binascii.unhexlify(key)), backend) cmac.update(binascii.unhexlify(message)) assert binascii.hexlify(cmac.finalize()) == output @pytest.mark.supported( only_if=lambda backend: backend.cmac_algorithm_supported( AES(fake_key)), skip_message="Does not support CMAC." ) @pytest.mark.parametrize("params", vectors_aes) def test_aes_verify(self, backend, params): key = params["key"] message = params["message"] output = params["output"] cmac = CMAC(AES(binascii.unhexlify(key)), backend) cmac.update(binascii.unhexlify(message)) assert cmac.verify(binascii.unhexlify(output)) is None @pytest.mark.supported( only_if=lambda backend: backend.cmac_algorithm_supported( TripleDES(fake_key)), skip_message="Does not support CMAC." ) @pytest.mark.parametrize("params", vectors_3des) def test_3des_generate(self, backend, params): key1 = params["key1"] key2 = params["key2"] key3 = params["key3"] key = key1 + key2 + key3 message = params["message"] output = params["output"] cmac = CMAC(TripleDES(binascii.unhexlify(key)), backend) cmac.update(binascii.unhexlify(message)) assert binascii.hexlify(cmac.finalize()) == output @pytest.mark.supported( only_if=lambda backend: backend.cmac_algorithm_supported( TripleDES(fake_key)), skip_message="Does not support CMAC." 
) @pytest.mark.parametrize("params", vectors_3des) def test_3des_verify(self, backend, params): key1 = params["key1"] key2 = params["key2"] key3 = params["key3"] key = key1 + key2 + key3 message = params["message"] output = params["output"] cmac = CMAC(TripleDES(binascii.unhexlify(key)), backend) cmac.update(binascii.unhexlify(message)) assert cmac.verify(binascii.unhexlify(output)) is None @pytest.mark.supported( only_if=lambda backend: backend.cmac_algorithm_supported( AES(fake_key)), skip_message="Does not support CMAC." ) def test_invalid_verify(self, backend): key = b"2b7e151628aed2a6abf7158809cf4f3c" cmac = CMAC(AES(key), backend) cmac.update(b"6bc1bee22e409f96e93d7e117393172a") with pytest.raises(InvalidSignature): cmac.verify(b"foobar") @pytest.mark.supported( only_if=lambda backend: backend.cipher_supported( ARC4(fake_key), None), skip_message="Does not support CMAC." ) def test_invalid_algorithm(self, backend): key = b"0102030405" with pytest.raises(TypeError): CMAC(ARC4(key), backend) @pytest.mark.supported( only_if=lambda backend: backend.cmac_algorithm_supported( AES(fake_key)), skip_message="Does not support CMAC." ) def test_raises_after_finalize(self, backend): key = b"2b7e151628aed2a6abf7158809cf4f3c" cmac = CMAC(AES(key), backend) cmac.finalize() with pytest.raises(AlreadyFinalized): cmac.update(b"foo") with pytest.raises(AlreadyFinalized): cmac.copy() with pytest.raises(AlreadyFinalized): cmac.finalize() @pytest.mark.supported( only_if=lambda backend: backend.cmac_algorithm_supported( AES(fake_key)), skip_message="Does not support CMAC." ) def test_verify_reject_unicode(self, backend): key = b"2b7e151628aed2a6abf7158809cf4f3c" cmac = CMAC(AES(key), backend) with pytest.raises(TypeError): cmac.update(six.u('')) with pytest.raises(TypeError): cmac.verify(six.u('')) @pytest.mark.supported( only_if=lambda backend: backend.cmac_algorithm_supported( AES(fake_key)), skip_message="Does not support CMAC." ) def test_copy_with_backend(self, backend): key = b"2b7e151628aed2a6abf7158809cf4f3c" cmac = CMAC(AES(key), backend) cmac.update(b"6bc1bee22e409f96e93d7e117393172a") copy_cmac = cmac.copy() assert cmac.finalize() == copy_cmac.finalize() def test_copy(): @utils.register_interface(CMACBackend) class PretendBackend(object): pass pretend_backend = PretendBackend() copied_ctx = pretend.stub() pretend_ctx = pretend.stub(copy=lambda: copied_ctx) key = b"2b7e151628aed2a6abf7158809cf4f3c" cmac = CMAC(AES(key), backend=pretend_backend, ctx=pretend_ctx) assert cmac._backend is pretend_backend assert cmac.copy()._backend is pretend_backend def test_invalid_backend(): key = b"2b7e151628aed2a6abf7158809cf4f3c" pretend_backend = object() with raises_unsupported_algorithm(_Reasons.BACKEND_MISSING_INTERFACE): CMAC(AES(key), pretend_backend)
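
# A minimal usage sketch of the CMAC API the tests above exercise, assuming a
# backend that supports CMAC (the default backend here) and reusing the NIST
# AES-128 key/message pair that appears in the tests.
import binascii

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.cmac import CMAC

key = binascii.unhexlify(b"2b7e151628aed2a6abf7158809cf4f3c")
message = binascii.unhexlify(b"6bc1bee22e409f96e93d7e117393172a")

# Generate a tag; finalize() consumes the context, so verification uses a fresh one.
c = CMAC(AES(key), default_backend())
c.update(message)
tag = c.finalize()

verifier = CMAC(AES(key), default_backend())
verifier.update(message)
verifier.verify(tag)  # raises InvalidSignature if the tag does not match
print(binascii.hexlify(tag))
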
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for TensorFlow "Eager" Mode's Tensor class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import re import numpy as np from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import context from tensorflow.python.eager import core from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util def _create_tensor(value, device=None, dtype=None): ctx = context.context() if device is None: device = ctx.device_name if dtype is not None: dtype = dtype.as_datatype_enum try: return ops.EagerTensor( value, context=ctx._handle, device=device, dtype=dtype) except core._NotOkStatusException as e: # pylint: disable=protected-access raise core._status_to_exception(e.code, e.message) class TFETensorTest(test_util.TensorFlowTestCase): def testScalarTensor(self): t = _create_tensor(3, dtype=dtypes.int32) self.assertAllEqual(t, _create_tensor(np.array(3))) self.assertEqual(dtypes.int32, t.dtype) self.assertEqual(0, t.shape.ndims) self.assertAllEqual([], t.shape.as_list()) self.assertIn("tf.Tensor", str(t)) self.assertIn("tf.Tensor", repr(t)) def testBadConstructorArgs(self): ctx = context.context() handle = ctx._handle device = ctx.device_name # Missing context. with self.assertRaisesRegexp( TypeError, r"Required argument 'context' \(pos 2\) not found"): ops.EagerTensor(1, device=device) # Missing device. with self.assertRaisesRegexp( TypeError, r"Required argument 'device' \(pos 3\) not found"): ops.EagerTensor(1, context=handle) # Bad dtype type. with self.assertRaisesRegexp(TypeError, "Expecting a DataType value for dtype. Got"): ops.EagerTensor(1, context=handle, device=device, dtype="1") # Following errors happen when trying to copy to GPU. if not context.context().num_gpus(): self.skipTest("No GPUs found") with ops.device("/device:GPU:0"): device = ctx.device_name # Bad context. with self.assertRaisesRegexp( TypeError, "Expecting a PyCapsule encoded context handle. Got"): ops.EagerTensor(1.0, context=1, device=device) # Bad device. with self.assertRaisesRegexp( TypeError, "Error parsing device argument to CopyToDevice"): ops.EagerTensor(1.0, context=handle, device=1) def testNumpyValue(self): values = np.array([3.0]) t = _create_tensor(values) self.assertAllEqual(values, t) def testNumpyValueWithCast(self): values = np.array([3.0], dtype=np.float32) t = _create_tensor(values, dtype=dtypes.float64) self.assertAllEqual(values, t) ctx = context.context() # Bad dtype value. 
with self.assertRaisesRegexp(TypeError, "Invalid dtype argument value"): ops.EagerTensor( values, context=ctx._handle, device=ctx.device_name, dtype=12345) def testNumpyOrderHandling(self): n = np.array([[1, 2], [3, 4]], order="F") t = _create_tensor(n) self.assertAllEqual([[1, 2], [3, 4]], t) def testNumpyArrayDtype(self): tensor = constant_op.constant([1.0, 2.0, 3.0]) numpy_tensor = np.asarray(tensor, dtype=np.int32) self.assertAllEqual(numpy_tensor, [1, 2, 3]) def testNdimsAgreesWithNumpy(self): numpy_tensor = np.asarray(1.0) tensor = constant_op.constant(numpy_tensor) self.assertAllEqual(numpy_tensor.ndim, tensor.ndim) numpy_tensor = np.asarray([1.0, 2.0, 3.0]) tensor = constant_op.constant(numpy_tensor) self.assertAllEqual(numpy_tensor.ndim, tensor.ndim) numpy_tensor = np.asarray([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]) tensor = constant_op.constant(numpy_tensor) self.assertAllEqual(numpy_tensor.ndim, tensor.ndim) def testCopy(self): t = constant_op.constant(1.0) tt = copy.copy(t) self.assertAllEqual(tt, 1.0) del tt tt = copy.deepcopy(t) self.assertAllEqual(tt, 1.0) del tt self.assertAllEqual(t, 1.0) def testConstantDtype(self): self.assertEqual(constant_op.constant(1.0, dtype=np.int64).dtype, dtypes.int64) def testTensorAndNumpyMatrix(self): expected = np.array([[1.0, 2.0], [3.0, 4.0]], np.float32) actual = _create_tensor([[1.0, 2.0], [3.0, 4.0]]) self.assertAllEqual(expected, actual) self.assertEqual(np.float32, actual.dtype) self.assertEqual(dtypes.float32, actual.dtype) self.assertAllEqual([2, 2], actual.shape.as_list()) def testFloatDowncast(self): # Unless explicitly specified, float64->float32 t = _create_tensor(3.0) self.assertEqual(dtypes.float32, t.dtype) t = _create_tensor(3.0, dtype=dtypes.float64) self.assertEqual(dtypes.float64, t.dtype) def testBool(self): t = _create_tensor(False) if t: self.assertFalse(True) def testIntDowncast(self): t = _create_tensor(3) self.assertEqual(dtypes.int32, t.dtype) t = _create_tensor(3, dtype=dtypes.int64) self.assertEqual(dtypes.int64, t.dtype) t = _create_tensor(2**33) self.assertEqual(dtypes.int64, t.dtype) def testTensorCreationFailure(self): with self.assertRaises(ValueError): # Should fail because the each row of the Python object has a different # number of columns. self.assertEqual(None, _create_tensor([[1], [1, 2]])) def testMultiLineTensorStr(self): t = _create_tensor(np.eye(3)) tensor_str = str(t) self.assertIn("shape=%s, dtype=%s" % (t.shape, t.dtype.name), tensor_str) self.assertIn(str(t), tensor_str) def testMultiLineTensorRepr(self): t = _create_tensor(np.eye(3)) tensor_repr = repr(t) self.assertTrue(tensor_repr.startswith("<")) self.assertTrue(tensor_repr.endswith(">")) self.assertIn("id=%d, shape=%s, dtype=%s, numpy=\n%r" % (t._id, t.shape, t.dtype.name, t.numpy()), tensor_repr) def testTensorStrReprObeyNumpyPrintOptions(self): orig_threshold = np.get_printoptions()["threshold"] orig_edgeitems = np.get_printoptions()["edgeitems"] np.set_printoptions(threshold=2, edgeitems=1) t = _create_tensor(np.arange(10, dtype=np.int32)) self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", str(t))) self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", repr(t))) # Clean up: reset to previous printoptions. 
np.set_printoptions(threshold=orig_threshold, edgeitems=orig_edgeitems) def testZeroDimTensorStr(self): t = _create_tensor(42) self.assertIn("42, shape=(), dtype=int32", str(t)) def testZeroDimTensorRepr(self): t = _create_tensor(42) self.assertTrue(repr(t).startswith("<")) self.assertTrue(repr(t).endswith(">")) self.assertIn("id=%d, shape=(), dtype=int32, numpy=42" % t._id, repr(t)) def testZeroSizeTensorStr(self): t = _create_tensor(np.zeros(0, dtype=np.float32)) self.assertIn("[], shape=(0,), dtype=float32", str(t)) def testZeroSizeTensorRepr(self): t = _create_tensor(np.zeros(0, dtype=np.float32)) self.assertTrue(repr(t).startswith("<")) self.assertTrue(repr(t).endswith(">")) self.assertIn("id=%d, shape=(0,), dtype=float32, numpy=%r" % (t._id, t.numpy()), repr(t)) def testStringTensor(self): t_np_orig = np.array([[b"a", b"ab"], [b"abc", b"abcd"]]) t = _create_tensor(t_np_orig) t_np = t.numpy() self.assertTrue(np.all(t_np == t_np_orig), "%s vs %s" % (t_np, t_np_orig)) def testIterateOverTensor(self): l = [[1, 2], [3, 4]] t = _create_tensor(l) for list_element, tensor_element in zip(l, t): self.assertAllEqual(list_element, tensor_element.numpy()) def testStringTensorOnGPU(self): if not context.context().num_gpus(): self.skipTest("No GPUs found") with ops.device("/device:GPU:0"): with self.assertRaisesRegexp( RuntimeError, "Can't copy Tensor with type string to device"): _create_tensor("test string") class TFETensorUtilTest(test_util.TensorFlowTestCase): def testListOfThree(self): t1 = _create_tensor([[1, 2], [3, 4], [5, 6]], dtype=dtypes.int32) t2 = _create_tensor([[1, 2, 5], [3, 4, 5]], dtype=dtypes.int32) t3 = _create_tensor([[1], [3], [5], [6]], dtype=dtypes.int32) r = pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, t2, t3], 0) self.assertAllEqual(np.array([3, 2, 4]), r.numpy()) r = pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, t2, t3], 1) self.assertAllEqual(np.array([2, 3, 1]), r.numpy()) def testEmptyTensorList(self): a = pywrap_tensorflow.TFE_Py_TensorShapeSlice([], 0) self.assertTrue(isinstance(a, ops.EagerTensor)) self.assertEqual(0, a.numpy().size) def testTensorListContainsNonTensors(self): t1 = _create_tensor([1, 2], dtype=dtypes.int32) with self.assertRaisesRegexp( TypeError, r"Expected a list of EagerTensors but element 1 has type \"str\""): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, "abc"], 0) with self.assertRaisesRegexp( TypeError, r"Expected a list of EagerTensors but element 0 has type \"int\""): pywrap_tensorflow.TFE_Py_TensorShapeSlice([2, t1], 0) def testTensorListNotList(self): t1 = _create_tensor([1, 2], dtype=dtypes.int32) with self.assertRaisesRegexp( TypeError, r"tensors argument must be a list or a tuple. Got \"EagerTensor\""): pywrap_tensorflow.TFE_Py_TensorShapeSlice(t1, -2) def testNegativeSliceDim(self): t1 = _create_tensor([1, 2], dtype=dtypes.int32) with self.assertRaisesRegexp( ValueError, r"Slice dimension must be non-negative. 
Got -2"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1], -2) def testUnicode(self): self.assertEqual(constant_op.constant(u"asdf").numpy(), b"asdf") def testFloatTensor(self): self.assertEqual(dtypes.float64, _create_tensor(np.float64()).dtype) self.assertEqual(dtypes.float32, _create_tensor(np.float32()).dtype) self.assertEqual(dtypes.float32, _create_tensor(0.0).dtype) def testSliceDimOutOfRange(self): t1 = _create_tensor([[1, 2], [3, 4], [5, 6]], dtype=dtypes.int32) t2 = _create_tensor([1, 2], dtype=dtypes.int32) t3 = _create_tensor(2, dtype=dtypes.int32) with self.assertRaisesRegexp( IndexError, r"Slice dimension \(2\) must be smaller than rank of all tensors, " "but tensor at index 0 has rank 2"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1], 2) with self.assertRaisesRegexp( IndexError, r"Slice dimension \(1\) must be smaller than rank of all tensors, " "but tensor at index 0 has rank 1"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t2], 1) with self.assertRaisesRegexp( IndexError, r"Slice dimension \(1\) must be smaller than rank of all tensors, " "but tensor at index 1 has rank 1"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, t2], 1) with self.assertRaisesRegexp( IndexError, r"Slice dimension \(0\) must be smaller than rank of all tensors, " "but tensor at index 0 has rank 0"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t3], 0) with self.assertRaisesRegexp( IndexError, r"Slice dimension \(0\) must be smaller than rank of all tensors, " "but tensor at index 2 has rank 0"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t2, t1, t3], 0) if __name__ == "__main__": test.main()
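
# A plain NumPy reference (this is not the pywrap_tensorflow binding itself) for
# the behaviour TFETensorUtilTest checks above: given a sequence of tensors and a
# slice dimension, return each tensor's size along that dimension, raising the
# analogous errors for bad arguments.
import numpy as np


def tensor_shape_slice_reference(tensors, slice_dim):
  if not isinstance(tensors, (list, tuple)):
    raise TypeError("tensors argument must be a list or a tuple. Got \"%s\""
                    % type(tensors).__name__)
  if slice_dim < 0:
    raise ValueError("Slice dimension must be non-negative. Got %d" % slice_dim)
  dims = []
  for i, t in enumerate(tensors):
    arr = np.asarray(t)
    if slice_dim >= arr.ndim:
      raise IndexError(
          "Slice dimension (%d) must be smaller than rank of all tensors, "
          "but tensor at index %d has rank %d" % (slice_dim, i, arr.ndim))
    dims.append(arr.shape[slice_dim])
  return np.asarray(dims, dtype=np.int32)


# Mirrors testListOfThree: shapes (3, 2), (2, 3) and (4, 1) sliced along dim 0.
print(tensor_shape_slice_reference(
    [np.zeros((3, 2)), np.zeros((2, 3)), np.zeros((4, 1))], 0))  # -> [3 2 4]
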
# pylint:disable=unused-argument from typing import Optional, TYPE_CHECKING import logging from ailment import Block from ailment.expression import Load, Const, BinaryOp, UnaryOp from ailment.statement import Statement, Assignment, Call, ConditionalJump from ... import AnalysesHub from ..ailgraph_walker import AILGraphWalker from ..ailblock_walker import AILBlockWalker from .optimization_pass import OptimizationPass, OptimizationPassStage if TYPE_CHECKING: from angr import Project _l = logging.getLogger(name=__name__) class BlockWalker(AILBlockWalker): def __init__(self, project: 'Project'): super().__init__() self._project = project self._new_block: Optional[Block] = None # output def walk(self, block: Block): self._new_block = None super().walk(block) return self._new_block def _addr_belongs_to_ro_region(self, addr: int) -> bool: section = self._project.loader.find_section_containing(addr) if section is not None: return not section.is_writable segment = self._project.loader.find_segment_containing(addr) if segment is not None: return not segment.is_writable return False def _addr_belongs_to_got(self, addr: int) -> bool: section = self._project.loader.find_section_containing(addr) if section is not None: return section.name and "got" in section.name return False def _addr_belongs_to_object(self, addr: int) -> bool: obj = self._project.loader.find_object_containing(addr) return obj is not None def _handle_stmt(self, stmt_idx: int, stmt: Statement, block: Block): r = super()._handle_stmt(stmt_idx, stmt, block) if r is not None: # replace the original statement if self._new_block is None: self._new_block = block.copy() self._new_block.statements[stmt_idx] = r def _handle_Assignment(self, stmt_idx: int, stmt: Assignment, block: Block): new_dst = self._handle_expr(0, stmt.dst, stmt_idx, stmt, block) new_src = self._handle_expr(1, stmt.src, stmt_idx, stmt, block) if new_dst is not None or new_src is not None: new_stmt = Assignment(stmt.idx, stmt.dst if new_dst is None else new_dst, stmt.src if new_src is None else new_src, **stmt.tags ) return new_stmt return None def _handle_Call(self, stmt_idx: int, stmt: Call, block: Block): if stmt.args: i = 0 new_exprs = [ ] while i < len(stmt.args): arg = stmt.args[i] new_expr = self._handle_expr(i, arg, stmt_idx, stmt, block) new_exprs.append(new_expr) i += 1 if any(expr is not None for expr in new_exprs): # create a new statement new_args = [ (new_arg if new_arg is not None else old_arg) for new_arg, old_arg in zip(new_exprs, stmt.args)] new_stmt = Call(stmt.idx, stmt.target, calling_convention=stmt.calling_convention, prototype=stmt.prototype, args=new_args, ret_expr=stmt.ret_expr, **stmt.tags ) return new_stmt return None def _handle_Load(self, expr_idx: int, expr: Load, stmt_idx: int, stmt: Statement, block: Block): if isinstance(expr.addr, Const): # *(const_addr) # does it belong to a read-only section/segment? if self._addr_belongs_to_got(expr.addr.value) or \ self._addr_belongs_to_ro_region(expr.addr.value): w = self._project.loader.memory.unpack_word(expr.addr.value, expr.addr.bits // self._project.arch.byte_width, endness=self._project.arch.memory_endness ) if w is not None: # nice! replace it with the actual value return Const(None, None, w, expr.bits, **expr.tags) elif isinstance(expr.addr, Load) and expr.addr.bits == self._project.arch.bits: if isinstance(expr.addr.addr, Const): # *(*(const_addr)) # does it belong to a read-only section/segment? 
if self._addr_belongs_to_got(expr.addr.addr.value) or \ self._addr_belongs_to_ro_region(expr.addr.addr.value): w = self._project.loader.memory.unpack_word(expr.addr.addr.value, expr.addr.addr.bits // self._project.arch.byte_width, endness=self._project.arch.memory_endness ) if w is not None and self._addr_belongs_to_object(w): # nice! replace it with a load from that address return Load(expr.idx, Const(None, None, w, expr.addr.size, **expr.addr.addr.tags), expr.size, expr.endness, variable=expr.variable, variable_offset=expr.variable_offset, guard=expr.guard, alt=expr.alt, **expr.tags ) return super()._handle_Load(expr_idx, expr, stmt_idx, stmt, block) def _handle_BinaryOp(self, expr_idx: int, expr: BinaryOp, stmt_idx: int, stmt: Statement, block: Block): new_operands = [ self._handle_expr(0, expr.operands[0], stmt_idx, stmt, block), self._handle_expr(1, expr.operands[1], stmt_idx, stmt, block), ] if any(op is not None for op in new_operands): new_operands = [(new_op if new_op is not None else old_op) for new_op, old_op in zip(new_operands, expr.operands)] return BinaryOp(expr.idx, expr.op, new_operands, expr.signed, variable=expr.variable, variable_offset=expr.variable_offset, **expr.tags ) return None def _handle_UnaryOp(self, expr_idx: int, expr: BinaryOp, stmt_idx: int, stmt: Statement, block: Block): new_operand = self._handle_expr(0, expr.operands[0], stmt_idx, stmt, block) if new_operand is not None: return UnaryOp(expr.idx, expr.op, new_operand, expr.signed, variable=expr.variable, variable_offset=expr.variable_offset, **expr.tags ) return None def _handle_ConditionalJump(self, stmt_idx: int, stmt: ConditionalJump, block: Block): new_cond = self._handle_expr(0, stmt.condition, stmt_idx, stmt, block) new_true_target = self._handle_expr(1, stmt.true_target, stmt_idx, stmt, block) new_false_target = self._handle_expr(2, stmt.false_target, stmt_idx, stmt, block) if new_cond is not None or new_true_target is not None or new_false_target is not None: return ConditionalJump(stmt.idx, new_cond if new_cond is not None else stmt.condition, new_true_target if new_true_target is not None else stmt.true_target, new_false_target if new_false_target is not None else stmt.false_target, **stmt.tags ) return None class ConstantDereferencesSimplifier(OptimizationPass): """ Makes the following simplifications:: *(*(const_addr)) ==> *(value) iff *const_addr == value """ # TODO: This optimization pass may support more architectures and platforms ARCHES = ["X86", "AMD64", "ARMEL", "ARMHF", "ARMCortexM"] PLATFORMS = ["linux"] STAGE = OptimizationPassStage.AFTER_GLOBAL_SIMPLIFICATION def __init__(self, func, **kwargs): super().__init__(func, **kwargs) self._block_walker = BlockWalker(self.project) self.analyze() def _check(self): return True, None def _analyze(self, cache=None): # walk the entire graph and traverse each expression walker = AILGraphWalker(self._graph, handler=self._walk_block, replace_nodes=True) walker.walk() def _walk_block(self, block: Block) -> Optional[Block]: new_block = self._block_walker.walk(block) return new_block AnalysesHub.register_default("ConstantDereferencesSimplifier", ConstantDereferencesSimplifier)
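
# A toy, angr-free sketch of the rewrite this pass performs. `ro_memory` stands in
# for project.loader.memory restricted to read-only regions (e.g. a GOT slot that
# holds an already-resolved pointer); the addresses are made up for illustration.
ro_memory = {0x600ff0: 0x401234}


def fold_inner_deref(addr, memory):
    """For *(*(addr)): if addr is a known read-only word, return the folded load
    address so the expression becomes *(value); otherwise leave it alone."""
    return memory.get(addr)


assert fold_inner_deref(0x600ff0, ro_memory) == 0x401234   # *(*(0x600ff0)) -> *(0x401234)
assert fold_inner_deref(0x601000, ro_memory) is None       # unknown address: no rewrite
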
# File: TscCommonLib.py ; This file is part of Twister. # version: 3.023 # Copyright (C) 2012-2013 , Luxoft # Authors: # Andrei Costachi <[email protected]> # Cristi Constantin <[email protected]> # Daniel Cioata <[email protected]> # Mihail Tudoran <[email protected]> # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module contains common functions to communicate with the Central Engine. The most important class is `TscCommonLib`, which is exposed both in Twister tests and libraries. The exception classes are used to gracefully crash the Twister tests. """ from __future__ import print_function import os import copy import time import ast import inspect import binascii import platform import marshal import rpyc from rpyc import BgServingThread # This will work, because TWISTER_PATH is appended to sys.path. try: from ce_libs import PROXY_ADDR, USER, EP, SUT except Exception: raise Exception('CommonLib must run from Twister!\n') TWISTER_PATH = os.getenv('TWISTER_PATH') if not TWISTER_PATH: raise Exception('$TWISTER_PATH environment variable is not set!\n') __all__ = ['TscCommonLib', 'ExceptionTestFail', 'ExceptionTestAbort', 'ExceptionTestTimeout', 'ExceptionTestSkip'] # class TwisterException(Warning): """ Base custom exception. """ def __init__(self, value=''): self.value = value def __str__(self): return str(self.value) class ExceptionTestFail(TwisterException): """ Custom exception, caught by the EP. """ pass class ExceptionTestAbort(TwisterException): """ Custom exception, caught by the EP. """ pass class ExceptionTestTimeout(TwisterException): """ Custom exception, caught by the EP. """ pass class ExceptionTestSkip(TwisterException): """ Custom exception, caught by the EP. """ pass # class TscCommonLib(object): """ Base library for Twister tests and libraries. All functions are exposed automatically. """ platform_sys = platform.system().lower() """ Current platform. (ex: Windows, Linux) """ __ce_proxy = None """ Pointer to Central Engine via RPyc. """ proxy_path = PROXY_ADDR """ The Central Engine RPyc address. """ userName = USER """ The username running this test, or libary. """ epName = EP """ The EP running this test, or libary. """ global_vars = {} """ All global variables, shared between tests and libaries. """ interact = None _SUITE_ID = 0 _FILE_ID = 0 def __init__(self): """ Some initialization code. """ self._reload_libs() def _reload_libs(self): """ Internal function. Reload libraries. """ from common import iniparser ce_path = '{}/.twister_cache/{}/ce_libs/ce_libs.py'.format(TWISTER_PATH, self.epName) cfg = iniparser.ConfigObj(ce_path) for n, v in cfg.iteritems(): setattr(self, '_' + n, v) del cfg @property def sutName(self): """ Returns current SUT name. """ self._reload_libs() name = self.ce_proxy.get_suite_variable(self.epName, self._SUITE_ID, 'sut') return name @property def SUT(self): """ Returns current SUT name; same as sutName. 
""" self._reload_libs() name = self.ce_proxy.get_suite_variable(self.epName, self._SUITE_ID, 'sut') return name @property def SUITE_ID(self): """ Returns current suite ID. """ self._reload_libs() return self._SUITE_ID @property def FILE_ID(self): """ Returns current file ID. """ self._reload_libs() return self._FILE_ID @property def SUITE_NAME(self): """ Returns current suite name. """ self._reload_libs() name = self.ce_proxy.get_suite_variable(self.epName, self._SUITE_ID, 'name') return name @property def FILE_NAME(self): """ Returns current file name. """ self._reload_libs() name = self.ce_proxy.get_file_variable(self.epName, self._FILE_ID, 'file') if name: name = os.path.split(name)[1] return name @classmethod def _ce_proxy(cls): """ Dinamically connect to the Central Engine. This is a class method. """ stack = inspect.stack() # The upper stack is either the EP, or the library that derives this stack_fpath = stack[1][1] stack_fname = os.path.split(stack_fpath)[1] proxy = None # If the upper stack is not ExecutionProcess, the library is derived if stack_fname != 'ExecutionProcess.py': # The EP stack is always the last ep_code = stack[-1][0] # It's impossible to access the globals from the EP any other way p = ep_code.f_globals.get('ceProxy') if p: return p.root del stack, stack_fpath # Try to reuse the old connection try: cls.__ce_proxy.echo('ping') return cls.__ce_proxy except Exception: pass # RPyc config config = { 'allow_pickle': True, 'allow_getattr': True, 'allow_setattr': True, 'allow_delattr': True, 'allow_all_attrs': True, } ce_ip, ce_port = cls.proxy_path.split(':') # If the old connection is broken, connect to the RPyc server try: # Transform XML-RPC port into RPyc Port; RPyc port = XML-RPC port + 10 ! ce_port = int(ce_port) + 10 proxy = rpyc.connect(ce_ip, ce_port, config=config) proxy.root.hello('lib::{}'.format(cls.epName)) except Exception: print('*ERROR* Cannot connect to CE path `{}`! Exiting!'.format(cls.proxy_path)) raise Exception('Cannot connect to CE') # Authenticate on RPyc server try: proxy.root.login(cls.userName, 'EP') except Exception: print('*ERROR* Cannot authenticate on CE path `{}`! Exiting!'.format(cls.proxy_path)) raise Exception('Cannot authenticate on CE') # Launch bg server try: BgServingThread(proxy) cls.__ce_proxy = proxy.root return cls.__ce_proxy except Exception: print('*ERROR* Cannot launch Bg serving thread! Exiting!') raise Exception('Cannot launch Bg thread') @property def ce_proxy(self): """ Pointer to the Central Engine RPyc connection. """ return self._ce_proxy() @staticmethod def test_fail(reason=''): """ Gracefully crash test with status `Fail`. """ raise ExceptionTestFail(reason) @staticmethod def test_abort(reason=''): """ Gracefully crash test with status `Abort`. """ raise ExceptionTestAbort(reason) @staticmethod def test_timeout(reason=''): """ Gracefully crash test with status `Timeout`. """ raise ExceptionTestTimeout(reason) @staticmethod def test_skip(reason=''): """ Gracefully crash test with status `Skip`. """ raise ExceptionTestSkip(reason) def interact(self, type, msg, timeout=0, options={}): """ This function should be called only from a test! It gives to user the opportunity to interact with tests. 
Params: type: type of the interaction: confirmation window, input window, continue/cancel window msg: the message that will be printed in the window return: True/False or a string """ self.interact = None print('\n>> Waiting for user interaction >>') id = binascii.hexlify(os.urandom(4)) self.ce_proxy.interact(id, self.epName, type, msg, timeout, options) time.sleep(1) counter = 0.0 while self.interact == None: time.sleep(0.5) if timeout > 0: counter += 0.5 if counter > timeout: break counter = 0.0 reason = 'User action' if type == 'decide' and self.interact in [None, 'false']: # abort test! if self.interact == None: reason = 'Decide timeout expired!' else: reason = 'Test aborted by user!' self.ce_proxy.set_file_status(self.epName, self._FILE_ID, 5, timeout) self.ce_proxy.set_ep_status(self.epName, 2) print('\n>> Test aborted by user! >>') self.ce_proxy.remove_interact(id, self.epName, type, msg, timeout, options, reason) raise ExceptionTestAbort(reason) if self.interact == None: reason = 'Timeout expired' print('Interaction timeout expired!') if type == 'msg': self.interact = True elif type == 'options' and options: self.interact = options['default'] self.ce_proxy.set_ep_status(self.epName, 2) self.ce_proxy.remove_interact(id, self.epName, type, msg, timeout, options, reason) print('\n>> Interaction response: {} >>'.format(self.interact)) return self.interact def log_msg(self, log_type, log_message): """ Send a message in a specific log, on Central Engine. """ if not log_message: log_message = '' else: log_message = str(log_message) self.ce_proxy.log_message(log_type, log_message) @classmethod def get_global(cls, var): """ Function to get variables saved from Test files. The same data must be used, both in Testcase and derived Libraries. """ if var in cls.global_vars: return cls.global_vars[var] # Else... ce = cls._ce_proxy() return ce.get_global_variable(var) @classmethod def set_global(cls, var, value): """ Function to keep variables sent from Test files. The same data must be used, both in Testcase and derived Libraries. """ try: marshal.dumps(value) ce = cls._ce_proxy() return ce.set_global_variable(var, value) except Exception: cls.global_vars[var] = value return True def get_config(self, cfg_path, var_path=''): """ Get data from a config, using the full path to the config file and the full path to a config variable in that file. """ return self.ce_proxy.get_config(cfg_path, var_path) def set_binding(self, config_name, component, sut): """ Method to set a config->SUT binding """ return self.ce_proxy.set_binding(config_name, component, sut) def del_binding(self, config_name, component): """ Method that deletes a binding from the list of config->SUT bindings """ return self.ce_proxy.del_binding(config_name, component) def get_binding(self, cfg_root): """ Function to get a config -> SUT binding. """ bindings = self.ce_proxy.get_user_variable('bindings') or {} return bindings.get(cfg_root) def get_bind_id(self, component_name, test_config='default_binding'): """ Function to get a config -> SUT binding ID. Shortcut function. """ bindings = self.ce_proxy.get_user_variable('bindings') or {} # Fix cfg root maybe ? 
if not test_config: test_config = 'default_binding' config_data = bindings.get(test_config, {}) # If the component cannot be found in the requested config, search in default config if test_config != 'default_binding' and (component_name not in config_data): config_data = bindings.get('default_binding', {}) return config_data.get(component_name, False) def get_bind_name(self, component_name, test_config='default_binding'): """ Function to get a cfg -> SUT binding name. Shortcut function. """ sid = self.get_bind_id(component_name, test_config) if not sid: return False sut = self.get_sut(sid) if not sut: sut = {} return sut.get('path', False) def get_iter_value(self, iter_name, cfg_name=None): """ Find iteration value, for a specific iterator name. `iter_name` is the name of the iterator to search. Returns The iterator value. """ iterNr = self.ce_proxy.get_file_variable(self.epName, self._FILE_ID, 'iterationNr') found = [] if cfg_name is not None: # if the test configuration name is valid, find it in iterNr cfg_iters = [cfg_item.strip() for cfg_item in iterNr.strip().\ split(',') if cfg_name in cfg_item.split('#')[0]] # now we have a list with iterators declared in cfg_name file # search the specified one found = [iter_val.split('=')[-1] for iter_val in cfg_iters \ if '#{}='.format(iter_name) in iter_val] else: # no test configuration name provided, search for all iterators # that match iter_name found = [i.split('=')[-1] for i in iterNr.split(',') \ if '#{}='.format(iter_name) in i] if not found: return '' return found[0] def get_iter_comp(self, iter_name, cfg_name=None): """ Find component name that is parent of specific iterator name. `iter_name` is the name of the iterator to search. Returns the component name """ iterNr = self.ce_proxy.get_file_variable(self.epName, self._FILE_ID, 'iterationNr') found = [] if cfg_name is not None: # if the test configuration name is valid, find it in iterNr cfg_iters = [cfg_item.strip() for cfg_item in iterNr.strip().\ split(',') if cfg_name in cfg_item.split('#')[0]] # now we have a list with iterators declared in cfg_name file # search the specified one found = [iter_val.split('#')[1] for iter_val in cfg_iters \ if '#{}='.format(iter_name) in iter_val] else: # no test configuration name provided, search for all iterators # that match iter_name found = [i.split('#')[1] for i in iterNr.split(',') \ if '#{}='.format(iter_name) in i] if not found: return '' return found[0] def count_project_files(self): """ Returns the number of files inside the current project. """ data = self.ce_proxy.get_ep_variable(self.epName, 'suites') SuitesManager = copy.deepcopy(data) files = SuitesManager.get_files(recursive=True) return len(files) def current_file_index(self): """ Returns the index of this file in the project. If the file ID is not found, the count will fail. """ data = self.ce_proxy.get_ep_variable(self.epName, 'suites') SuitesManager = copy.deepcopy(data) files = SuitesManager.get_files(recursive=True) try: return files.index(self.FILE_ID) except Exception: return -1 def count_suite_files(self): """ Returns the number of files inside a suite ID. If the suite ID is not found, the count will fail. """ data = self.ce_proxy.get_suite_variable(self.epName, self.SUITE_ID, 'children') SuitesManager = copy.deepcopy(data) files = SuitesManager.keys() # First level of files, depth=1 return len(files) def current_fsuite_index(self): """ Returns the index of this file, inside this suite. If the suite ID and file ID are not found, the count will fail. 
""" data = self.ce_proxy.get_suite_variable(self.epName, self.SUITE_ID, 'children') SuitesManager = copy.deepcopy(data) files = SuitesManager.keys() # First level of files, depth=1 try: return files.index(self.FILE_ID) except Exception: return -1 def py_exec(self, code_string): """ Expose Python functions and class instances in TCL. """ if not isinstance(code_string, str): print('py_exec: Error, the code must be a string `{}`!'.format(code_string)) return False try: ret = eval(code_string, self.global_vars, self.global_vars) except Exception, e: print('py_exec: Error execution code `{}`! Exception `{}`!'.format(code_string, e)) ret = False return ret def _encode_unicode(self, input): """ Encode data to UTF-8. """ if isinstance(input, dict): return {self._encode_unicode(key): self._encode_unicode(value) for key, value in input.iteritems()} elif isinstance(input, list): return [self._encode_unicode(elem) for elem in input] elif isinstance(input, unicode): return input.encode('utf-8') else: return input def get_tb(self, query, dtype=unicode): """ Get TB content. """ try: data = self.ce_proxy.get_tb(query) if dtype == str: return self._encode_unicode(data) else: return data except Exception as e: print('Error on get Resource! `{}`!'.format(e)) return None def get_resource(self, query, dtype=unicode): """ Get TB content. Alias function for `get_tb`. """ return self.get_tb(query, dtype) def create_new_tb(self, name, parent='/', props={}): """ Update a TB. """ try: return self.ce_proxy.create_new_tb(name, parent, props) except Exception as e: print('Error on create Resource! `{}`!'.format(e)) return None def create_component_tb(self, name, parent='/', props={}): """ Update a TB. """ try: return self.ce_proxy.create_component_tb(name, parent, props) except Exception as e: print('Error on create Resource! `{}`!'.format(e)) return None def update_meta_tb(self, name, parent='/', props={}): """ Update a TB. """ try: return self.ce_proxy.update_meta_tb(name, parent, props) except Exception as e: print('Error on update Resource! `{}`!'.format(e)) return None def set_tb(self, name, parent='/', props={}): """ Update a TB. High level function. """ if isinstance(props, str) or isinstance(props, unicode): try: props = ast.literal_eval(props) except Exception: pass try: return self.ce_proxy.set_tb(name, parent, props) except Exception as e: print('Error on set Resource! `{}`!'.format(e)) return None def set_resource(self, name, parent='/', props={}): """ Update a TB. Alias function for `set_tb`. """ return self.set_tb(name, parent, props) def rename_tb(self, res_query, new_name): """ Rename a TB. """ try: return self.ce_proxy.rename_tb(res_query, new_name) except Exception as e: print('Error on rename Resource! `{}`!'.format(e)) return None def rename_resource(self, res_query, new_name): """ Rename a TB. Alias function for `rename_tb`. """ return self.rename_tb(res_query, new_name) def delete_tb(self, query): """ Delete a TB. """ try: return self.ce_proxy.delete_tb(query) except Exception as e: print('Error on delete Resource! `{}`!'.format(e)) return None def delete_resource(self, query): """ Delete a TB. Alias function for `delete_tb`. """ return self.delete_tb(query) def get_sut(self, query, follow_links=False, dtype=unicode): """ Get SUT content. query : id/path of sut """ try: data = self.ce_proxy.get_sut(query, follow_links) if dtype == str: return self._encode_unicode(data) else: return data except Exception as e: print('Error on get SUT! 
`{}`!'.format(e)) return None def get_info_sut(self, query): """ Get SUT info. """ try: return self.ce_proxy.get_info_sut(query) except Exception as e: print('Error on get info SUT! `{}`!'.format(e)) return None def create_new_sut(self, name, parent='/', props={}): """ Update a SUT. """ try: return self.ce_proxy.create_new_sut(name, parent, props) except Exception as e: print('Error on create SUT! `{}`!'.format(e)) return None def create_component_sut(self, name, parent='/', props={}): """ Update a SUT. """ try: return self.ce_proxy.create_component_sut(name, parent, props) except Exception as e: print('Error on create SUT! `{}`!'.format(e)) return None def update_meta_sut(self, name, parent='/', props={}): """ Update a SUT. """ try: return self.ce_proxy.update_meta_sut(name, parent, props) except Exception as e: print('Error on update SUT! `{}`!'.format(e)) return None def set_sut(self, name, parent='/', props={}): """ Update a SUT. """ try: return self.ce_proxy.set_sut(name, parent, props) except Exception as e: print('Error on set SUT! `{}`!'.format(e)) return None def rename_sut(self, res_query, new_name): """ Rename a SUT. """ try: return self.ce_proxy.rename_sut(res_query, new_name) except Exception as e: print('Error on rename SUT! `{}`!'.format(e)) return None def delete_sut(self, query): """ Delete a SUT. """ try: return self.ce_proxy.delete_sut(query) except Exception as e: print('Error on delete SUT! `{}`!'.format(e)) return None def delete_component_sut(self, query): """ Delete a SUT component. """ try: return self.ce_proxy.delete_component_sut(query) except Exception as e: print('Error on delete SUT component! `{}`!'.format(e)) return None def reserve_tb(self, query): """ Reserve a resource. You can then edit the resource. """ try: return self.ce_proxy.reserve_tb(query) except Exception as e: print('Error on reserve resource! `{}`!'.format(e)) return None def save_reserved_tb(self, query): """ Save changes. Don't release. """ try: return self.ce_proxy.save_reserved_tb(query) except Exception as e: print('Error on save resource! `{}`!'.format(e)) return None def save_release_reserved_tb(self, query): """ Save changes. Release the resource. """ try: return self.ce_proxy.save_release_reserved_tb(query) except Exception as e: print('Error on save & release resource! `{}`!'.format(e)) return None def discard_release_reserved_tb(self, query): """ Drop changes. Release the resource. """ try: return self.ce_proxy.discard_release_reserved_tb(query) except Exception as e: print('Error on discard & release resource! `{}`!'.format(e)) return None def reserve_sut(self, query): """ Reserve a SUT. You can then edit the SUT. """ try: return self.ce_proxy.reserve_sut(query) except Exception as e: print('Error on reserve SUT! `{}`!'.format(e)) return None def save_reserved_sut(self, query): """ Save changes. Don't release. """ try: return self.ce_proxy.save_reserved_sut(query) except Exception as e: print('Error on save SUT! `{}`!'.format(e)) return None def save_release_reserved_sut(self, query): """ Save changes. Release the SUT. """ try: return self.ce_proxy.save_release_reserved_sut(query) except Exception as e: print('Error on save & release SUT! `{}`!'.format(e)) return None def discard_release_reserved_sut(self, query): """ Drop changes. Release the SUT. """ try: return self.ce_proxy.discard_release_reserved_sut(query) except Exception as e: print('Error on discard & release SUT! `{}`!'.format(e)) return None def is_tb_reserved(self, query): """ Yes or No. 
""" try: result = self.ce_proxy.is_tb_reserved(query) if not result or result == 'false': return False else: return True except Exception as e: print('Error on discard & release SUT! `{}`!'.format(e)) return None def get_tb_user(self, query): """ User name. """ try: result = self.ce_proxy.is_tb_reserved(query) if not result or result == 'false': return False else: return result except Exception as e: print('Error on discard & release SUT! `{}`!'.format(e)) return None def is_sut_reserved(self, query): """ Yes or No. """ try: result = self.ce_proxy.is_sut_reserved(query) if not result or result == 'false': return False else: return True except Exception as e: print('Error on discard & release SUT! `{}`!'.format(e)) return None def get_sut_user(self, query): """ User name. """ try: result = self.ce_proxy.is_sut_reserved(query) if not result or result == 'false': return False else: return result except Exception as e: print('Error on discard & release SUT! `{}`!'.format(e)) return None # Eof()
# ***************************************************************** # Copyright (c) 2013 Massachusetts Institute of Technology # # Developed exclusively at US Government expense under US Air Force contract # FA8721-05-C-002. The rights of the United States Government to use, modify, # reproduce, release, perform, display or disclose this computer software and # computer software documentation in whole or in part, in any manner and for # any purpose whatsoever, and to have or authorize others to do so, are # Unrestricted and Unlimited. # # Licensed for use under the BSD License as described in the BSD-LICENSE.txt # file in the root directory of this release. # # Project: SPAR # Authors: OMD # Description: Unit tests for ArrayView # # Modifications: # Date Name Modification # ---- ---- ------------ # 13 Jan 2012 omd Original Version # ***************************************************************** import unittest from array_view import ArrayView from array_view import get_array_view_or_slice import numpy class ArrayViewTest(unittest.TestCase): def test_is_view(self): """Make sure that a view is really a view: e.g. that changes to it are reflected in the base array and vice versa.""" base = numpy.array([1, 2, 3, 4, 5]) view = ArrayView(base, [0, 2]) self.assertEqual(view[0], 1) self.assertEqual(view[1], 3) # Now modify base. The changes should be reflected in the view. base[2] = 100 self.assertEqual(view[0], 1) self.assertEqual(view[1], 100) self.assertEqual(base[0], 1) self.assertEqual(base[2], 100) # Now modify the view. The changes should be reflected in base. view[1] = 3 self.assertEqual(view[0], 1) self.assertEqual(view[1], 3) self.assertEqual(base[0], 1) self.assertEqual(base[2], 3) def test_view_slices(self): """We should be able to get and set via slices as well and still preserve the view property.""" base = numpy.arange(0, 100) view = ArrayView(base, [0, 20, 22, 50, 77]) self.assertEqual(view[0], 0) self.assertEqual(view[1], 20) self.assertEqual(view[2], 22) self.assertEqual(view[3], 50) self.assertEqual(view[4], 77) # The [:] slice should just be the same view again. 
same_view = view[:] self.assertEqual(same_view[0], 0) self.assertEqual(same_view[1], 20) self.assertEqual(same_view[2], 22) self.assertEqual(same_view[3], 50) self.assertEqual(same_view[4], 77) # And it should really be a view same_view[1] = 1 self.assertEqual(same_view[1], 1) self.assertEqual(view[1], 1) self.assertEqual(base[20], 1) same_view[1] = 20 base[77] = -1 self.assertEqual(view[4], -1) self.assertEqual(same_view[4], -1) base[77] = 77 # And other slices should work as well middle_two = view[1:3] self.assertEqual(middle_two[0], 20) self.assertEqual(middle_two[1], 22) # and it too should be a view middle_two[0] = 0 self.assertEqual(middle_two[0], 0) self.assertEqual(base[20], 0) middle_two[0] = 20 def test_iteration(self): base = numpy.arange(0, 100) view = ArrayView(base, [0, 20, 22, 50, 77]) expected = [0, 20, 22, 50, 77] for expected, observed in zip(expected, view): self.assertEqual(expected, observed) def test_len(self): base = numpy.arange(0, 100) view = ArrayView(base, [0, 20, 22, 50, 77]) self.assertEqual(len(view), 5) def test_contains(self): base = numpy.arange(0, 100) view = ArrayView(base, [0, 20, 22, 50, 77]) self.assertTrue(20 in view) self.assertTrue(22 in view) self.assertFalse(1 in view) self.assertFalse(19 in view) def test_add(self): """Simple addition of arrays should work.""" base = numpy.array([0, 1, 1, 0]) # view = [1, 0] view = ArrayView(base, [1, 3]) added = view + numpy.array([1, 1]) self.assertEqual(added[0], 2) self.assertEqual(added[1], 1) def test_plus_equal(self): """The += operator should work and since it's a view it should modify both the view and the base array.""" base = numpy.array([0, 1, 1, 0]) # view = [1, 0] view = ArrayView(base, [1, 3]) view += numpy.array([1, 1]) self.assertEqual(view[0], 2) self.assertEqual(view[1], 1) # make sure base was modified too self.assertEqual(base[1], 2) self.assertEqual(base[3], 1) def test_plus_equal_two_views(self): """Test that += works correctly with a view on the left and right of the assignment.""" base1 = numpy.array([0, 1, 1, 0]) base2 = numpy.array([1, 1, 1, 1]) # view1 == [1, 0] view1 = ArrayView(base1, [1, 3]) # veiw2 == [1, 1] view2 = ArrayView(base2, [0, 2]) view1 += view2 self.assertEqual(view1[0], 2) self.assertEqual(view1[1], 1) # make sure base was modified too self.assertEqual(base1[1], 2) self.assertEqual(base1[3], 1) # view2 and base2 should be unmodified self.assertEqual(view2[0], 1) self.assertEqual(view2[1], 1) self.assertTrue(numpy.all(base2 == numpy.array([1, 1, 1, 1]))) def test_other_in_place_math(self): """I've overrident most of the other "in place" math operators like -=, %=, etc. 
Here we test some of them and, in parcticular, make sure they're modifying the base array.""" base = numpy.array([0, 1, 1, 0]) # view = [1, 0] view = ArrayView(base, [1, 3]) view -= numpy.array([1, 2]) self.assertEqual(view[0], 0) self.assertEqual(view[1], -2) self.assertEqual(base[0], 0) self.assertEqual(base[1], 0) self.assertEqual(base[2], 1) self.assertEqual(base[3], -2) base = numpy.array([0, 1, 1, 0]) # view = [1, 0] view = ArrayView(base, [1, 3]) view *= numpy.array([2, 2]) self.assertEqual(view[0], 2) self.assertEqual(view[1], 0) self.assertEqual(base[0], 0) self.assertEqual(base[1], 2) self.assertEqual(base[2], 1) self.assertEqual(base[3], 0) def test_get_array_view_or_slice(self): """Make sure the get_array_view_or_slice method returns the right thing and that the returned slice contains the right data.""" base = numpy.array([0, 1, 2, 3, 4, 5]) # contiguous indices should be a slice s1 = get_array_view_or_slice(base, [0, 1, 2, 3]) self.assertEqual(type(s1), numpy.ndarray) self.assertEqual(len(s1), 4) self.assertEqual(s1[0], 0) self.assertEqual(s1[1], 1) self.assertEqual(s1[2], 2) self.assertEqual(s1[3], 3) # and the slice should act as a view s1[0] = -1 self.assertEqual(base[0], -1) base[0] = 100 self.assertEqual(s1[0], 100) # Put it back the way it was. base[0] = 0 # And make sure it works if the start index isn't 0 s2 = get_array_view_or_slice(base, [2, 3]) self.assertEqual(type(s2), numpy.ndarray) self.assertEqual(len(s2), 2) self.assertEqual(s2[0], 2) self.assertEqual(s2[1], 3) # And make sure an ArrayView is returned if the indices aren't # contiguous av = get_array_view_or_slice(base, [0, 1, 3]) self.assertEqual(type(av), ArrayView) self.assertEqual(len(av), 3) self.assertEqual(av[0], 0) self.assertEqual(av[1], 1) self.assertEqual(av[2], 3)
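
# The real array_view module is not shown in this corpus; the sketch below is
# reconstructed only from the behaviour the tests above exercise, so every name
# and detail beyond those tests is an assumption, not the actual implementation.
import numpy


class ArrayView(object):
    """A view over selected indices of a base array; writes go through to the base."""

    def __init__(self, base, indices):
        self._base = base
        self._indices = list(indices)

    @staticmethod
    def _as_array(other):
        # Accept plain arrays as well as other ArrayViews on the right-hand side.
        return other._materialize() if isinstance(other, ArrayView) else numpy.asarray(other)

    def _materialize(self):
        return self._base[self._indices]

    def __len__(self):
        return len(self._indices)

    def __getitem__(self, key):
        if isinstance(key, slice):
            # Slicing yields another view over the selected subset of indices.
            return ArrayView(self._base, self._indices[key])
        return self._base[self._indices[key]]

    def __setitem__(self, key, value):
        self._base[self._indices[key]] = value

    def __iter__(self):
        for index in self._indices:
            yield self._base[index]

    def __contains__(self, value):
        return any(self._base[index] == value for index in self._indices)

    def __add__(self, other):
        return self._materialize() + self._as_array(other)

    def _inplace(self, other, op):
        # In-place operators compute on the selected values and write them back
        # through the view, so the base array is modified too.
        self._base[self._indices] = op(self._materialize(), self._as_array(other))
        return self

    def __iadd__(self, other):
        return self._inplace(other, numpy.add)

    def __isub__(self, other):
        return self._inplace(other, numpy.subtract)

    def __imul__(self, other):
        return self._inplace(other, numpy.multiply)


def get_array_view_or_slice(base, indices):
    """Return a real numpy slice when the indices are contiguous, else an ArrayView."""
    if list(indices) == list(range(indices[0], indices[-1] + 1)):
        return base[indices[0]:indices[-1] + 1]
    return ArrayView(base, indices)
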
# -*- coding: utf-8 -*- # # Copyright 2020-2021 BigML # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Resources management functions """ import sys import bigml.api from bigmler.utils import (dated, get_url, log_message, check_resource, is_shared, check_resource_error, log_created_resources) from bigmler.reports import report from bigmler.resourcesapi.common import set_basic_args, update_attributes, \ update_json_args, configure_input_fields, \ check_fields_struct from bigmler.resourcesapi.common import SEED, DS_NAMES, \ ALL_FIELDS_QS def set_basic_dataset_args(args, name=None): """Return dataset basic arguments dict """ if name is None: name = args.name dataset_args = set_basic_args(args, name) if args.sample_rate != 1 and args.no_model: dataset_args.update({ "seed": SEED if args.seed is None else args.seed, "sample_rate": args.sample_rate }) if hasattr(args, "range") and args.range_: dataset_args.update({ "range": args.range_ }) return dataset_args def set_dataset_args(args, fields, multi_label_data=None): """Return dataset arguments dict """ dataset_args = set_basic_dataset_args(args) objective_field = (None if not hasattr(args, 'objective_field') else args.objective_field) if multi_label_data is not None and objective_field is None: objective_field = multi_label_data['objective_name'] if objective_field is not None and fields is not None: try: objective_id = fields.field_id(objective_field) except ValueError as exc: sys.exit(exc) dataset_args.update(objective_field={'id': objective_id}) if hasattr(args, 'juxtapose') and args.juxtapose: dataset_args.update({"juxtapose": args.juxtapose}) if hasattr(args, 'sql_query') and args.sql_query: dataset_args.update({"sql_query": args.sql_query}) if hasattr(args, 'sql_output_fields_') and args.sql_output_fields_: dataset_args.update({"sql_output_fields": args.sql_output_fields_}) if hasattr(args, 'json_query_') and args.json_query_: dataset_args.update({"json_query": args.json_query_}) if args.json_filter: dataset_args.update(json_filter=args.json_filter) elif args.lisp_filter: dataset_args.update(lisp_filter=args.lisp_filter) if args.dataset_fields_ and fields is not None: input_fields = configure_input_fields(fields, args.dataset_fields_) dataset_args.update(input_fields=input_fields) if (hasattr(args, 'multi_label') and args.multi_label and multi_label_data is not None): dataset_args.update( user_metadata={'multi_label_data': multi_label_data}) if fields and args.import_fields: fields_struct = fields.new_fields_structure(args.import_fields) check_fields_struct(fields_struct, "dataset") update_attributes(dataset_args, fields_struct) if 'dataset' in args.json_args: update_json_args(dataset_args, args.json_args.get('dataset'), fields) return dataset_args def set_dataset_split_args(name, description, args, sample_rate=1, out_of_bag=False, multi_label_data=None): """Return dataset arguments dict to split a dataset """ dataset_args = { "name": name, "description": description, "category": args.category, "tags": args.tag, "seed": SEED if args.seed is None else 
args.seed, "sample_rate": sample_rate, "out_of_bag": out_of_bag } if hasattr(args, "range") and args.range_: dataset_args.update({ "range": args.range_ }) if (hasattr(args, "multi_label") and args.multi_label and multi_label_data is not None): dataset_args.update( user_metadata={'multi_label_data': multi_label_data}) return dataset_args def create_dataset(origin_resource, dataset_args, args, api=None, path=None, session_file=None, log=None, dataset_type=None): """Creates remote dataset from source, dataset, cluster or datasets list """ if api is None: api = bigml.api.BigML() message = dated("Creating dataset.\n") log_message(message, log_file=session_file, console=args.verbosity) check_fields_struct(dataset_args, "dataset") # if --json-query or --sql-query are used and no names are set for # the datasets, we create default naming to A, B, C, etc. for the datasets # to be used as origin if ((hasattr(args, 'sql_query') and args.sql_query) or \ (hasattr(args, 'json_query') and args.sql_query)) and \ isinstance(origin_resource, list) and \ ((not isinstance(origin_resource[0], dict)) or \ origin_resource[0].get("name") is None): for index, element in enumerate(origin_resource): if index < len(DS_NAMES): if isinstance(element, dict): if element.get("resource") is not None: element = {"id": element["resource"]} element.update({"name": DS_NAMES[index]}) origin_resource[index] = element elif isinstance(element, str): origin_resource[index] = {"id": element, "name": DS_NAMES[index]} dataset = api.create_dataset(origin_resource, dataset_args, retries=None) suffix = "_" + dataset_type if dataset_type else "" log_created_resources("dataset%s" % suffix, path, bigml.api.get_dataset_id(dataset), mode='a') dataset_id = check_resource_error(dataset, "Failed to create dataset: ") try: dataset = check_resource(dataset, api.get_dataset, query_string=ALL_FIELDS_QS, raise_on_error=True) except Exception as exception: sys.exit("Failed to get a finished dataset: %s" % str(exception)) message = dated("Dataset created: %s\n" % get_url(dataset)) log_message(message, log_file=session_file, console=args.verbosity) log_message("%s\n" % dataset_id, log_file=log) if args.reports: report(args.reports, path, dataset) return dataset def get_dataset(dataset, api=None, verbosity=True, session_file=None): """Retrieves the dataset in its actual state """ if api is None: api = bigml.api.BigML() if (isinstance(dataset, str) or bigml.api.get_status(dataset)['code'] != bigml.api.FINISHED): message = dated("Retrieving dataset. %s\n" % get_url(dataset)) log_message(message, log_file=session_file, console=verbosity) dataset = check_resource(dataset, api.get_dataset, query_string=ALL_FIELDS_QS) check_resource_error(dataset, "Failed to get dataset: ") return dataset def publish_dataset(dataset, args, api=None, session_file=None): """Publishes dataset and sets its price (if any) """ if api is None: api = bigml.api.BigML() public_dataset = {"private": False} if args.dataset_price: public_dataset.update(price=args.dataset_price) dataset = update_dataset(dataset, public_dataset, args, api=api, session_file=session_file) check_resource_error(dataset, "Failed to update dataset: ") dataset = check_resource(dataset, api.get_dataset, query_string=ALL_FIELDS_QS) return dataset def update_dataset(dataset, dataset_args, args, api=None, path=None, session_file=None): """Updates dataset properties """ if api is None: api = bigml.api.BigML() message = dated("Updating dataset. 
%s\n" % get_url(dataset))
    log_message(message, log_file=session_file, console=args.verbosity)
    dataset = api.update_dataset(dataset, dataset_args)
    if is_shared(dataset):
        message = dated("Shared dataset link. %s\n" %
                        get_url(dataset, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, dataset)
    check_resource_error(dataset, "Failed to update dataset: ")
    dataset = check_resource(dataset, api.get_dataset,
                             query_string=ALL_FIELDS_QS)
    return dataset
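
# A minimal sketch (not a bigmler command) of how the split helpers above are
# usually paired: the same seed and sample_rate with out_of_bag False/True produce
# complementary training and test datasets. `args` is a stand-in namespace carrying
# only the attributes these helpers read, not the real bigmler option object.
if __name__ == "__main__":
    from argparse import Namespace

    args = Namespace(category=12, tag=["example"], seed=None)
    train_args = set_dataset_split_args(
        "example | training", "80% sample", args,
        sample_rate=0.8, out_of_bag=False)
    test_args = set_dataset_split_args(
        "example | test", "20% out of bag", args,
        sample_rate=0.8, out_of_bag=True)
    print(train_args["sample_rate"], train_args["out_of_bag"])  # 0.8 False
    print(test_args["sample_rate"], test_args["out_of_bag"])    # 0.8 True
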
import socket, struct, platform #region Global Variables blocked_ports = [] open_ports = [] #Set the default timeout value to two seconds socket.setdefaulttimeout(2) #endregion def port_check(ports, ip_address, vsphere_object): for element in ports: v_port = int(element) #connect to the ports sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) result = sock.connect_ex((ip_address,v_port)) if result == 0: open_ports.append(v_port) else: blocked_ports.append(v_port) print(vsphere_object + ' (' + ip_address + ')' + ' may experience issues due to the following being blocked:') print('') for i in blocked_ports: for key, value in ports.items(): if str(i) == key: print '- '+ str(value) + '' + ' (Port ' + str(i) + ')' print('') #Reset the block ports list back to zero del blocked_ports[:] def esxi__increase_ip(): # x.x.x.x string -> integer ip2int = lambda ipstr: struct.unpack('!I', socket.inet_aton(ipstr))[0] # x.x.x.x string -> integer int2ip = lambda n: socket.inet_ntoa(struct.pack('!I', n)) #Convert the first IP address to an integer esxi_initial_int = ip2int(esxi_input.initial_ip) #Second host IP Address Address esxi_increase_int = (esxi_initial_int + 1) esxi_increase_ip = int2ip(esxi_increase_int) esxi_input.ip.append(esxi_increase_ip) def vcenter_input(): while True: try: print'' vcenter_input.ip = raw_input('Please enter the vCenter IP Address: ') print('') # Check for a valid IP Address socket.inet_aton(vcenter_input.ip) # if not a valid ip show the following error except socket.error: print '' print('*** Error: Please enter a valid IP Address ***') print '' else: break def esxi_input(): while True: print('') esxi_input.hosts = raw_input('Enter the number of ESXi hosts you would like to check: ') # Verify that the user enters a 1 digit organization code if len(esxi_input.hosts) > 1 or esxi_input.hosts.isdigit() == False: print('') print(' *** Error: Please enter a valid number ***') print('') continue #user successfully entered a organization code name else: break #region ESXi Port Scan if int(esxi_input.hosts) >= 1: while True: try: print'' esxi_input.initial_ip = raw_input('Please enter the starting ESXi Host IP Address: ') # Check for a valid IP Address socket.inet_aton(esxi_input.initial_ip) # if not a valid ip show the following error except socket.error: print '' print('*** Error: Please enter a valid IP Address ***') print '' else: break #create the list that will house all of the ESXi IP Address esxi_input.ip = [] #Add the first user defined ESXi Host IP Address to the list esxi_input.ip.append(esxi_input.initial_ip) #region Define the variables that allow us to increase an IP #endregion def executing_script(): print('') print('*** Executing script. This may take several minutes.... ***') print('') print('') print('The following vSphere objects are currently supported:') print('') print('- vCenter 5.x') print('- ESXi 5.x') print('') while True: object_to_scan = raw_input('Which object would you like to scan (vCenter, ESXi, Both): ').lower() if object_to_scan not in ['vcenter', 'esxi', 'both']: print('') print('*** Error: Please enter "vCenter", "ESXi" , or "Both". 
***') print('') continue else: break if object_to_scan == 'vcenter': esxi_input.hosts = 0 vcenter_input() executing_script() elif object_to_scan == 'esxi': esxi_input() executing_script() else: vcenter_input() esxi_input() executing_script() #region vCenter if object_to_scan in ['vcenter', 'both']: #region Ports required for vCenter 5.x vcenter_5 = {'25': 'Email notifications', '53': 'DNS lookups', '80': 'direct HTTP connections', '80': 'DPM with IPMI (iLO/BMC) ASF Remote Management and Control Protocol', '88': 'AD Authentication', '135': 'Linked Mode', '161': 'SNMP Polling', '162': 'SNMP Trap Send', '389': 'LDAP port number for the Directory Services for the vCenter Server group', '443': 'Used to listen for connections from the vSphere Client', '443': 'vCenter Agent. Host DPM with HP iLO Remote Management and Control Protocol', '623': 'DPM and IPMI (iLO/BMC) ASF Remote Management and Control Protocol', '902': 'Used to send data to managed hosts', '902': 'Managed hosts send a regular heartbeat to the vCenter Server system', '902': 'Host access to other hosts for migration and provisioning', '1024': 'Bi-directional RPC communication on dynamic TCP ports', '1433': 'vCenter Microsoft SQL Server Database', '1521': 'vCenter Oracle Database', '5988': 'CIM Transactions over HTTPs', '7500': 'Linked Mode, Java Discovery Port', '8005': 'Internal Communication Port', '8006': 'Internal Communication Port', '8009': 'AJP Port', '8080': 'VMware Virtual Center Management Web Services', '8083': 'Internal Service Diagnostics', '8085': 'Internal Service Diagnostics/SDK', '8086': 'Internal Communication Port', '8087': 'Internal Service Diagnostics', '8089': 'SDK Tunneling Port', '8443': 'VMware Virtual Center Management Web Services', '9443': 'vSphere Web Client Access', '10109': 'Inventory Service Linked Mode Communication', '10443': 'Inventory Service HTTPS', '51915': 'Web service used to add host to AD domain'} #endregion #set the vsphere_object_ip_address to the vcenter IP for proper identification in the port check function port_check(vcenter_5, vcenter_input.ip, 'vCenter') #endregion #region ESXi #region Ports required for ESXi 5.x esxi_5 = {'22': 'SSH Server', '53': 'DNS Client', '68': 'DHCP Client', '80': 'Redirect Web Browser to HTTPS Server (443)', '88': 'PAM Active Directory Authentication - Kerberos', '111': 'NFS Client - RPC Portmapper', '123': 'NTP Client', '161': 'SNMP Polling', '162': 'SNMP Trap Send', '389': 'PAM Active Directory Authentication - Kerberos', '427': 'CIM Service Location Protocol (SLP)', '443': 'VI / vSphere Client to ESXi/ESX Host management connection', '443': 'Host to host VM migration provisioning', '445': 'PAM Active Directory Authentication', '464': 'PAM Active Directory Authentication - Kerberos', '514': 'Remote syslog logging', '902': 'Host access to other hosts for migration and provisioning', '902': 'vSphere Client access to virtual machine consoles(MKS)', '902': '(UDP) Status update (heartbeat) connection from ESXi to vCenter Server', '1024': 'Bi-directional communication on TCP/UDP porsts is required between the ESXi host and the AD Domain Controller', '2049': 'Transactions from NFS storage devices', '3260': 'Transactions to iSCSI storage devices', '5900': 'RFP protocol which is used by management tools such as VNC', '5988': 'CIM transactions over HTTP', '5989': 'CIM XML transactions over HTTPS', '8000': 'Requests from vMotion', '8100': 'Traffic between hosts for vSphere Fault Tolerance (FT)', '8182': 'Traffic between hosts for vSphere High Availability (HA)', 
           '8200': 'DVS Port Information',
           '8301': 'DVS Port Information',
           '8302': 'Internal Communication Port'}
#endregion

if int(esxi_input.hosts) > 0:
    for x in range(int(esxi_input.hosts) - 1):
        esxi__increase_ip()
    for index, vsphere_object_ip_address in enumerate(esxi_input.ip):
        port_check(esxi_5, vsphere_object_ip_address, 'ESXi')
#endregion

#determine if user is running Windows to keep terminal open
if platform.system() == 'Windows':
    raw_input('To exit hit enter:')
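# For reference, the same TCP reachability test can be written with the
# standard library's socket.create_connection, which closes the socket for us
# instead of leaving the connect_ex sockets above unclosed. This is a
# Python 3 sketch of the idea, not a drop-in replacement for port_check.
import socket


def is_port_open(ip_address, port, timeout=2.0):
    """Return True if a TCP connection to (ip_address, port) succeeds."""
    try:
        with socket.create_connection((ip_address, port), timeout=timeout):
            return True
    except OSError:
        return False


# Example: report blocked ports for a single (documentation-range) host.
sample_ports = {22: 'SSH Server', 443: 'Management connection'}
for port, desc in sample_ports.items():
    if not is_port_open('192.0.2.10', port):
        print('- {0} (Port {1})'.format(desc, port))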
import random import re import math from heltour.tournament.team_rating_utils import variance, \ team_rating_variance, team_rating_range from itertools import combinations from functools import partial from multiprocessing import Pool from django.conf import settings class Player: pref_score = 0 team = None board = None req_met = False def __init__(self, name, rating, friends, avoid, date, alt, previous_season_alt): self.name = name self.rating = rating self.friends = friends self.avoid = avoid self.date = date self.alt = alt self.previous_season_alt = previous_season_alt @classmethod def player_from_json(cls, player): return cls( player['name'], player['rating'], player['friends'], player['avoid'], player['date_created'], player['prefers_alt'], player.get('previous_season_alternate', False) ) def __repr__(self): return str((self.name, self.board, self.rating, self.req_met)) def __lt__(self, other): return True def set_pref_score(self): self.pref_score = 0 for friend in self.friends: if friend in self.team.get_boards(): self.pref_score += 1 else: self.pref_score -= 1 for avoid in self.avoid: if avoid in self.team.get_boards(): self.pref_score -= 1 # player with more than 5 choices can be <5 preference even if all teammates are preferred def set_req_met(self): self.req_met = False if not self.friends: self.req_met = None for friend in self.friends: if friend in self.team.get_boards(): self.req_met = True class Team: def __init__(self, boards): self.boards = [None for x in range(boards)] def __str__(self): return str((self.boards, self.team_pref_score, self.get_mean())) def __repr__(self): return "Team:{0}".format(id(self)) def __lt__(self, other): return True def changeBoard(self, board, new_player): # updates the player on a board and updates that player's team attribute if self.boards[board]: self.boards[board].team = None self.boards[board] = new_player if new_player.team: new_player.team.boards[board] = None new_player.team = self def get_mean(self, expected_rating=False): # expected_rating is an unused parameter in this version. # it is used by the tournament.models.Team.get_mean method. 
ratings = [board.rating for board in self.boards] mean = sum(ratings) / len(ratings) return mean def get_boards(self): return self.boards def get_player(self, board): return self.boards[board] def set_team_pref_score(self): self.team_pref_score = sum([x.pref_score for x in self.boards]) def update_pref(players, teams): # update preference scores for player in players: player.set_pref_score() for team in teams: team.set_team_pref_score() def update_sort(players, teams): # based on preference score high to low players.sort(key=lambda player: (player.team.team_pref_score, player.pref_score), reverse=False) teams.sort(key=lambda team: team.team_pref_score, reverse=False) def split_into_equal_groups_by_rating(players, group_number): players.sort(key=lambda player: player.rating, reverse=True) avg = len(players) / float(group_number) players_split = [] last = 0.0 while round(last) < len(players): players_split.append(players[int(round(last)):int(round(last + avg))]) last += avg return players_split def get_rating_bounds_of_split(split): min_ratings = [min([p.rating for p in board]) for board in split] max_ratings = [max([p.rating for p in board]) for board in split] min_ratings[-1] = 0 max_ratings[0] = 5000 return list(zip(min_ratings, max_ratings)) def total_happiness(teams): return sum([team.team_pref_score for team in teams]) def flatten(lst): return [item for sub_lst in lst for item in sub_lst] def make_league(playerdata, boards, balance): players = [] for player in playerdata: if player['has_20_games'] and player['in_slack']: players.append(Player.player_from_json(player)) else: pass # print("{0} skipped".format(player['name'])) players.sort(key=lambda player: player.rating, reverse=True) # Split into those that want to be alternates vs those that do not. 
alternates = [p for p in players if p.alt] players = [p for p in players if not p.alt] # splits list of Player objects into 6 near equal lists, sectioned by rating players_split = split_into_equal_groups_by_rating(players, boards) team_rating_bounds = get_rating_bounds_of_split(players_split) num_teams = int(math.ceil((len(players_split[0]) * balance) / 2.0) * 2) # print(f"Targetting {num_teams} teams") # separate latest joining players into alternate lists as required for n, board in enumerate(players_split): board.sort(key=lambda player: (0 if player.previous_season_alt else 1, player.date)) alternates.extend(board[num_teams:]) del board[num_teams:] board.sort(key=lambda player: player.rating, reverse=True) alts_split = split_into_equal_groups_by_rating(alternates, boards) alt_rating_bounds = get_rating_bounds_of_split(alts_split) players = flatten(players_split) # print len(players) # print num_teams # print alts_split for n, board in enumerate(players_split): for player in board: player.board = n def convert_name_list(string_of_names, players): pattern = r"([^-_a-zA-Z0-9]|^){0}([^-_a-zA-Z0-9]|$)" return [player for player in players if re.search(pattern.format(player.name), string_of_names, flags=re.I)] for player in players: filtered_players = [p for p in players if p.board != player.board] player.friends = convert_name_list(player.friends, filtered_players) player.avoid = convert_name_list(player.avoid, filtered_players) # randomly shuffle players for board in players_split: random.shuffle(board) teams = [] for n in range(num_teams): teams.append(Team(boards)) for n, board in enumerate(players_split): for team, player in enumerate(board): teams[team].changeBoard(n, player) update_pref(players, teams) update_sort(players, teams) def swapPlayers(teama, playera, teamb, playerb, board): # swap players between teams - ensure players are same board for input teama.changeBoard(board, playerb) teamb.changeBoard(board, playera) def testSwap(teama, playera, teamb, playerb, board): # try a swap and return the preference change if this swap was made prior_pref = teama.team_pref_score + teamb.team_pref_score swapPlayers(teama, playera, teamb, playerb, board) # swap players forwards update_pref(players, teams) post_pref = teama.team_pref_score + teamb.team_pref_score swapPlayers(teama, playerb, teamb, playera, board) # swap players back update_pref(players, teams) return post_pref - prior_pref # more positive = better swap # take player from least happy team # calculate the overall preference score if player were to swap to each of the preferences' teams or preference swaps into their team. 
# swap player into the team that makes the best change to overall preference # check if the swap has increased the overall preference rating # if swap made, resort list by preference score and start at the least happy player again # if no improving swaps are available, go to the next player # if end of the list reached with no swaps made: stop p = 0 while p < len(players): player = players[p] # least happy player swaps = [] for friend in player.friends: # test both direction swaps for each friend and whichever is better, add the swap ID and score to temp # friends list # board check is redundant due to earlier removal of same board requests if friend.board != player.board and friend.team != player.team: # test swap friend to player team (swap1) swap1_ID = (friend.team, friend, player.team, player.team.get_player(friend.board), friend.board) swap1_score = testSwap(*swap1_ID) # test swap player to friend team (swap2) swap2_ID = (player.team, player, friend.team, friend.team.get_player(player.board), player.board) swap2_score = testSwap(*swap2_ID) swaps.append(max((swap1_score, swap1_ID), (swap2_score, swap2_ID))) for avoid in player.avoid: # test moving player to be avoided to the best preferred team if player.team == avoid.team: # otherwise irrelevant for swap_team in teams: swap_ID = ( avoid.team, avoid, swap_team, swap_team.get_player(avoid.board), avoid.board) swap_score = testSwap(*swap_ID) swaps.append((swap_score, swap_ID)) swaps.sort() if swaps and swaps[-1][ 0] > 0: # there is a swap to make and it improves the preference score swapPlayers(*(swaps[-1][1])) # print(swaps[-1]) update_pref(players, teams) update_sort(players, teams) p = 0 else: # go to the next player in the list p += 1 for player in players: player.set_req_met() return {'teams': teams, 'players': players, 'alternates': alternates, 'team_rating_bounds': team_rating_bounds, 'alt_rating_bounds': alt_rating_bounds, 'alts_split': alts_split} # Reduce variance functions def intersection(lst1, lst2): return set(lst1).intersection(set(lst2)) # Does this swap have a neutral effect on happiness def is_neutral_swap(swap): def count_on_team(attr, player, team): n = len(intersection(getattr(player, attr), team.boards)) n += len([p for p in team.boards if player in getattr(p, attr)]) return n count_friends_on_team = partial(count_on_team, 'friends') count_avoids_on_team = partial(count_on_team, 'avoid') pa, pb = swap pre_swap_score = count_friends_on_team(pa, pa.team) \ + count_friends_on_team(pb, pb.team) \ - count_avoids_on_team(pa, pa.team) \ - count_avoids_on_team(pb, pb.team) post_swap_score = count_friends_on_team(pa, pb.team) \ + count_friends_on_team(pb, pa.team) \ - count_avoids_on_team(pa, pb.team) \ - count_avoids_on_team(pb, pa.team) if pre_swap_score != post_swap_score: return False return True def get_swaps(teams): num_boards = len(teams[0].boards) boards = [[team.boards[i] for team in teams] for i in range(num_boards)] swaps = [[swap for swap in combinations(board, 2) if is_neutral_swap(swap)] for board in boards] return flatten(swaps) def rating_variance_improvement(league_mean, n_boards, swap): def score(a, b): return variance(league_mean, [a, b]) pa, pb = swap a_mean = pa.team.get_mean() b_mean = pb.team.get_mean() initial_score = score(a_mean, b_mean) # calculating change in mean if we swapped the players. 
rating_diff = pb.rating - pa.rating a_mean = a_mean + rating_diff / n_boards b_mean = b_mean - rating_diff / n_boards new_score = score(a_mean, b_mean) # lower is better return new_score - initial_score def get_best_swap(swaps, fun): best_swap = min(swaps, key=fun) return best_swap, fun(best_swap) def perform_swap(swap): pa, pb = swap ta = pa.team tb = pb.team board = pa.board ta.changeBoard(board, pb) tb.changeBoard(board, pa) def update_swaps(swaps, swap_performed, teams): pa, pb = swap_performed affected_players = pa.team.boards + pb.team.boards # remove all swaps involving players affected by the swap. swaps = [swap for swap in swaps if not intersection(swap, affected_players)] # find new neutral swaps involving the players affected by swap. for player in affected_players: board = player.board players_on_board = [team.boards[board] for team in teams if not team.boards[board] in affected_players] swaps.extend([(player, p) for p in players_on_board if is_neutral_swap((player, p))]) swaps.extend([swap for swap in zip(pa.team.boards, pb.team.boards) if is_neutral_swap(swap)]) return swaps def reduce_variance(teams): # players = flatten([team.boards for team in teams]) league_mean = sum([team.get_mean() for team in teams]) / len(teams) n_boards = len(teams[0].boards) swaps = get_swaps(teams) eval_fun = partial(rating_variance_improvement, league_mean, n_boards) best_swap, swap_value = get_best_swap(swaps, eval_fun) # infinite loop possibility here? i = 0 max_iterations = 200 epsilon = 0.0000001 while swap_value <= -epsilon and i < max_iterations: # variance = team_rating_variance(teams, league_mean) # update_pref(players, teams) # score = total_happiness(teams) # print() # print("i: ", i) # print("variance: ", variance) # print("score: ", score) # print("swap_value: ", swap_value) # print("best_swap: ", best_swap) i += 1 perform_swap(best_swap) swaps = update_swaps(swaps, best_swap, teams) best_swap, swap_value = get_best_swap(swaps, eval_fun) # means = [team.get_mean() for team in teams] # print("means: ", sorted(means)) return teams def make_league_map(args): return make_league(*args) def reduce_variance_map(league): league['teams'] = reduce_variance(league['teams']) return league def get_best_league(player_data, boards, balance, count): pool = Pool(getattr(settings, 'TEAMGEN_PROCESSES_NUMBER', 1)) args = [(player_data, boards, balance) for _ in range(count)] leagues = pool.map(make_league_map, args) max_happiness = max([total_happiness(l['teams']) for l in leagues]) happy_leagues = [l for l in leagues if total_happiness(l['teams']) == max_happiness] happy_leagues = pool.map(reduce_variance_map, happy_leagues) min_range_league = min(happy_leagues, key=lambda l: team_rating_range(l['teams'])) return min_range_league
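# rating_variance_improvement above relies on the identity that swapping two
# same-board players changes each team's mean by the rating difference divided
# by the number of boards. A small self-contained check of that identity on
# toy ratings (no league objects involved):

def _toy_team_mean(ratings):
    return sum(ratings) / len(ratings)


_team_a = [1900, 1750, 1600, 1450]
_team_b = [1700, 1720, 1580, 1500]
_rating_diff = _team_b[0] - _team_a[0]   # swap the two board-0 players
_n_boards = len(_team_a)

# Means predicted the way rating_variance_improvement predicts them.
_predicted_a = _toy_team_mean(_team_a) + _rating_diff / _n_boards
_predicted_b = _toy_team_mean(_team_b) - _rating_diff / _n_boards

# Means actually obtained by performing the swap.
_actual_a = _toy_team_mean([_team_b[0]] + _team_a[1:])
_actual_b = _toy_team_mean([_team_a[0]] + _team_b[1:])

assert abs(_predicted_a - _actual_a) < 1e-9
assert abs(_predicted_b - _actual_b) < 1e-9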
# Authors: # Trevor Perrin # Google - handling CertificateRequest.certificate_types # Google (adapted by Sam Rushing and Marcelo Fernandez) - NPN support # Dimitris Moraitis - Anon ciphersuites # Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2 # # See the LICENSE file for legal information regarding use of this file. """Classes representing TLS messages.""" from .utils.compat import * from .utils.cryptomath import * from .errors import * from .utils.codec import * from .constants import * from .x509 import X509 from .x509certchain import X509CertChain from .utils.tackwrapper import * class RecordHeader3(object): def __init__(self): self.type = 0 self.version = (0,0) self.length = 0 self.ssl2 = False def create(self, version, type, length): self.type = type self.version = version self.length = length return self def write(self): w = Writer() w.add(self.type, 1) w.add(self.version[0], 1) w.add(self.version[1], 1) w.add(self.length, 2) return w.bytes def parse(self, p): self.type = p.get(1) self.version = (p.get(1), p.get(1)) self.length = p.get(2) self.ssl2 = False return self class RecordHeader2(object): def __init__(self): self.type = 0 self.version = (0,0) self.length = 0 self.ssl2 = True def parse(self, p): if p.get(1)!=128: raise SyntaxError() self.type = ContentType.handshake self.version = (2,0) #We don't support 2-byte-length-headers; could be a problem self.length = p.get(1) return self class Alert(object): def __init__(self): self.contentType = ContentType.alert self.level = 0 self.description = 0 def create(self, description, level=AlertLevel.fatal): self.level = level self.description = description return self def parse(self, p): p.setLengthCheck(2) self.level = p.get(1) self.description = p.get(1) p.stopLengthCheck() return self def write(self): w = Writer() w.add(self.level, 1) w.add(self.description, 1) return w.bytes class HandshakeMsg(object): def __init__(self, handshakeType): self.contentType = ContentType.handshake self.handshakeType = handshakeType def postWrite(self, w): headerWriter = Writer() headerWriter.add(self.handshakeType, 1) headerWriter.add(len(w.bytes), 3) return headerWriter.bytes + w.bytes class ClientHello(HandshakeMsg): def __init__(self, ssl2=False): HandshakeMsg.__init__(self, HandshakeType.client_hello) self.ssl2 = ssl2 self.client_version = (0,0) self.random = bytearray(32) self.session_id = bytearray(0) self.cipher_suites = [] # a list of 16-bit values self.certificate_types = [CertificateType.x509] self.compression_methods = [] # a list of 8-bit values self.srp_username = None # a string self.tack = False self.supports_npn = False self.server_name = bytearray(0) self.channel_id = False self.extended_master_secret = False self.tb_client_params = [] self.support_signed_cert_timestamps = False self.status_request = False def create(self, version, random, session_id, cipher_suites, certificate_types=None, srpUsername=None, tack=False, supports_npn=False, serverName=None): self.client_version = version self.random = random self.session_id = session_id self.cipher_suites = cipher_suites self.certificate_types = certificate_types self.compression_methods = [0] if srpUsername: self.srp_username = bytearray(srpUsername, "utf-8") self.tack = tack self.supports_npn = supports_npn if serverName: self.server_name = bytearray(serverName, "utf-8") return self def parse(self, p): if self.ssl2: self.client_version = (p.get(1), p.get(1)) cipherSpecsLength = p.get(2) sessionIDLength = p.get(2) randomLength = p.get(2) self.cipher_suites = p.getFixList(3, 
cipherSpecsLength//3) self.session_id = p.getFixBytes(sessionIDLength) self.random = p.getFixBytes(randomLength) if len(self.random) < 32: zeroBytes = 32-len(self.random) self.random = bytearray(zeroBytes) + self.random self.compression_methods = [0]#Fake this value #We're not doing a stopLengthCheck() for SSLv2, oh well.. else: p.startLengthCheck(3) self.client_version = (p.get(1), p.get(1)) self.random = p.getFixBytes(32) self.session_id = p.getVarBytes(1) self.cipher_suites = p.getVarList(2, 2) self.compression_methods = p.getVarList(1, 1) if not p.atLengthCheck(): totalExtLength = p.get(2) soFar = 0 while soFar != totalExtLength: extType = p.get(2) extLength = p.get(2) index1 = p.index if extType == ExtensionType.srp: self.srp_username = p.getVarBytes(1) elif extType == ExtensionType.cert_type: self.certificate_types = p.getVarList(1, 1) elif extType == ExtensionType.tack: self.tack = True elif extType == ExtensionType.supports_npn: self.supports_npn = True elif extType == ExtensionType.server_name: serverNameListBytes = p.getFixBytes(extLength) p2 = Parser(serverNameListBytes) p2.startLengthCheck(2) while 1: if p2.atLengthCheck(): break # no host_name, oh well name_type = p2.get(1) hostNameBytes = p2.getVarBytes(2) if name_type == NameType.host_name: self.server_name = hostNameBytes break elif extType == ExtensionType.channel_id: self.channel_id = True elif extType == ExtensionType.extended_master_secret: self.extended_master_secret = True elif extType == ExtensionType.token_binding: tokenBindingBytes = p.getFixBytes(extLength) p2 = Parser(tokenBindingBytes) ver_minor = p2.get(1) ver_major = p2.get(1) if (ver_major, ver_minor) >= (0, 3): p2.startLengthCheck(1) while not p2.atLengthCheck(): self.tb_client_params.append(p2.get(1)) elif extType == ExtensionType.signed_cert_timestamps: if extLength: raise SyntaxError() self.support_signed_cert_timestamps = True elif extType == ExtensionType.status_request: # Extension contents are currently ignored. # According to RFC 6066, this is not strictly forbidden # (although it is suboptimal): # Servers that receive a client hello containing the # "status_request" extension MAY return a suitable # certificate status response to the client along with # their certificate. If OCSP is requested, they # SHOULD use the information contained in the extension # when selecting an OCSP responder and SHOULD include # request_extensions in the OCSP request. 
p.getFixBytes(extLength) self.status_request = True else: _ = p.getFixBytes(extLength) index2 = p.index if index2 - index1 != extLength: raise SyntaxError("Bad length for extension_data") soFar += 4 + extLength p.stopLengthCheck() return self def write(self): w = Writer() w.add(self.client_version[0], 1) w.add(self.client_version[1], 1) w.addFixSeq(self.random, 1) w.addVarSeq(self.session_id, 1, 1) w.addVarSeq(self.cipher_suites, 2, 2) w.addVarSeq(self.compression_methods, 1, 1) w2 = Writer() # For Extensions if self.certificate_types and self.certificate_types != \ [CertificateType.x509]: w2.add(ExtensionType.cert_type, 2) w2.add(len(self.certificate_types)+1, 2) w2.addVarSeq(self.certificate_types, 1, 1) if self.srp_username: w2.add(ExtensionType.srp, 2) w2.add(len(self.srp_username)+1, 2) w2.addVarSeq(self.srp_username, 1, 1) if self.supports_npn: w2.add(ExtensionType.supports_npn, 2) w2.add(0, 2) if self.server_name: w2.add(ExtensionType.server_name, 2) w2.add(len(self.server_name)+5, 2) w2.add(len(self.server_name)+3, 2) w2.add(NameType.host_name, 1) w2.addVarSeq(self.server_name, 1, 2) if self.tack: w2.add(ExtensionType.tack, 2) w2.add(0, 2) if len(w2.bytes): w.add(len(w2.bytes), 2) w.bytes += w2.bytes return self.postWrite(w) class BadNextProtos(Exception): def __init__(self, l): self.length = l def __str__(self): return 'Cannot encode a list of next protocols because it contains an element with invalid length %d. Element lengths must be 0 < x < 256' % self.length class ServerHello(HandshakeMsg): def __init__(self): HandshakeMsg.__init__(self, HandshakeType.server_hello) self.server_version = (0,0) self.random = bytearray(32) self.session_id = bytearray(0) self.cipher_suite = 0 self.certificate_type = CertificateType.x509 self.compression_method = 0 self.tackExt = None self.next_protos_advertised = None self.next_protos = None self.channel_id = False self.extended_master_secret = False self.tb_params = None self.signed_cert_timestamps = None self.status_request = False def create(self, version, random, session_id, cipher_suite, certificate_type, tackExt, next_protos_advertised): self.server_version = version self.random = random self.session_id = session_id self.cipher_suite = cipher_suite self.certificate_type = certificate_type self.compression_method = 0 self.tackExt = tackExt self.next_protos_advertised = next_protos_advertised return self def parse(self, p): p.startLengthCheck(3) self.server_version = (p.get(1), p.get(1)) self.random = p.getFixBytes(32) self.session_id = p.getVarBytes(1) self.cipher_suite = p.get(2) self.compression_method = p.get(1) if not p.atLengthCheck(): totalExtLength = p.get(2) soFar = 0 while soFar != totalExtLength: extType = p.get(2) extLength = p.get(2) if extType == ExtensionType.cert_type: if extLength != 1: raise SyntaxError() self.certificate_type = p.get(1) elif extType == ExtensionType.tack and tackpyLoaded: self.tackExt = TackExtension(p.getFixBytes(extLength)) elif extType == ExtensionType.supports_npn: self.next_protos = self.__parse_next_protos(p.getFixBytes(extLength)) else: p.getFixBytes(extLength) soFar += 4 + extLength p.stopLengthCheck() return self def __parse_next_protos(self, b): protos = [] while True: if len(b) == 0: break l = b[0] b = b[1:] if len(b) < l: raise BadNextProtos(len(b)) protos.append(b[:l]) b = b[l:] return protos def __next_protos_encoded(self): b = bytearray() for e in self.next_protos_advertised: if len(e) > 255 or len(e) == 0: raise BadNextProtos(len(e)) b += bytearray( [len(e)] ) + bytearray(e) return b def 
write(self): w = Writer() w.add(self.server_version[0], 1) w.add(self.server_version[1], 1) w.addFixSeq(self.random, 1) w.addVarSeq(self.session_id, 1, 1) w.add(self.cipher_suite, 2) w.add(self.compression_method, 1) w2 = Writer() # For Extensions if self.certificate_type and self.certificate_type != \ CertificateType.x509: w2.add(ExtensionType.cert_type, 2) w2.add(1, 2) w2.add(self.certificate_type, 1) if self.tackExt: b = self.tackExt.serialize() w2.add(ExtensionType.tack, 2) w2.add(len(b), 2) w2.bytes += b if self.next_protos_advertised is not None: encoded_next_protos_advertised = self.__next_protos_encoded() w2.add(ExtensionType.supports_npn, 2) w2.add(len(encoded_next_protos_advertised), 2) w2.addFixSeq(encoded_next_protos_advertised, 1) if self.channel_id: w2.add(ExtensionType.channel_id, 2) w2.add(0, 2) if self.extended_master_secret: w2.add(ExtensionType.extended_master_secret, 2) w2.add(0, 2) if self.tb_params: w2.add(ExtensionType.token_binding, 2) # length of extension w2.add(4, 2) # version w2.add(0, 1) w2.add(4, 1) # length of params (defined as variable length <1..2^8-1>, but in # this context the server can only send a single value. w2.add(1, 1) w2.add(self.tb_params, 1) if self.signed_cert_timestamps: w2.add(ExtensionType.signed_cert_timestamps, 2) w2.addVarSeq(bytearray(self.signed_cert_timestamps), 1, 2) if self.status_request: w2.add(ExtensionType.status_request, 2) w2.add(0, 2) if len(w2.bytes): w.add(len(w2.bytes), 2) w.bytes += w2.bytes return self.postWrite(w) class Certificate(HandshakeMsg): def __init__(self, certificateType): HandshakeMsg.__init__(self, HandshakeType.certificate) self.certificateType = certificateType self.certChain = None def create(self, certChain): self.certChain = certChain return self def parse(self, p): p.startLengthCheck(3) if self.certificateType == CertificateType.x509: chainLength = p.get(3) index = 0 certificate_list = [] while index != chainLength: certBytes = p.getVarBytes(3) x509 = X509() x509.parseBinary(certBytes) certificate_list.append(x509) index += len(certBytes)+3 if certificate_list: self.certChain = X509CertChain(certificate_list) else: raise AssertionError() p.stopLengthCheck() return self def write(self): w = Writer() if self.certificateType == CertificateType.x509: chainLength = 0 if self.certChain: certificate_list = self.certChain.x509List else: certificate_list = [] #determine length for cert in certificate_list: bytes = cert.writeBytes() chainLength += len(bytes)+3 #add bytes w.add(chainLength, 3) for cert in certificate_list: bytes = cert.writeBytes() w.addVarSeq(bytes, 1, 3) else: raise AssertionError() return self.postWrite(w) class CertificateStatus(HandshakeMsg): def __init__(self): HandshakeMsg.__init__(self, HandshakeType.certificate_status) def create(self, ocsp_response): self.ocsp_response = ocsp_response return self # Defined for the sake of completeness, even though we currently only # support sending the status message (server-side), not requesting # or receiving it (client-side). def parse(self, p): p.startLengthCheck(3) status_type = p.get(1) # Only one type is specified, so hardwire it. 
if status_type != CertificateStatusType.ocsp: raise SyntaxError() ocsp_response = p.getVarBytes(3) if not ocsp_response: # Can't be empty raise SyntaxError() self.ocsp_response = ocsp_response p.stopLengthCheck() return self def write(self): w = Writer() w.add(CertificateStatusType.ocsp, 1) w.addVarSeq(bytearray(self.ocsp_response), 1, 3) return self.postWrite(w) class CertificateRequest(HandshakeMsg): def __init__(self, version): HandshakeMsg.__init__(self, HandshakeType.certificate_request) self.certificate_types = [] self.certificate_authorities = [] self.version = version self.supported_signature_algs = [] def create(self, certificate_types, certificate_authorities, sig_algs): self.certificate_types = certificate_types self.certificate_authorities = certificate_authorities self.supported_signature_algs = sig_algs return self def parse(self, p): p.startLengthCheck(3) self.certificate_types = p.getVarList(1, 1) if self.version >= (3,3): self.supported_signature_algs = \ [(b >> 8, b & 0xff) for b in p.getVarList(2, 2)] ca_list_length = p.get(2) index = 0 self.certificate_authorities = [] while index != ca_list_length: ca_bytes = p.getVarBytes(2) self.certificate_authorities.append(ca_bytes) index += len(ca_bytes)+2 p.stopLengthCheck() return self def write(self): w = Writer() w.addVarSeq(self.certificate_types, 1, 1) if self.version >= (3,3): w.add(2 * len(self.supported_signature_algs), 2) for (hash, signature) in self.supported_signature_algs: w.add(hash, 1) w.add(signature, 1) caLength = 0 #determine length for ca_dn in self.certificate_authorities: caLength += len(ca_dn)+2 w.add(caLength, 2) #add bytes for ca_dn in self.certificate_authorities: w.addVarSeq(ca_dn, 1, 2) return self.postWrite(w) class ServerKeyExchange(HandshakeMsg): def __init__(self, cipherSuite, version): HandshakeMsg.__init__(self, HandshakeType.server_key_exchange) self.cipherSuite = cipherSuite self.version = version self.srp_N = 0 self.srp_g = 0 self.srp_s = bytearray(0) self.srp_B = 0 # DH params: self.dh_p = 0 self.dh_g = 0 self.dh_Ys = 0 # ECDH params: self.ecdhCurve = 0 self.ecdhPublic = bytearray(0) self.signature = bytearray(0) def createSRP(self, srp_N, srp_g, srp_s, srp_B): self.srp_N = srp_N self.srp_g = srp_g self.srp_s = srp_s self.srp_B = srp_B return self def createDH(self, dh_p, dh_g, dh_Ys): self.dh_p = dh_p self.dh_g = dh_g self.dh_Ys = dh_Ys return self def createECDH(self, ecdhCurve, ecdhPublic): self.ecdhCurve = ecdhCurve self.ecdhPublic = ecdhPublic return self def parse(self, p): p.startLengthCheck(3) if self.cipherSuite in CipherSuite.srpAllSuites: self.srp_N = bytesToNumber(p.getVarBytes(2)) self.srp_g = bytesToNumber(p.getVarBytes(2)) self.srp_s = p.getVarBytes(1) self.srp_B = bytesToNumber(p.getVarBytes(2)) if self.cipherSuite in CipherSuite.srpCertSuites: self.signature = p.getVarBytes(2) elif self.cipherSuite in CipherSuite.anonSuites: self.dh_p = bytesToNumber(p.getVarBytes(2)) self.dh_g = bytesToNumber(p.getVarBytes(2)) self.dh_Ys = bytesToNumber(p.getVarBytes(2)) p.stopLengthCheck() return self def write_params(self): w = Writer() if self.cipherSuite in CipherSuite.srpAllSuites: w.addVarSeq(numberToByteArray(self.srp_N), 1, 2) w.addVarSeq(numberToByteArray(self.srp_g), 1, 2) w.addVarSeq(self.srp_s, 1, 1) w.addVarSeq(numberToByteArray(self.srp_B), 1, 2) elif self.cipherSuite in CipherSuite.dhAllSuites: w.addVarSeq(numberToByteArray(self.dh_p), 1, 2) w.addVarSeq(numberToByteArray(self.dh_g), 1, 2) w.addVarSeq(numberToByteArray(self.dh_Ys), 1, 2) elif self.cipherSuite in 
CipherSuite.ecdhAllSuites: w.add(ECCurveType.named_curve, 1) w.add(self.ecdhCurve, 2) w.addVarSeq(self.ecdhPublic, 1, 1) else: assert(False) return w.bytes def write(self): w = Writer() w.bytes += self.write_params() if self.cipherSuite in CipherSuite.certAllSuites: if self.version >= (3,3): # TODO: Signature algorithm negotiation not supported. w.add(HashAlgorithm.sha1, 1) w.add(SignatureAlgorithm.rsa, 1) w.addVarSeq(self.signature, 1, 2) return self.postWrite(w) def hash(self, clientRandom, serverRandom): bytes = clientRandom + serverRandom + self.write_params() if self.version >= (3,3): # TODO: Signature algorithm negotiation not supported. return SHA1(bytes) return MD5(bytes) + SHA1(bytes) class ServerHelloDone(HandshakeMsg): def __init__(self): HandshakeMsg.__init__(self, HandshakeType.server_hello_done) def create(self): return self def parse(self, p): p.startLengthCheck(3) p.stopLengthCheck() return self def write(self): w = Writer() return self.postWrite(w) class ClientKeyExchange(HandshakeMsg): def __init__(self, cipherSuite, version=None): HandshakeMsg.__init__(self, HandshakeType.client_key_exchange) self.cipherSuite = cipherSuite self.version = version self.srp_A = 0 self.encryptedPreMasterSecret = bytearray(0) def createSRP(self, srp_A): self.srp_A = srp_A return self def createRSA(self, encryptedPreMasterSecret): self.encryptedPreMasterSecret = encryptedPreMasterSecret return self def createDH(self, dh_Yc): self.dh_Yc = dh_Yc return self def parse(self, p): p.startLengthCheck(3) if self.cipherSuite in CipherSuite.srpAllSuites: self.srp_A = bytesToNumber(p.getVarBytes(2)) elif self.cipherSuite in CipherSuite.certSuites: if self.version in ((3,1), (3,2), (3,3)): self.encryptedPreMasterSecret = p.getVarBytes(2) elif self.version == (3,0): self.encryptedPreMasterSecret = \ p.getFixBytes(len(p.bytes)-p.index) else: raise AssertionError() elif self.cipherSuite in CipherSuite.dhAllSuites: self.dh_Yc = bytesToNumber(p.getVarBytes(2)) elif self.cipherSuite in CipherSuite.ecdhAllSuites: self.ecdh_Yc = p.getVarBytes(1) else: raise AssertionError() p.stopLengthCheck() return self def write(self): w = Writer() if self.cipherSuite in CipherSuite.srpAllSuites: w.addVarSeq(numberToByteArray(self.srp_A), 1, 2) elif self.cipherSuite in CipherSuite.certSuites: if self.version in ((3,1), (3,2), (3,3)): w.addVarSeq(self.encryptedPreMasterSecret, 1, 2) elif self.version == (3,0): w.addFixSeq(self.encryptedPreMasterSecret, 1) else: raise AssertionError() elif self.cipherSuite in CipherSuite.anonSuites: w.addVarSeq(numberToByteArray(self.dh_Yc), 1, 2) else: raise AssertionError() return self.postWrite(w) class CertificateVerify(HandshakeMsg): def __init__(self, version): HandshakeMsg.__init__(self, HandshakeType.certificate_verify) self.version = version self.signature_algorithm = None self.signature = bytearray(0) def create(self, signature_algorithm, signature): self.signature_algorithm = signature_algorithm self.signature = signature return self def parse(self, p): p.startLengthCheck(3) if self.version >= (3,3): self.signature_algorithm = (p.get(1), p.get(1)) self.signature = p.getVarBytes(2) p.stopLengthCheck() return self def write(self): w = Writer() if self.version >= (3,3): w.add(self.signature_algorithm[0], 1) w.add(self.signature_algorithm[1], 1) w.addVarSeq(self.signature, 1, 2) return self.postWrite(w) class ChangeCipherSpec(object): def __init__(self): self.contentType = ContentType.change_cipher_spec self.type = 1 def create(self): self.type = 1 return self def parse(self, p): 
p.setLengthCheck(1) self.type = p.get(1) p.stopLengthCheck() return self def write(self): w = Writer() w.add(self.type,1) return w.bytes class NextProtocol(HandshakeMsg): def __init__(self): HandshakeMsg.__init__(self, HandshakeType.next_protocol) self.next_proto = None def create(self, next_proto): self.next_proto = next_proto return self def parse(self, p): p.startLengthCheck(3) self.next_proto = p.getVarBytes(1) _ = p.getVarBytes(1) p.stopLengthCheck() return self def write(self, trial=False): w = Writer() w.addVarSeq(self.next_proto, 1, 1) paddingLen = 32 - ((len(self.next_proto) + 2) % 32) w.addVarSeq(bytearray(paddingLen), 1, 1) return self.postWrite(w) class Finished(HandshakeMsg): def __init__(self, version): HandshakeMsg.__init__(self, HandshakeType.finished) self.version = version self.verify_data = bytearray(0) def create(self, verify_data): self.verify_data = verify_data return self def parse(self, p): p.startLengthCheck(3) if self.version == (3,0): self.verify_data = p.getFixBytes(36) elif self.version in ((3,1), (3,2), (3,3)): self.verify_data = p.getFixBytes(12) else: raise AssertionError() p.stopLengthCheck() return self def write(self): w = Writer() w.addFixSeq(self.verify_data, 1) return self.postWrite(w) class EncryptedExtensions(HandshakeMsg): def __init__(self): self.channel_id_key = None self.channel_id_proof = None def parse(self, p): p.startLengthCheck(3) soFar = 0 while soFar != p.lengthCheck: extType = p.get(2) extLength = p.get(2) if extType == ExtensionType.channel_id: if extLength != 32*4: raise SyntaxError() self.channel_id_key = p.getFixBytes(64) self.channel_id_proof = p.getFixBytes(64) else: p.getFixBytes(extLength) soFar += 4 + extLength p.stopLengthCheck() return self class ApplicationData(object): def __init__(self): self.contentType = ContentType.application_data self.bytes = bytearray(0) def create(self, bytes): self.bytes = bytes return self def splitFirstByte(self): newMsg = ApplicationData().create(self.bytes[:1]) self.bytes = self.bytes[1:] return newMsg def parse(self, p): self.bytes = p.bytes return self def write(self): return self.bytes
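# Every HandshakeMsg above is framed the same way by postWrite: a one-byte
# handshake type followed by a three-byte big-endian body length. A standalone
# sketch of that framing using only the struct module (toy values, not the
# tlslite Writer/Parser API):
import struct


def frame_handshake(handshake_type, body):
    """Prefix a handshake body with type (1 byte) and length (3 bytes)."""
    header = struct.pack('!B', handshake_type) + struct.pack('!I', len(body))[1:]
    return header + body


def parse_handshake_header(data):
    """Return (handshake_type, body_length) from a framed handshake message."""
    handshake_type = data[0]
    body_length = struct.unpack('!I', b'\x00' + data[1:4])[0]
    return handshake_type, body_length


# client_hello has handshake type 1; a 2-byte version plus a 32-byte random
# gives a 34-byte toy body.
_framed = frame_handshake(1, b'\x03\x03' + bytes(32))
assert parse_handshake_header(_framed) == (1, 34)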
# -*- coding: utf-8 -*- """The JSON serializer object implementation.""" import binascii import codecs import collections import json from dfdatetime import factory as dfdatetime_factory from dfdatetime import interface as dfdatetime_interface from dfvfs.lib import definitions as dfvfs_definitions from dfvfs.path import path_spec as dfvfs_path_spec from dfvfs.path import factory as dfvfs_path_spec_factory # The following import is needed to make sure TSKTime is registered with # the dfDateTime factory. from dfvfs.vfs import tsk_file_entry # pylint: disable=unused-import from plaso.containers import interface as containers_interface from plaso.containers import manager as containers_manager from plaso.serializer import interface from plaso.serializer import logger class JSONAttributeContainerSerializer(interface.AttributeContainerSerializer): """JSON attribute container serializer.""" # Backwards compatibility for older session attribute containers that # contain session configuration attributes. _SESSION_START_LEGACY_ATTRIBUTE_NAMES = frozenset([ 'artifact_filters', 'command_line_arguments', 'debug_mode', 'enabled_parser_names', 'filter_file', 'parser_filter_expression', 'preferred_encoding', 'preferred_time_zone']) @classmethod def _ConvertAttributeContainerToDict(cls, attribute_container): """Converts an attribute container object into a JSON dictionary. The resulting dictionary of the JSON serialized objects consists of: { '__type__': 'AttributeContainer' '__container_type__': ... ... } Here '__type__' indicates the object base type. In this case 'AttributeContainer'. '__container_type__' indicates the container type and rest of the elements of the dictionary make up the attributes of the container. Args: attribute_container (AttributeContainer): attribute container. Returns: dict[str, object]: JSON serialized objects. Raises: TypeError: if not an instance of AttributeContainer. ValueError: if the attribute container type is not supported. """ if not isinstance( attribute_container, containers_interface.AttributeContainer): raise TypeError('{0!s} is not an attribute container type.'.format( type(attribute_container))) container_type = getattr(attribute_container, 'CONTAINER_TYPE', None) if not container_type: raise ValueError('Unsupported attribute container type: {0!s}.'.format( type(attribute_container))) json_dict = { '__type__': 'AttributeContainer', '__container_type__': container_type, } for attribute_name, attribute_value in attribute_container.GetAttributes(): json_dict[attribute_name] = cls._ConvertAttributeValueToDict( attribute_value) return json_dict # Pylint is confused by the formatting of the return type. # pylint: disable=missing-return-type-doc @classmethod def _ConvertAttributeValueToDict(cls, attribute_value): """Converts an attribute value into a JSON dictionary. Args: attribute_value (object): an attribute value. Returns: dict|list: The JSON serialized object which can be a dictionary or a list. 
""" if isinstance(attribute_value, bytes): encoded_value = binascii.b2a_qp(attribute_value) encoded_value = codecs.decode(encoded_value, 'ascii') attribute_value = { '__type__': 'bytes', 'stream': '{0:s}'.format(encoded_value) } elif isinstance(attribute_value, (list, tuple)): json_list = [] for list_element in attribute_value: json_dict = cls._ConvertAttributeValueToDict(list_element) json_list.append(json_dict) if isinstance(attribute_value, list): attribute_value = json_list else: attribute_value = { '__type__': 'tuple', 'values': json_list } elif isinstance(attribute_value, collections.Counter): attribute_value = cls._ConvertCollectionsCounterToDict(attribute_value) elif isinstance(attribute_value, dfdatetime_interface.DateTimeValues): attribute_value = cls._ConvertDateTimeValuesToDict(attribute_value) elif isinstance(attribute_value, dfvfs_path_spec.PathSpec): attribute_value = cls._ConvertPathSpecToDict(attribute_value) elif isinstance(attribute_value, containers_interface.AttributeContainer): attribute_value = cls._ConvertAttributeContainerToDict(attribute_value) return attribute_value @classmethod def _ConvertCollectionsCounterToDict(cls, collections_counter): """Converts a collections.Counter object into a JSON dictionary. The resulting dictionary of the JSON serialized objects consists of: { '__type__': 'collections.Counter' ... } Here '__type__' indicates the object base type. In this case 'collections.Counter'. The rest of the elements of the dictionary make up the collections.Counter object attributes. Args: collections_counter (collections.Counter): counter. Returns: dict[str, object]: JSON serialized objects. Raises: TypeError: if not an instance of collections.Counter. """ if not isinstance(collections_counter, collections.Counter): raise TypeError json_dict = {'__type__': 'collections.Counter'} for attribute_name, attribute_value in collections_counter.items(): if attribute_value is None: continue if isinstance(attribute_value, bytes): attribute_value = { '__type__': 'bytes', 'stream': '{0:s}'.format(binascii.b2a_qp(attribute_value)) } json_dict[attribute_name] = attribute_value return json_dict # Pylint is confused by the formatting of the return type. # pylint: disable=missing-return-type-doc @classmethod def _ConvertDictToObject(cls, json_dict): """Converts a JSON dict into an object. The dictionary of the JSON serialized objects consists of: { '__type__': 'AttributeContainer' '__container_type__': ... ... } Here '__type__' indicates the object base type. In this case 'AttributeContainer'. '__container_type__' indicates the attribute container type. The rest of the elements of the dictionary make up the attributes. Args: json_dict (dict[str, object]): JSON serialized objects. Returns: AttributeContainer|dict|list|tuple: deserialized object. Raises: ValueError: if the class type, container type or attribute type of event data container is not supported. """ # Use __type__ to indicate the object class type. class_type = json_dict.get('__type__', None) if not class_type: # Dealing with a regular dict. return json_dict if class_type == 'bytes': return binascii.a2b_qp(json_dict['stream']) if class_type == 'tuple': return tuple(cls._ConvertListToObject(json_dict['values'])) if class_type == 'collections.Counter': return cls._ConvertDictToCollectionsCounter(json_dict) if class_type == 'AttributeContainer': # Use __container_type__ to indicate the attribute container type. 
container_type = json_dict.get('__container_type__', None) # Since we would like the JSON as flat as possible we handle decoding # date time values. elif class_type == 'DateTimeValues': return cls._ConvertDictToDateTimeValues(json_dict) # Since we would like the JSON as flat as possible we handle decoding # a path specification. elif class_type == 'PathSpec': return cls._ConvertDictToPathSpec(json_dict) else: raise ValueError('Unsupported class type: {0:s}'.format(class_type)) container_object = ( containers_manager.AttributeContainersManager.CreateAttributeContainer( container_type)) supported_attribute_names = container_object.GetAttributeNames() for attribute_name, attribute_value in json_dict.items(): # Convert attribute names to provide backwards compatibility for previous # variants of attribute containers. if (container_type == 'event' and attribute_name == 'event_data_row_identifier'): attribute_name = '_event_data_row_identifier' elif (container_type == 'event_tag' and attribute_name == 'event_row_identifier'): attribute_name = '_event_row_identifier' # Backwards compatibility for older session attribute containers that # contain session configuration attributes. if (container_type == 'session_start' and attribute_name in cls._SESSION_START_LEGACY_ATTRIBUTE_NAMES): pass # Be strict about which attributes to set in non event data attribute # containers. elif (container_type != 'event_data' and attribute_name not in supported_attribute_names): if attribute_name not in ('__container_type__', '__type__'): logger.debug(( '[ConvertDictToObject] unsupported attribute name: ' '{0:s}.{1:s}').format(container_type, attribute_name)) continue if isinstance(attribute_value, dict): attribute_value = cls._ConvertDictToObject(attribute_value) elif isinstance(attribute_value, list): attribute_value = cls._ConvertListToObject(attribute_value) if container_type == 'event_data': if isinstance(attribute_value, bytes): raise ValueError(( 'Event data attribute value: {0:s} of type bytes is not ' 'supported.').format(attribute_name)) if isinstance(attribute_value, dict): raise ValueError(( 'Event data attribute value: {0:s} of type dict is not ' 'supported.').format(attribute_name)) setattr(container_object, attribute_name, attribute_value) return container_object @classmethod def _ConvertDictToCollectionsCounter(cls, json_dict): """Converts a JSON dict into a collections.Counter. The dictionary of the JSON serialized objects consists of: { '__type__': 'collections.Counter' ... } Here '__type__' indicates the object base type. In this case this should be 'collections.Counter'. The rest of the elements of the dictionary make up the preprocessing object properties. Args: json_dict (dict[str, object]): JSON serialized objects. Returns: collections.Counter: counter. """ collections_counter = collections.Counter() for key, value in json_dict.items(): if key == '__type__': continue collections_counter[key] = value return collections_counter @classmethod def _ConvertListToObject(cls, json_list): """Converts a JSON list into an object. Args: json_list (list[object]): JSON serialized objects. Returns: list[object]: a deserialized list. 
""" list_value = [] for json_list_element in json_list: if isinstance(json_list_element, dict): list_value.append(cls._ConvertDictToObject(json_list_element)) elif isinstance(json_list_element, list): list_value.append(cls._ConvertListToObject(json_list_element)) else: list_value.append(json_list_element) return list_value @classmethod def _ConvertDictToDateTimeValues(cls, json_dict): """Converts a JSON dict into a date time values object. The dictionary of the JSON serialized objects consists of: { '__type__': 'DateTimeValues' '__class_name__': 'RFC2579DateTime' ... } Here '__type__' indicates the object base type. In this case this should be 'DateTimeValues'. The rest of the elements of the dictionary make up the date time values object properties. Args: json_dict (dict[str, object]): JSON serialized objects. Returns: dfdatetime.DateTimeValues: date and time values. """ class_name = json_dict.get('__class_name__', None) if class_name: del json_dict['__class_name__'] # Remove the class type from the JSON dict since we cannot pass it. del json_dict['__type__'] is_local_time = json_dict.get('is_local_time', None) if is_local_time is not None: del json_dict['is_local_time'] if class_name in ('InvalidTime', 'Never', 'NotSet'): string = json_dict.get('string', None) if string is not None: del json_dict['string'] date_time = dfdatetime_factory.Factory.NewDateTimeValues( class_name, **json_dict) if is_local_time: date_time.is_local_time = is_local_time return date_time @classmethod def _ConvertDictToPathSpec(cls, json_dict): """Converts a JSON dict into a path specification object. The dictionary of the JSON serialized objects consists of: { '__type__': 'PathSpec' 'type_indicator': 'OS' 'parent': { ... } ... } Here '__type__' indicates the object base type. In this case this should be 'PathSpec'. The rest of the elements of the dictionary make up the path specification object properties. Args: json_dict (dict[str, object]): JSON serialized objects. Returns: dfvfs.PathSpec: path specification. """ type_indicator = json_dict.get('type_indicator', None) if type_indicator: del json_dict['type_indicator'] if 'parent' in json_dict: json_dict['parent'] = cls._ConvertDictToPathSpec(json_dict['parent']) # Remove the class type from the JSON dict since we cannot pass it. del json_dict['__type__'] path_spec = dfvfs_path_spec_factory.Factory.NewPathSpec( type_indicator, **json_dict) if type_indicator == dfvfs_definitions.TYPE_INDICATOR_OS: # dfvfs.OSPathSpec() will change the location to an absolute path # here we want to preserve the original location. path_spec.location = json_dict.get('location', None) return path_spec @classmethod def _ConvertDateTimeValuesToDict(cls, date_time_values): """Converts a date and time values object into a JSON dictionary. The resulting dictionary of the JSON serialized objects consists of: { '__type__': 'DateTimeValues' '__class_name__': 'RFC2579DateTime' ... } Here '__type__' indicates the object base type. In this case 'DateTimeValues'. The rest of the elements of the dictionary make up the date and time value object properties. Args: date_time_values (dfdatetime.DateTimeValues): date and time values. Returns: dict[str, object]: JSON serialized objects. Raises: TypeError: if not an instance of dfdatetime.DateTimeValues. 
""" if not isinstance(date_time_values, dfdatetime_interface.DateTimeValues): raise TypeError class_name = type(date_time_values).__name__ json_dict = { '__class_name__': class_name, '__type__': 'DateTimeValues'} if hasattr(date_time_values, 'timestamp'): json_dict['timestamp'] = date_time_values.timestamp elif hasattr(date_time_values, 'string'): json_dict['string'] = date_time_values.string elif class_name == 'FATDateTime': json_dict['fat_date_time'] = date_time_values.fat_date_time elif class_name == 'RFC2579DateTime': json_dict['rfc2579_date_time_tuple'] = ( date_time_values.year, date_time_values.month, date_time_values.day_of_month, date_time_values.hours, date_time_values.minutes, date_time_values.seconds, date_time_values.deciseconds) elif class_name == 'TimeElements': json_dict['time_elements_tuple'] = ( date_time_values.year, date_time_values.month, date_time_values.day_of_month, date_time_values.hours, date_time_values.minutes, date_time_values.seconds) elif class_name == 'TimeElementsInMilliseconds': json_dict['time_elements_tuple'] = ( date_time_values.year, date_time_values.month, date_time_values.day_of_month, date_time_values.hours, date_time_values.minutes, date_time_values.seconds, date_time_values.milliseconds) elif class_name == 'TimeElementsInMicroseconds': json_dict['time_elements_tuple'] = ( date_time_values.year, date_time_values.month, date_time_values.day_of_month, date_time_values.hours, date_time_values.minutes, date_time_values.seconds, date_time_values.microseconds) if date_time_values.is_local_time: json_dict['is_local_time'] = True return json_dict @classmethod def _ConvertPathSpecToDict(cls, path_spec_object): """Converts a path specification object into a JSON dictionary. The resulting dictionary of the JSON serialized objects consists of: { '__type__': 'PathSpec' 'type_indicator': 'OS' 'parent': { ... } ... } Here '__type__' indicates the object base type. In this case 'PathSpec'. The rest of the elements of the dictionary make up the path specification object properties. The supported property names are defined in path_spec_factory.Factory.PROPERTY_NAMES. Note that this method is called recursively for every path specification object and creates a dict of dicts in the process. Args: path_spec_object (dfvfs.PathSpec): path specification. Returns: dict[str, object]: JSON serialized objects. Raises: TypeError: if not an instance of dfvfs.PathSpec. """ if not isinstance(path_spec_object, dfvfs_path_spec.PathSpec): raise TypeError json_dict = {'__type__': 'PathSpec'} for property_name in dfvfs_path_spec_factory.Factory.PROPERTY_NAMES: property_value = getattr(path_spec_object, property_name, None) if property_value is not None: json_dict[property_name] = property_value if path_spec_object.HasParent(): json_dict['parent'] = cls._ConvertPathSpecToDict(path_spec_object.parent) json_dict['type_indicator'] = path_spec_object.type_indicator location = getattr(path_spec_object, 'location', None) if location: json_dict['location'] = location return json_dict @classmethod def ReadSerialized(cls, json_string): # pylint: disable=arguments-differ """Reads an attribute container from serialized form. Args: json_string (str): JSON serialized attribute container. Returns: AttributeContainer: attribute container or None. """ if json_string: json_dict = json.loads(json_string) return cls.ReadSerializedDict(json_dict) return None @classmethod def ReadSerializedDict(cls, json_dict): """Reads an attribute container from serialized dictionary form. 
Args: json_dict (dict[str, object]): JSON serialized objects. Returns: AttributeContainer: attribute container or None. Raises: TypeError: if the serialized dictionary does not contain an AttributeContainer. """ if json_dict: json_object = cls._ConvertDictToObject(json_dict) if not isinstance(json_object, containers_interface.AttributeContainer): raise TypeError('{0!s} is not an attribute container type.'.format( type(json_object))) return json_object return None @classmethod def WriteSerialized(cls, attribute_container): """Writes an attribute container to serialized form. Args: attribute_container (AttributeContainer): attribute container. Returns: str: A JSON string containing the serialized form. """ json_dict = cls.WriteSerializedDict(attribute_container) return json.dumps(json_dict) @classmethod def WriteSerializedDict(cls, attribute_container): """Writes an attribute container to serialized form. Args: attribute_container (AttributeContainer): attribute container. Returns: dict[str, object]: JSON serialized objects. """ return cls._ConvertAttributeContainerToDict(attribute_container)
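# The serializer above round-trips bytes values through quoted-printable
# encoding: a {'__type__': 'bytes', 'stream': ...} dictionary on the way out,
# binascii.a2b_qp on the way back in. A minimal standalone illustration of
# that encoding step, independent of the plaso container classes:
import binascii
import codecs
import json


def bytes_to_json_dict(value):
    """Encode a bytes value the way _ConvertAttributeValueToDict does."""
    encoded = codecs.decode(binascii.b2a_qp(value), 'ascii')
    return {'__type__': 'bytes', 'stream': encoded}


def json_dict_to_bytes(json_dict):
    """Decode a '__type__': 'bytes' dictionary back into bytes."""
    return binascii.a2b_qp(json_dict['stream'])


_original = b'\x00\x01MZ\x90'
_serialized = json.dumps(bytes_to_json_dict(_original))
assert json_dict_to_bytes(json.loads(_serialized)) == _original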
"""Support for Hass.io.""" from __future__ import annotations from datetime import timedelta import logging import os from typing import Any, NamedTuple import voluptuous as vol from homeassistant.auth.const import GROUP_ID_ADMIN from homeassistant.components.homeassistant import ( SERVICE_CHECK_CONFIG, SHUTDOWN_SERVICES, ) import homeassistant.config as conf_util from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( ATTR_NAME, ATTR_SERVICE, EVENT_CORE_CONFIG_UPDATE, SERVICE_HOMEASSISTANT_RESTART, SERVICE_HOMEASSISTANT_STOP, ) from homeassistant.core import DOMAIN as HASS_DOMAIN, HomeAssistant, callback from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers import config_validation as cv, recorder from homeassistant.helpers.device_registry import DeviceRegistry, async_get_registry from homeassistant.helpers.typing import ConfigType from homeassistant.helpers.update_coordinator import DataUpdateCoordinator from homeassistant.loader import bind_hass from homeassistant.util.dt import utcnow from .addon_panel import async_setup_addon_panel from .auth import async_setup_auth_view from .const import ( ATTR_ADDON, ATTR_ADDONS, ATTR_DISCOVERY, ATTR_FOLDERS, ATTR_HOMEASSISTANT, ATTR_INPUT, ATTR_PASSWORD, ATTR_REPOSITORY, ATTR_SLUG, ATTR_URL, ATTR_VERSION, DOMAIN, SupervisorEntityModel, ) from .discovery import async_setup_discovery_view from .handler import HassIO, HassioAPIError, api_data from .http import HassIOView from .ingress import async_setup_ingress_view from .websocket_api import async_load_websocket_api _LOGGER = logging.getLogger(__name__) STORAGE_KEY = DOMAIN STORAGE_VERSION = 1 PLATFORMS = ["binary_sensor", "sensor"] CONF_FRONTEND_REPO = "development_repo" CONFIG_SCHEMA = vol.Schema( {vol.Optional(DOMAIN): vol.Schema({vol.Optional(CONF_FRONTEND_REPO): cv.isdir})}, extra=vol.ALLOW_EXTRA, ) DATA_CORE_INFO = "hassio_core_info" DATA_HOST_INFO = "hassio_host_info" DATA_STORE = "hassio_store" DATA_INFO = "hassio_info" DATA_OS_INFO = "hassio_os_info" DATA_SUPERVISOR_INFO = "hassio_supervisor_info" HASSIO_UPDATE_INTERVAL = timedelta(minutes=55) ADDONS_COORDINATOR = "hassio_addons_coordinator" SERVICE_ADDON_START = "addon_start" SERVICE_ADDON_STOP = "addon_stop" SERVICE_ADDON_RESTART = "addon_restart" SERVICE_ADDON_UPDATE = "addon_update" SERVICE_ADDON_STDIN = "addon_stdin" SERVICE_HOST_SHUTDOWN = "host_shutdown" SERVICE_HOST_REBOOT = "host_reboot" SERVICE_BACKUP_FULL = "backup_full" SERVICE_BACKUP_PARTIAL = "backup_partial" SERVICE_RESTORE_FULL = "restore_full" SERVICE_RESTORE_PARTIAL = "restore_partial" SCHEMA_NO_DATA = vol.Schema({}) SCHEMA_ADDON = vol.Schema({vol.Required(ATTR_ADDON): cv.string}) SCHEMA_ADDON_STDIN = SCHEMA_ADDON.extend( {vol.Required(ATTR_INPUT): vol.Any(dict, cv.string)} ) SCHEMA_BACKUP_FULL = vol.Schema( {vol.Optional(ATTR_NAME): cv.string, vol.Optional(ATTR_PASSWORD): cv.string} ) SCHEMA_BACKUP_PARTIAL = SCHEMA_BACKUP_FULL.extend( { vol.Optional(ATTR_FOLDERS): vol.All(cv.ensure_list, [cv.string]), vol.Optional(ATTR_ADDONS): vol.All(cv.ensure_list, [cv.string]), } ) SCHEMA_RESTORE_FULL = vol.Schema( { vol.Required(ATTR_SLUG): cv.slug, vol.Optional(ATTR_PASSWORD): cv.string, } ) SCHEMA_RESTORE_PARTIAL = SCHEMA_RESTORE_FULL.extend( { vol.Optional(ATTR_HOMEASSISTANT): cv.boolean, vol.Optional(ATTR_FOLDERS): vol.All(cv.ensure_list, [cv.string]), vol.Optional(ATTR_ADDONS): vol.All(cv.ensure_list, [cv.string]), } ) class APIEndpointSettings(NamedTuple): """Settings for API endpoint.""" command: str schema: 
vol.Schema timeout: int | None = 60 pass_data: bool = False MAP_SERVICE_API = { SERVICE_ADDON_START: APIEndpointSettings("/addons/{addon}/start", SCHEMA_ADDON), SERVICE_ADDON_STOP: APIEndpointSettings("/addons/{addon}/stop", SCHEMA_ADDON), SERVICE_ADDON_RESTART: APIEndpointSettings("/addons/{addon}/restart", SCHEMA_ADDON), SERVICE_ADDON_UPDATE: APIEndpointSettings("/addons/{addon}/update", SCHEMA_ADDON), SERVICE_ADDON_STDIN: APIEndpointSettings( "/addons/{addon}/stdin", SCHEMA_ADDON_STDIN ), SERVICE_HOST_SHUTDOWN: APIEndpointSettings("/host/shutdown", SCHEMA_NO_DATA), SERVICE_HOST_REBOOT: APIEndpointSettings("/host/reboot", SCHEMA_NO_DATA), SERVICE_BACKUP_FULL: APIEndpointSettings( "/backups/new/full", SCHEMA_BACKUP_FULL, None, True, ), SERVICE_BACKUP_PARTIAL: APIEndpointSettings( "/backups/new/partial", SCHEMA_BACKUP_PARTIAL, None, True, ), SERVICE_RESTORE_FULL: APIEndpointSettings( "/backups/{slug}/restore/full", SCHEMA_RESTORE_FULL, None, True, ), SERVICE_RESTORE_PARTIAL: APIEndpointSettings( "/backups/{slug}/restore/partial", SCHEMA_RESTORE_PARTIAL, None, True, ), } @bind_hass async def async_get_addon_info(hass: HomeAssistant, slug: str) -> dict: """Return add-on info. The caller of the function should handle HassioAPIError. """ hassio = hass.data[DOMAIN] return await hassio.get_addon_info(slug) @bind_hass async def async_update_diagnostics(hass: HomeAssistant, diagnostics: bool) -> dict: """Update Supervisor diagnostics toggle. The caller of the function should handle HassioAPIError. """ hassio = hass.data[DOMAIN] return await hassio.update_diagnostics(diagnostics) @bind_hass @api_data async def async_install_addon(hass: HomeAssistant, slug: str) -> dict: """Install add-on. The caller of the function should handle HassioAPIError. """ hassio = hass.data[DOMAIN] command = f"/addons/{slug}/install" return await hassio.send_command(command, timeout=None) @bind_hass @api_data async def async_uninstall_addon(hass: HomeAssistant, slug: str) -> dict: """Uninstall add-on. The caller of the function should handle HassioAPIError. """ hassio = hass.data[DOMAIN] command = f"/addons/{slug}/uninstall" return await hassio.send_command(command, timeout=60) @bind_hass @api_data async def async_update_addon(hass: HomeAssistant, slug: str) -> dict: """Update add-on. The caller of the function should handle HassioAPIError. """ hassio = hass.data[DOMAIN] command = f"/addons/{slug}/update" return await hassio.send_command(command, timeout=None) @bind_hass @api_data async def async_start_addon(hass: HomeAssistant, slug: str) -> dict: """Start add-on. The caller of the function should handle HassioAPIError. """ hassio = hass.data[DOMAIN] command = f"/addons/{slug}/start" return await hassio.send_command(command, timeout=60) @bind_hass @api_data async def async_restart_addon(hass: HomeAssistant, slug: str) -> dict: """Restart add-on. The caller of the function should handle HassioAPIError. """ hassio = hass.data[DOMAIN] command = f"/addons/{slug}/restart" return await hassio.send_command(command, timeout=None) @bind_hass @api_data async def async_stop_addon(hass: HomeAssistant, slug: str) -> dict: """Stop add-on. The caller of the function should handle HassioAPIError. """ hassio = hass.data[DOMAIN] command = f"/addons/{slug}/stop" return await hassio.send_command(command, timeout=60) @bind_hass @api_data async def async_set_addon_options( hass: HomeAssistant, slug: str, options: dict ) -> dict: """Set add-on options. The caller of the function should handle HassioAPIError. 
""" hassio = hass.data[DOMAIN] command = f"/addons/{slug}/options" return await hassio.send_command(command, payload=options) @bind_hass async def async_get_addon_discovery_info(hass: HomeAssistant, slug: str) -> dict | None: """Return discovery data for an add-on.""" hassio = hass.data[DOMAIN] data = await hassio.retrieve_discovery_messages() discovered_addons = data[ATTR_DISCOVERY] return next((addon for addon in discovered_addons if addon["addon"] == slug), None) @bind_hass @api_data async def async_create_backup( hass: HomeAssistant, payload: dict, partial: bool = False ) -> dict: """Create a full or partial backup. The caller of the function should handle HassioAPIError. """ hassio = hass.data[DOMAIN] backup_type = "partial" if partial else "full" command = f"/backups/new/{backup_type}" return await hassio.send_command(command, payload=payload, timeout=None) @callback @bind_hass def get_info(hass): """Return generic information from Supervisor. Async friendly. """ return hass.data.get(DATA_INFO) @callback @bind_hass def get_host_info(hass): """Return generic host information. Async friendly. """ return hass.data.get(DATA_HOST_INFO) @callback @bind_hass def get_store(hass): """Return store information. Async friendly. """ return hass.data.get(DATA_STORE) @callback @bind_hass def get_supervisor_info(hass): """Return Supervisor information. Async friendly. """ return hass.data.get(DATA_SUPERVISOR_INFO) @callback @bind_hass def get_os_info(hass): """Return OS information. Async friendly. """ return hass.data.get(DATA_OS_INFO) @callback @bind_hass def get_core_info(hass): """Return Home Assistant Core information from Supervisor. Async friendly. """ return hass.data.get(DATA_CORE_INFO) @callback @bind_hass def is_hassio(hass: HomeAssistant) -> bool: """Return true if Hass.io is loaded. Async friendly. """ return DOMAIN in hass.config.components @callback def get_supervisor_ip(): """Return the supervisor ip address.""" if "SUPERVISOR" not in os.environ: return None return os.environ["SUPERVISOR"].partition(":")[0] async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool: # noqa: C901 """Set up the Hass.io component.""" # Check local setup for env in ("HASSIO", "HASSIO_TOKEN"): if os.environ.get(env): continue _LOGGER.error("Missing %s environment variable", env) if config_entries := hass.config_entries.async_entries(DOMAIN): hass.async_create_task( hass.config_entries.async_remove(config_entries[0].entry_id) ) return False async_load_websocket_api(hass) host = os.environ["HASSIO"] websession = hass.helpers.aiohttp_client.async_get_clientsession() hass.data[DOMAIN] = hassio = HassIO(hass.loop, websession, host) if not await hassio.is_connected(): _LOGGER.warning("Not connected with the supervisor / system too busy!") store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY) data = await store.async_load() if data is None: data = {} refresh_token = None if "hassio_user" in data: user = await hass.auth.async_get_user(data["hassio_user"]) if user and user.refresh_tokens: refresh_token = list(user.refresh_tokens.values())[0] # Migrate old Hass.io users to be admin. 
if not user.is_admin: await hass.auth.async_update_user(user, group_ids=[GROUP_ID_ADMIN]) # Migrate old name if user.name == "Hass.io": await hass.auth.async_update_user(user, name="Supervisor") if refresh_token is None: user = await hass.auth.async_create_system_user("Supervisor", [GROUP_ID_ADMIN]) refresh_token = await hass.auth.async_create_refresh_token(user) data["hassio_user"] = user.id await store.async_save(data) # This overrides the normal API call that would be forwarded development_repo = config.get(DOMAIN, {}).get(CONF_FRONTEND_REPO) if development_repo is not None: hass.http.register_static_path( "/api/hassio/app", os.path.join(development_repo, "hassio/build"), False ) hass.http.register_view(HassIOView(host, websession)) await hass.components.panel_custom.async_register_panel( frontend_url_path="hassio", webcomponent_name="hassio-main", sidebar_title="Supervisor", sidebar_icon="hass:home-assistant", js_url="/api/hassio/app/entrypoint.js", embed_iframe=True, require_admin=True, ) await hassio.update_hass_api(config.get("http", {}), refresh_token) last_timezone = None async def push_config(_): """Push core config to Hass.io.""" nonlocal last_timezone new_timezone = str(hass.config.time_zone) if new_timezone == last_timezone: return last_timezone = new_timezone await hassio.update_hass_timezone(new_timezone) hass.bus.async_listen(EVENT_CORE_CONFIG_UPDATE, push_config) await push_config(None) async def async_service_handler(service): """Handle service calls for Hass.io.""" api_endpoint = MAP_SERVICE_API[service.service] data = service.data.copy() addon = data.pop(ATTR_ADDON, None) slug = data.pop(ATTR_SLUG, None) payload = None # Pass data to Hass.io API if service.service == SERVICE_ADDON_STDIN: payload = data[ATTR_INPUT] elif api_endpoint.pass_data: payload = data # Call API try: await hassio.send_command( api_endpoint.command.format(addon=addon, slug=slug), payload=payload, timeout=api_endpoint.timeout, ) except HassioAPIError: # The exceptions are logged properly in hassio.send_command pass for service, settings in MAP_SERVICE_API.items(): hass.services.async_register( DOMAIN, service, async_service_handler, schema=settings.schema ) async def update_info_data(now): """Update last available supervisor information.""" try: hass.data[DATA_INFO] = await hassio.get_info() hass.data[DATA_HOST_INFO] = await hassio.get_host_info() hass.data[DATA_STORE] = await hassio.get_store() hass.data[DATA_CORE_INFO] = await hassio.get_core_info() hass.data[DATA_SUPERVISOR_INFO] = await hassio.get_supervisor_info() hass.data[DATA_OS_INFO] = await hassio.get_os_info() if ADDONS_COORDINATOR in hass.data: await hass.data[ADDONS_COORDINATOR].async_refresh() except HassioAPIError as err: _LOGGER.warning("Can't read last version: %s", err) hass.helpers.event.async_track_point_in_utc_time( update_info_data, utcnow() + HASSIO_UPDATE_INTERVAL ) # Fetch last version await update_info_data(None) async def async_handle_core_service(call): """Service handler for handling core services.""" if ( call.service in SHUTDOWN_SERVICES and await recorder.async_migration_in_progress(hass) ): _LOGGER.error( "The system cannot %s while a database upgrade is in progress", call.service, ) raise HomeAssistantError( f"The system cannot {call.service} " "while a database upgrade is in progress." 
) if call.service == SERVICE_HOMEASSISTANT_STOP: await hassio.stop_homeassistant() return errors = await conf_util.async_check_ha_config_file(hass) if errors: _LOGGER.error( "The system cannot %s because the configuration is not valid: %s", call.service, errors, ) hass.components.persistent_notification.async_create( "Config error. See [the logs](/config/logs) for details.", "Config validating", f"{HASS_DOMAIN}.check_config", ) raise HomeAssistantError( f"The system cannot {call.service} " f"because the configuration is not valid: {errors}" ) if call.service == SERVICE_HOMEASSISTANT_RESTART: await hassio.restart_homeassistant() # Mock core services for service in ( SERVICE_HOMEASSISTANT_STOP, SERVICE_HOMEASSISTANT_RESTART, SERVICE_CHECK_CONFIG, ): hass.services.async_register(HASS_DOMAIN, service, async_handle_core_service) # Init discovery Hass.io feature async_setup_discovery_view(hass, hassio) # Init auth Hass.io feature async_setup_auth_view(hass, user) # Init ingress Hass.io feature async_setup_ingress_view(hass, host) # Init add-on ingress panels await async_setup_addon_panel(hass, hassio) hass.async_create_task( hass.config_entries.flow.async_init(DOMAIN, context={"source": "system"}) ) return True async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Set up a config entry.""" dev_reg = await async_get_registry(hass) coordinator = HassioDataUpdateCoordinator(hass, entry, dev_reg) hass.data[ADDONS_COORDINATOR] = coordinator await coordinator.async_refresh() hass.config_entries.async_setup_platforms(entry, PLATFORMS) return True async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Unload a config entry.""" unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS) # Pop add-on data hass.data.pop(ADDONS_COORDINATOR, None) return unload_ok @callback def async_register_addons_in_dev_reg( entry_id: str, dev_reg: DeviceRegistry, addons: list[dict[str, Any]] ) -> None: """Register addons in the device registry.""" for addon in addons: params = { "config_entry_id": entry_id, "identifiers": {(DOMAIN, addon[ATTR_SLUG])}, "model": SupervisorEntityModel.ADDON, "sw_version": addon[ATTR_VERSION], "name": addon[ATTR_NAME], "entry_type": ATTR_SERVICE, } if manufacturer := addon.get(ATTR_REPOSITORY) or addon.get(ATTR_URL): params["manufacturer"] = manufacturer dev_reg.async_get_or_create(**params) @callback def async_register_os_in_dev_reg( entry_id: str, dev_reg: DeviceRegistry, os_dict: dict[str, Any] ) -> None: """Register OS in the device registry.""" params = { "config_entry_id": entry_id, "identifiers": {(DOMAIN, "OS")}, "manufacturer": "Home Assistant", "model": SupervisorEntityModel.OS, "sw_version": os_dict[ATTR_VERSION], "name": "Home Assistant Operating System", "entry_type": ATTR_SERVICE, } dev_reg.async_get_or_create(**params) @callback def async_remove_addons_from_dev_reg(dev_reg: DeviceRegistry, addons: set[str]) -> None: """Remove addons from the device registry.""" for addon_slug in addons: if dev := dev_reg.async_get_device({(DOMAIN, addon_slug)}): dev_reg.async_remove_device(dev.id) class HassioDataUpdateCoordinator(DataUpdateCoordinator): """Class to retrieve Hass.io status.""" def __init__( self, hass: HomeAssistant, config_entry: ConfigEntry, dev_reg: DeviceRegistry ) -> None: """Initialize coordinator.""" super().__init__( hass, _LOGGER, name=DOMAIN, update_method=self._async_update_data, ) self.data = {} self.entry_id = config_entry.entry_id self.dev_reg = dev_reg self.is_hass_os = "hassos" in 
get_info(self.hass) async def _async_update_data(self) -> dict[str, Any]: """Update data via library.""" new_data = {} supervisor_info = get_supervisor_info(self.hass) store_data = get_store(self.hass) repositories = { repo[ATTR_SLUG]: repo[ATTR_NAME] for repo in store_data.get("repositories", []) } new_data["addons"] = { addon[ATTR_SLUG]: { **addon, ATTR_REPOSITORY: repositories.get( addon.get(ATTR_REPOSITORY), addon.get(ATTR_REPOSITORY, "") ), } for addon in supervisor_info.get("addons", []) } if self.is_hass_os: new_data["os"] = get_os_info(self.hass) # If this is the initial refresh, register all addons and return the dict if not self.data: async_register_addons_in_dev_reg( self.entry_id, self.dev_reg, new_data["addons"].values() ) if self.is_hass_os: async_register_os_in_dev_reg( self.entry_id, self.dev_reg, new_data["os"] ) # Remove add-ons that are no longer installed from device registry supervisor_addon_devices = { list(device.identifiers)[0][1] for device in self.dev_reg.devices.values() if self.entry_id in device.config_entries and device.model == SupervisorEntityModel.ADDON } if stale_addons := supervisor_addon_devices - set(new_data["addons"]): async_remove_addons_from_dev_reg(self.dev_reg, stale_addons) # If there are new add-ons, we should reload the config entry so we can # create new devices and entities. We can return an empty dict because # coordinator will be recreated. if self.data and set(new_data["addons"]) - set(self.data["addons"]): self.hass.async_create_task( self.hass.config_entries.async_reload(self.entry_id) ) return {} return new_data
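# ---------------------------------------------------------------------------
# Illustrative sketch (standalone, not Home Assistant code): how a service
# call is resolved into a Supervisor endpoint path and payload by the
# MAP_SERVICE_API / async_service_handler pair above. _EndpointSketch is a
# trimmed-down APIEndpointSettings without the voluptuous schema, and the
# sample call below is a hypothetical "backup_partial" invocation.
# ---------------------------------------------------------------------------
from typing import NamedTuple, Optional


class _EndpointSketch(NamedTuple):
    command: str
    timeout: Optional[int] = 60
    pass_data: bool = False


def _resolve_service_call(service: str, settings: _EndpointSketch, data: dict):
    """Format the endpoint path and pick the payload like the handler does."""
    addon = data.pop("addon", None)
    slug = data.pop("slug", None)
    if service == "addon_stdin":
        payload = data["input"]
    elif settings.pass_data:
        payload = data
    else:
        payload = None
    return settings.command.format(addon=addon, slug=slug), payload


if __name__ == "__main__":
    _command, _payload = _resolve_service_call(
        "backup_partial",
        _EndpointSketch("/backups/new/partial", timeout=None, pass_data=True),
        {"name": "nightly", "addons": ["core_ssh"]},
    )
    assert _command == "/backups/new/partial"
    assert _payload == {"name": "nightly", "addons": ["core_ssh"]}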
from datetime import ( datetime, timedelta, ) import operator import numpy as np import pytest import pytz from pandas._libs.tslibs import iNaT from pandas.core.dtypes.common import is_datetime64_any_dtype from pandas import ( DatetimeIndex, DatetimeTZDtype, Index, NaT, Period, Series, Timedelta, TimedeltaIndex, Timestamp, isna, offsets, ) import pandas._testing as tm from pandas.core.arrays import ( DatetimeArray, PeriodArray, TimedeltaArray, ) from pandas.core.ops import roperator @pytest.mark.parametrize( "nat,idx", [ (Timestamp("NaT"), DatetimeArray), (Timedelta("NaT"), TimedeltaIndex), (Period("NaT", freq="M"), PeriodArray), ], ) def test_nat_fields(nat, idx): for field in idx._field_ops: # weekday is a property of DTI, but a method # on NaT/Timestamp for compat with datetime if field == "weekday": continue result = getattr(NaT, field) assert np.isnan(result) result = getattr(nat, field) assert np.isnan(result) for field in idx._bool_ops: result = getattr(NaT, field) assert result is False result = getattr(nat, field) assert result is False def test_nat_vector_field_access(): idx = DatetimeIndex(["1/1/2000", None, None, "1/4/2000"]) for field in DatetimeIndex._field_ops: # weekday is a property of DTI, but a method # on NaT/Timestamp for compat with datetime if field == "weekday": continue if field in ["week", "weekofyear"]: # GH#33595 Deprecate week and weekofyear continue result = getattr(idx, field) expected = Index([getattr(x, field) for x in idx]) tm.assert_index_equal(result, expected) ser = Series(idx) for field in DatetimeArray._field_ops: # weekday is a property of DTI, but a method # on NaT/Timestamp for compat with datetime if field == "weekday": continue if field in ["week", "weekofyear"]: # GH#33595 Deprecate week and weekofyear continue result = getattr(ser.dt, field) expected = [getattr(x, field) for x in idx] tm.assert_series_equal(result, Series(expected)) for field in DatetimeArray._bool_ops: result = getattr(ser.dt, field) expected = [getattr(x, field) for x in idx] tm.assert_series_equal(result, Series(expected)) @pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period]) @pytest.mark.parametrize("value", [None, np.nan, iNaT, float("nan"), NaT, "NaT", "nat"]) def test_identity(klass, value): assert klass(value) is NaT @pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period]) @pytest.mark.parametrize("value", ["", "nat", "NAT", None, np.nan]) def test_equality(klass, value): if klass is Period and value == "": pytest.skip("Period cannot parse empty string") assert klass(value).value == iNaT @pytest.mark.parametrize("klass", [Timestamp, Timedelta]) @pytest.mark.parametrize("method", ["round", "floor", "ceil"]) @pytest.mark.parametrize("freq", ["s", "5s", "min", "5min", "h", "5h"]) def test_round_nat(klass, method, freq): # see gh-14940 ts = klass("nat") round_method = getattr(ts, method) assert round_method(freq) is ts @pytest.mark.parametrize( "method", [ "astimezone", "combine", "ctime", "dst", "fromordinal", "fromtimestamp", "fromisocalendar", "isocalendar", "strftime", "strptime", "time", "timestamp", "timetuple", "timetz", "toordinal", "tzname", "utcfromtimestamp", "utcnow", "utcoffset", "utctimetuple", "timestamp", ], ) def test_nat_methods_raise(method): # see gh-9513, gh-17329 msg = f"NaTType does not support {method}" with pytest.raises(ValueError, match=msg): getattr(NaT, method)() @pytest.mark.parametrize("method", ["weekday", "isoweekday"]) def test_nat_methods_nan(method): # see gh-9513, gh-17329 assert np.isnan(getattr(NaT, method)()) 
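# ---------------------------------------------------------------------------
# Illustrative sketch (helper, not collected by pytest): the behavior pinned
# down by the tests above, spelled out on concrete attributes. Field
# accessors on NaT return NaN, boolean accessors return False, unsupported
# datetime methods raise ValueError, and weekday()/isoweekday() return NaN.
# Relies on the module-level `np` and `NaT` imports.
# ---------------------------------------------------------------------------
def _nat_method_behavior_sketch():
    assert np.isnan(NaT.year) and np.isnan(NaT.day)
    assert NaT.is_leap_year is False
    assert np.isnan(NaT.weekday())

    raised = False
    try:
        NaT.ctime()
    except ValueError as err:
        raised = "NaTType does not support ctime" in str(err)
    assert raised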
@pytest.mark.parametrize( "method", ["date", "now", "replace", "today", "tz_convert", "tz_localize"] ) def test_nat_methods_nat(method): # see gh-8254, gh-9513, gh-17329 assert getattr(NaT, method)() is NaT @pytest.mark.parametrize( "get_nat", [lambda x: NaT, lambda x: Timedelta(x), lambda x: Timestamp(x)] ) def test_nat_iso_format(get_nat): # see gh-12300 assert get_nat("NaT").isoformat() == "NaT" @pytest.mark.parametrize( "klass,expected", [ (Timestamp, ["freqstr", "normalize", "to_julian_date", "to_period", "tz"]), ( Timedelta, [ "components", "delta", "is_populated", "resolution_string", "to_pytimedelta", "to_timedelta64", "view", ], ), ], ) def test_missing_public_nat_methods(klass, expected): # see gh-17327 # # NaT should have *most* of the Timestamp and Timedelta methods. # Here, we check which public methods NaT does not have. We # ignore any missing private methods. nat_names = dir(NaT) klass_names = dir(klass) missing = [x for x in klass_names if x not in nat_names and not x.startswith("_")] missing.sort() assert missing == expected def _get_overlap_public_nat_methods(klass, as_tuple=False): """ Get overlapping public methods between NaT and another class. Parameters ---------- klass : type The class to compare with NaT as_tuple : bool, default False Whether to return a list of tuples of the form (klass, method). Returns ------- overlap : list """ nat_names = dir(NaT) klass_names = dir(klass) overlap = [ x for x in nat_names if x in klass_names and not x.startswith("_") and callable(getattr(klass, x)) ] # Timestamp takes precedence over Timedelta in terms of overlap. if klass is Timedelta: ts_names = dir(Timestamp) overlap = [x for x in overlap if x not in ts_names] if as_tuple: overlap = [(klass, method) for method in overlap] overlap.sort() return overlap @pytest.mark.parametrize( "klass,expected", [ ( Timestamp, [ "astimezone", "ceil", "combine", "ctime", "date", "day_name", "dst", "floor", "fromisocalendar", "fromisoformat", "fromordinal", "fromtimestamp", "isocalendar", "isoformat", "isoweekday", "month_name", "now", "replace", "round", "strftime", "strptime", "time", "timestamp", "timetuple", "timetz", "to_datetime64", "to_numpy", "to_pydatetime", "today", "toordinal", "tz_convert", "tz_localize", "tzname", "utcfromtimestamp", "utcnow", "utcoffset", "utctimetuple", "weekday", ], ), (Timedelta, ["total_seconds"]), ], ) def test_overlap_public_nat_methods(klass, expected): # see gh-17327 # # NaT should have *most* of the Timestamp and Timedelta methods. # In case when Timestamp, Timedelta, and NaT are overlap, the overlap # is considered to be with Timestamp and NaT, not Timedelta. assert _get_overlap_public_nat_methods(klass) == expected @pytest.mark.parametrize( "compare", ( _get_overlap_public_nat_methods(Timestamp, True) + _get_overlap_public_nat_methods(Timedelta, True) ), ) def test_nat_doc_strings(compare): # see gh-17327 # # The docstrings for overlapping methods should match. 
klass, method = compare klass_doc = getattr(klass, method).__doc__ nat_doc = getattr(NaT, method).__doc__ assert klass_doc == nat_doc _ops = { "left_plus_right": lambda a, b: a + b, "right_plus_left": lambda a, b: b + a, "left_minus_right": lambda a, b: a - b, "right_minus_left": lambda a, b: b - a, "left_times_right": lambda a, b: a * b, "right_times_left": lambda a, b: b * a, "left_div_right": lambda a, b: a / b, "right_div_left": lambda a, b: b / a, } @pytest.mark.parametrize("op_name", list(_ops.keys())) @pytest.mark.parametrize( "value,val_type", [ (2, "scalar"), (1.5, "floating"), (np.nan, "floating"), ("foo", "str"), (timedelta(3600), "timedelta"), (Timedelta("5s"), "timedelta"), (datetime(2014, 1, 1), "timestamp"), (Timestamp("2014-01-01"), "timestamp"), (Timestamp("2014-01-01", tz="UTC"), "timestamp"), (Timestamp("2014-01-01", tz="US/Eastern"), "timestamp"), (pytz.timezone("Asia/Tokyo").localize(datetime(2014, 1, 1)), "timestamp"), ], ) def test_nat_arithmetic_scalar(op_name, value, val_type): # see gh-6873 invalid_ops = { "scalar": {"right_div_left"}, "floating": { "right_div_left", "left_minus_right", "right_minus_left", "left_plus_right", "right_plus_left", }, "str": set(_ops.keys()), "timedelta": {"left_times_right", "right_times_left"}, "timestamp": { "left_times_right", "right_times_left", "left_div_right", "right_div_left", }, } op = _ops[op_name] if op_name in invalid_ops.get(val_type, set()): if ( val_type == "timedelta" and "times" in op_name and isinstance(value, Timedelta) ): typs = "(Timedelta|NaTType)" msg = rf"unsupported operand type\(s\) for \*: '{typs}' and '{typs}'" elif val_type == "str": # un-specific check here because the message comes from str # and varies by method msg = "|".join( [ "can only concatenate str", "unsupported operand type", "can't multiply sequence", "Can't convert 'NaTType'", "must be str, not NaTType", ] ) else: msg = "unsupported operand type" with pytest.raises(TypeError, match=msg): op(NaT, value) else: if val_type == "timedelta" and "div" in op_name: expected = np.nan else: expected = NaT assert op(NaT, value) is expected @pytest.mark.parametrize( "val,expected", [(np.nan, NaT), (NaT, np.nan), (np.timedelta64("NaT"), np.nan)] ) def test_nat_rfloordiv_timedelta(val, expected): # see gh-#18846 # # See also test_timedelta.TestTimedeltaArithmetic.test_floordiv td = Timedelta(hours=3, minutes=4) assert td // val is expected @pytest.mark.parametrize( "op_name", ["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"], ) @pytest.mark.parametrize( "value", [ DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"), DatetimeIndex(["2011-01-01", "2011-01-02"], tz="US/Eastern", name="x"), DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"]), DatetimeArray._from_sequence( ["2011-01-01", "2011-01-02"], dtype=DatetimeTZDtype(tz="US/Pacific") ), TimedeltaIndex(["1 day", "2 day"], name="x"), ], ) def test_nat_arithmetic_index(op_name, value): # see gh-11718 exp_name = "x" exp_data = [NaT] * 2 if is_datetime64_any_dtype(value.dtype) and "plus" in op_name: expected = DatetimeIndex(exp_data, tz=value.tz, name=exp_name) else: expected = TimedeltaIndex(exp_data, name=exp_name) if not isinstance(value, Index): expected = expected.array op = _ops[op_name] result = op(NaT, value) tm.assert_equal(result, expected) @pytest.mark.parametrize( "op_name", ["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"], ) @pytest.mark.parametrize("box", [TimedeltaIndex, Series, TimedeltaArray._from_sequence]) def 
test_nat_arithmetic_td64_vector(op_name, box): # see gh-19124 vec = box(["1 day", "2 day"], dtype="timedelta64[ns]") box_nat = box([NaT, NaT], dtype="timedelta64[ns]") tm.assert_equal(_ops[op_name](vec, NaT), box_nat) @pytest.mark.parametrize( "dtype,op,out_dtype", [ ("datetime64[ns]", operator.add, "datetime64[ns]"), ("datetime64[ns]", roperator.radd, "datetime64[ns]"), ("datetime64[ns]", operator.sub, "timedelta64[ns]"), ("datetime64[ns]", roperator.rsub, "timedelta64[ns]"), ("timedelta64[ns]", operator.add, "datetime64[ns]"), ("timedelta64[ns]", roperator.radd, "datetime64[ns]"), ("timedelta64[ns]", operator.sub, "datetime64[ns]"), ("timedelta64[ns]", roperator.rsub, "timedelta64[ns]"), ], ) def test_nat_arithmetic_ndarray(dtype, op, out_dtype): other = np.arange(10).astype(dtype) result = op(NaT, other) expected = np.empty(other.shape, dtype=out_dtype) expected.fill("NaT") tm.assert_numpy_array_equal(result, expected) def test_nat_pinned_docstrings(): # see gh-17327 assert NaT.ctime.__doc__ == datetime.ctime.__doc__ def test_to_numpy_alias(): # GH 24653: alias .to_numpy() for scalars expected = NaT.to_datetime64() result = NaT.to_numpy() assert isna(expected) and isna(result) @pytest.mark.parametrize( "other", [ Timedelta(0), Timedelta(0).to_pytimedelta(), pytest.param( Timedelta(0).to_timedelta64(), marks=pytest.mark.xfail( reason="td64 doesn't return NotImplemented, see numpy#17017" ), ), Timestamp(0), Timestamp(0).to_pydatetime(), pytest.param( Timestamp(0).to_datetime64(), marks=pytest.mark.xfail( reason="dt64 doesn't return NotImplemented, see numpy#17017" ), ), Timestamp(0).tz_localize("UTC"), NaT, ], ) def test_nat_comparisons(compare_operators_no_eq_ne, other): # GH 26039 opname = compare_operators_no_eq_ne assert getattr(NaT, opname)(other) is False op = getattr(operator, opname.strip("_")) assert op(NaT, other) is False assert op(other, NaT) is False @pytest.mark.parametrize("other", [np.timedelta64(0, "ns"), np.datetime64("now", "ns")]) def test_nat_comparisons_numpy(other): # Once numpy#17017 is fixed and the xfailed cases in test_nat_comparisons # pass, this test can be removed assert not NaT == other assert NaT != other assert not NaT < other assert not NaT > other assert not NaT <= other assert not NaT >= other @pytest.mark.parametrize("other_and_type", [("foo", "str"), (2, "int"), (2.0, "float")]) @pytest.mark.parametrize( "symbol_and_op", [("<=", operator.le), ("<", operator.lt), (">=", operator.ge), (">", operator.gt)], ) def test_nat_comparisons_invalid(other_and_type, symbol_and_op): # GH#35585 other, other_type = other_and_type symbol, op = symbol_and_op assert not NaT == other assert not other == NaT assert NaT != other assert other != NaT msg = f"'{symbol}' not supported between instances of 'NaTType' and '{other_type}'" with pytest.raises(TypeError, match=msg): op(NaT, other) msg = f"'{symbol}' not supported between instances of '{other_type}' and 'NaTType'" with pytest.raises(TypeError, match=msg): op(other, NaT) @pytest.mark.parametrize( "other", [ np.array(["foo"] * 2, dtype=object), np.array([2, 3], dtype="int64"), np.array([2.0, 3.5], dtype="float64"), ], ids=["str", "int", "float"], ) def test_nat_comparisons_invalid_ndarray(other): # GH#40722 expected = np.array([False, False]) result = NaT == other tm.assert_numpy_array_equal(result, expected) result = other == NaT tm.assert_numpy_array_equal(result, expected) expected = np.array([True, True]) result = NaT != other tm.assert_numpy_array_equal(result, expected) result = other != NaT 
tm.assert_numpy_array_equal(result, expected) for symbol, op in [ ("<=", operator.le), ("<", operator.lt), (">=", operator.ge), (">", operator.gt), ]: msg = f"'{symbol}' not supported between" with pytest.raises(TypeError, match=msg): op(NaT, other) if other.dtype == np.dtype("object"): # uses the reverse operator, so symbol changes msg = None with pytest.raises(TypeError, match=msg): op(other, NaT) def test_compare_date(): # GH#39151 comparing NaT with date object is deprecated # See also: tests.scalar.timestamps.test_comparisons::test_compare_date dt = Timestamp.now().to_pydatetime().date() for left, right in [(NaT, dt), (dt, NaT)]: assert not left == right assert left != right with tm.assert_produces_warning(FutureWarning): assert not left < right with tm.assert_produces_warning(FutureWarning): assert not left <= right with tm.assert_produces_warning(FutureWarning): assert not left > right with tm.assert_produces_warning(FutureWarning): assert not left >= right # Once the deprecation is enforced, the following assertions # can be enabled: # assert not left == right # assert left != right # # with pytest.raises(TypeError): # left < right # with pytest.raises(TypeError): # left <= right # with pytest.raises(TypeError): # left > right # with pytest.raises(TypeError): # left >= right @pytest.mark.parametrize( "obj", [ offsets.YearEnd(2), offsets.YearBegin(2), offsets.MonthBegin(1), offsets.MonthEnd(2), offsets.MonthEnd(12), offsets.Day(2), offsets.Day(5), offsets.Hour(24), offsets.Hour(3), offsets.Minute(), np.timedelta64(3, "h"), np.timedelta64(4, "h"), np.timedelta64(3200, "s"), np.timedelta64(3600, "s"), np.timedelta64(3600 * 24, "s"), np.timedelta64(2, "D"), np.timedelta64(365, "D"), timedelta(-2), timedelta(365), timedelta(minutes=120), timedelta(days=4, minutes=180), timedelta(hours=23), timedelta(hours=23, minutes=30), timedelta(hours=48), ], ) def test_nat_addsub_tdlike_scalar(obj): assert NaT + obj is NaT assert obj + NaT is NaT assert NaT - obj is NaT def test_pickle(): # GH#4606 p = tm.round_trip_pickle(NaT) assert p is NaT
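# ---------------------------------------------------------------------------
# Illustrative sketch (helper, not collected by pytest): the two properties
# the tail of this module pins down, written against the stdlib directly.
# NaT never compares equal (it always compares unequal, even to itself), and
# pickling round-trips to the same singleton, which is what test_pickle
# checks via tm.round_trip_pickle.
# ---------------------------------------------------------------------------
def _nat_compare_and_pickle_sketch():
    import pickle

    assert not NaT == Timestamp(0)
    assert NaT != Timestamp(0)
    assert not NaT == NaT
    assert NaT != NaT

    assert pickle.loads(pickle.dumps(NaT)) is NaT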
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from typing import ( Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator, ) from google.cloud.datacatalog_v1beta1.types import datacatalog from google.cloud.datacatalog_v1beta1.types import search from google.cloud.datacatalog_v1beta1.types import tags class SearchCatalogPager: """A pager for iterating through ``search_catalog`` requests. This class thinly wraps an initial :class:`google.cloud.datacatalog_v1beta1.types.SearchCatalogResponse` object, and provides an ``__iter__`` method to iterate through its ``results`` field. If there are more pages, the ``__iter__`` method will make additional ``SearchCatalog`` requests and continue to iterate through the ``results`` field on the corresponding responses. All the usual :class:`google.cloud.datacatalog_v1beta1.types.SearchCatalogResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., datacatalog.SearchCatalogResponse], request: datacatalog.SearchCatalogRequest, response: datacatalog.SearchCatalogResponse, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.datacatalog_v1beta1.types.SearchCatalogRequest): The initial request object. response (google.cloud.datacatalog_v1beta1.types.SearchCatalogResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = datacatalog.SearchCatalogRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterator[datacatalog.SearchCatalogResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterator[search.SearchCatalogResult]: for page in self.pages: yield from page.results def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) class SearchCatalogAsyncPager: """A pager for iterating through ``search_catalog`` requests. This class thinly wraps an initial :class:`google.cloud.datacatalog_v1beta1.types.SearchCatalogResponse` object, and provides an ``__aiter__`` method to iterate through its ``results`` field. If there are more pages, the ``__aiter__`` method will make additional ``SearchCatalog`` requests and continue to iterate through the ``results`` field on the corresponding responses. All the usual :class:`google.cloud.datacatalog_v1beta1.types.SearchCatalogResponse` attributes are available on the pager. 
If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., Awaitable[datacatalog.SearchCatalogResponse]], request: datacatalog.SearchCatalogRequest, response: datacatalog.SearchCatalogResponse, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.datacatalog_v1beta1.types.SearchCatalogRequest): The initial request object. response (google.cloud.datacatalog_v1beta1.types.SearchCatalogResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = datacatalog.SearchCatalogRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property async def pages(self) -> AsyncIterator[datacatalog.SearchCatalogResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response def __aiter__(self) -> AsyncIterator[search.SearchCatalogResult]: async def async_generator(): async for page in self.pages: for response in page.results: yield response return async_generator() def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) class ListEntryGroupsPager: """A pager for iterating through ``list_entry_groups`` requests. This class thinly wraps an initial :class:`google.cloud.datacatalog_v1beta1.types.ListEntryGroupsResponse` object, and provides an ``__iter__`` method to iterate through its ``entry_groups`` field. If there are more pages, the ``__iter__`` method will make additional ``ListEntryGroups`` requests and continue to iterate through the ``entry_groups`` field on the corresponding responses. All the usual :class:`google.cloud.datacatalog_v1beta1.types.ListEntryGroupsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., datacatalog.ListEntryGroupsResponse], request: datacatalog.ListEntryGroupsRequest, response: datacatalog.ListEntryGroupsResponse, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.datacatalog_v1beta1.types.ListEntryGroupsRequest): The initial request object. response (google.cloud.datacatalog_v1beta1.types.ListEntryGroupsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
""" self._method = method self._request = datacatalog.ListEntryGroupsRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterator[datacatalog.ListEntryGroupsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterator[datacatalog.EntryGroup]: for page in self.pages: yield from page.entry_groups def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) class ListEntryGroupsAsyncPager: """A pager for iterating through ``list_entry_groups`` requests. This class thinly wraps an initial :class:`google.cloud.datacatalog_v1beta1.types.ListEntryGroupsResponse` object, and provides an ``__aiter__`` method to iterate through its ``entry_groups`` field. If there are more pages, the ``__aiter__`` method will make additional ``ListEntryGroups`` requests and continue to iterate through the ``entry_groups`` field on the corresponding responses. All the usual :class:`google.cloud.datacatalog_v1beta1.types.ListEntryGroupsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., Awaitable[datacatalog.ListEntryGroupsResponse]], request: datacatalog.ListEntryGroupsRequest, response: datacatalog.ListEntryGroupsResponse, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.datacatalog_v1beta1.types.ListEntryGroupsRequest): The initial request object. response (google.cloud.datacatalog_v1beta1.types.ListEntryGroupsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = datacatalog.ListEntryGroupsRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property async def pages(self) -> AsyncIterator[datacatalog.ListEntryGroupsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response def __aiter__(self) -> AsyncIterator[datacatalog.EntryGroup]: async def async_generator(): async for page in self.pages: for response in page.entry_groups: yield response return async_generator() def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) class ListEntriesPager: """A pager for iterating through ``list_entries`` requests. This class thinly wraps an initial :class:`google.cloud.datacatalog_v1beta1.types.ListEntriesResponse` object, and provides an ``__iter__`` method to iterate through its ``entries`` field. If there are more pages, the ``__iter__`` method will make additional ``ListEntries`` requests and continue to iterate through the ``entries`` field on the corresponding responses. All the usual :class:`google.cloud.datacatalog_v1beta1.types.ListEntriesResponse` attributes are available on the pager. 
If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., datacatalog.ListEntriesResponse], request: datacatalog.ListEntriesRequest, response: datacatalog.ListEntriesResponse, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.datacatalog_v1beta1.types.ListEntriesRequest): The initial request object. response (google.cloud.datacatalog_v1beta1.types.ListEntriesResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = datacatalog.ListEntriesRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterator[datacatalog.ListEntriesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterator[datacatalog.Entry]: for page in self.pages: yield from page.entries def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) class ListEntriesAsyncPager: """A pager for iterating through ``list_entries`` requests. This class thinly wraps an initial :class:`google.cloud.datacatalog_v1beta1.types.ListEntriesResponse` object, and provides an ``__aiter__`` method to iterate through its ``entries`` field. If there are more pages, the ``__aiter__`` method will make additional ``ListEntries`` requests and continue to iterate through the ``entries`` field on the corresponding responses. All the usual :class:`google.cloud.datacatalog_v1beta1.types.ListEntriesResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., Awaitable[datacatalog.ListEntriesResponse]], request: datacatalog.ListEntriesRequest, response: datacatalog.ListEntriesResponse, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.datacatalog_v1beta1.types.ListEntriesRequest): The initial request object. response (google.cloud.datacatalog_v1beta1.types.ListEntriesResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
""" self._method = method self._request = datacatalog.ListEntriesRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property async def pages(self) -> AsyncIterator[datacatalog.ListEntriesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response def __aiter__(self) -> AsyncIterator[datacatalog.Entry]: async def async_generator(): async for page in self.pages: for response in page.entries: yield response return async_generator() def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) class ListTagsPager: """A pager for iterating through ``list_tags`` requests. This class thinly wraps an initial :class:`google.cloud.datacatalog_v1beta1.types.ListTagsResponse` object, and provides an ``__iter__`` method to iterate through its ``tags`` field. If there are more pages, the ``__iter__`` method will make additional ``ListTags`` requests and continue to iterate through the ``tags`` field on the corresponding responses. All the usual :class:`google.cloud.datacatalog_v1beta1.types.ListTagsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., datacatalog.ListTagsResponse], request: datacatalog.ListTagsRequest, response: datacatalog.ListTagsResponse, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.datacatalog_v1beta1.types.ListTagsRequest): The initial request object. response (google.cloud.datacatalog_v1beta1.types.ListTagsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = datacatalog.ListTagsRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterator[datacatalog.ListTagsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterator[tags.Tag]: for page in self.pages: yield from page.tags def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) class ListTagsAsyncPager: """A pager for iterating through ``list_tags`` requests. This class thinly wraps an initial :class:`google.cloud.datacatalog_v1beta1.types.ListTagsResponse` object, and provides an ``__aiter__`` method to iterate through its ``tags`` field. If there are more pages, the ``__aiter__`` method will make additional ``ListTags`` requests and continue to iterate through the ``tags`` field on the corresponding responses. All the usual :class:`google.cloud.datacatalog_v1beta1.types.ListTagsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. 
""" def __init__( self, method: Callable[..., Awaitable[datacatalog.ListTagsResponse]], request: datacatalog.ListTagsRequest, response: datacatalog.ListTagsResponse, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.datacatalog_v1beta1.types.ListTagsRequest): The initial request object. response (google.cloud.datacatalog_v1beta1.types.ListTagsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = datacatalog.ListTagsRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property async def pages(self) -> AsyncIterator[datacatalog.ListTagsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response def __aiter__(self) -> AsyncIterator[tags.Tag]: async def async_generator(): async for page in self.pages: for response in page.tags: yield response return async_generator() def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
# -*- coding: utf-8 -*- # # Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from .fetchers import NUPermissionsFetcher from .fetchers import NUMetadatasFetcher from .fetchers import NUGlobalMetadatasFetcher from .fetchers import NUVPortsFetcher from bambou import NURESTObject class NUOverlayMirrorDestination(NURESTObject): """ Represents a OverlayMirrorDestination in the VSD Notes: Overlay mirror destinations are pointed to by advanced forwarding policies as the destination for redirected traffic. Targets can be of two types, L3 or virtual wire. For L3 targets a virtual IP should be provided as it allows the system to track among which of the end-points belonging to the overlay mirror destination is the active one. For this type of redirect the packet's destination MAC address is changed to match that of the Virtual IP. For virtual-wire redirection targets, the packets are untouched and forwarded directly to the end-point. """ __rest_name__ = "overlaymirrordestination" __resource_name__ = "overlaymirrordestinations" ## Constants CONST_DESTINATION_TYPE_REDIRECTION_TARGET = "REDIRECTION_TARGET" CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL" CONST_DESTINATION_TYPE_OVERLAY_MIRROR_DESTINATION = "OVERLAY_MIRROR_DESTINATION" CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE" CONST_END_POINT_TYPE_NONE = "NONE" CONST_TRIGGER_TYPE_NONE = "NONE" CONST_END_POINT_TYPE_VIRTUAL_WIRE = "VIRTUAL_WIRE" CONST_TRIGGER_TYPE_GARP = "GARP" def __init__(self, **kwargs): """ Initializes a OverlayMirrorDestination instance Notes: You can specify all parameters while calling this methods. 
A special argument named `data` will enable you to load the object from a Python dictionary Examples: >>> overlaymirrordestination = NUOverlayMirrorDestination(id=u'xxxx-xxx-xxx-xxx', name=u'OverlayMirrorDestination') >>> overlaymirrordestination = NUOverlayMirrorDestination(data=my_dict) """ super(NUOverlayMirrorDestination, self).__init__() # Read/Write Attributes self._esi = None self._name = None self._last_updated_by = None self._last_updated_date = None self._redundancy_enabled = None self._template_id = None self._description = None self._destination_type = None self._virtual_network_id = None self._embedded_metadata = None self._end_point_type = None self._entity_scope = None self._creation_date = None self._trigger_type = None self._owner = None self._external_id = None self.expose_attribute(local_name="esi", remote_name="ESI", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False) self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="redundancy_enabled", remote_name="redundancyEnabled", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="template_id", remote_name="templateID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="destination_type", remote_name="destinationType", attribute_type=str, is_required=False, is_unique=False, choices=[u'OVERLAY_MIRROR_DESTINATION', u'REDIRECTION_TARGET']) self.expose_attribute(local_name="virtual_network_id", remote_name="virtualNetworkID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False) self.expose_attribute(local_name="end_point_type", remote_name="endPointType", attribute_type=str, is_required=True, is_unique=False, choices=[u'NONE', u'VIRTUAL_WIRE']) self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="trigger_type", remote_name="triggerType", attribute_type=str, is_required=False, is_unique=False, choices=[u'GARP', u'NONE']) self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True) # Fetchers self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.vports = NUVPortsFetcher.fetcher_with_object(parent_object=self, relationship="member") self._compute_args(**kwargs) # Properties @property def 
esi(self): """ Get esi value. Notes: ESI id, globally unique This attribute is named `ESI` in VSD API. """ return self._esi @esi.setter def esi(self, value): """ Set esi value. Notes: ESI id, globally unique This attribute is named `ESI` in VSD API. """ self._esi = value @property def name(self): """ Get name value. Notes: Name of this overlay mirror destination """ return self._name @name.setter def name(self, value): """ Set name value. Notes: Name of this overlay mirror destination """ self._name = value @property def last_updated_by(self): """ Get last_updated_by value. Notes: ID of the user who last updated the object. This attribute is named `lastUpdatedBy` in VSD API. """ return self._last_updated_by @last_updated_by.setter def last_updated_by(self, value): """ Set last_updated_by value. Notes: ID of the user who last updated the object. This attribute is named `lastUpdatedBy` in VSD API. """ self._last_updated_by = value @property def last_updated_date(self): """ Get last_updated_date value. Notes: Time stamp when this object was last updated. This attribute is named `lastUpdatedDate` in VSD API. """ return self._last_updated_date @last_updated_date.setter def last_updated_date(self, value): """ Set last_updated_date value. Notes: Time stamp when this object was last updated. This attribute is named `lastUpdatedDate` in VSD API. """ self._last_updated_date = value @property def redundancy_enabled(self): """ Get redundancy_enabled value. Notes: Allow/Disallow redundant appliances and VIP This attribute is named `redundancyEnabled` in VSD API. """ return self._redundancy_enabled @redundancy_enabled.setter def redundancy_enabled(self, value): """ Set redundancy_enabled value. Notes: Allow/Disallow redundant appliances and VIP This attribute is named `redundancyEnabled` in VSD API. """ self._redundancy_enabled = value @property def template_id(self): """ Get template_id value. Notes: Template to which this overlay mirror destination belongs to This attribute is named `templateID` in VSD API. """ return self._template_id @template_id.setter def template_id(self, value): """ Set template_id value. Notes: Template to which this overlay mirror destination belongs to This attribute is named `templateID` in VSD API. """ self._template_id = value @property def description(self): """ Get description value. Notes: Description of this overlay mirror destination """ return self._description @description.setter def description(self, value): """ Set description value. Notes: Description of this overlay mirror destination """ self._description = value @property def destination_type(self): """ Get destination_type value. Notes: Determines the type of destination : redirection target or overlay mirror destination This attribute is named `destinationType` in VSD API. """ return self._destination_type @destination_type.setter def destination_type(self, value): """ Set destination_type value. Notes: Determines the type of destination : redirection target or overlay mirror destination This attribute is named `destinationType` in VSD API. """ self._destination_type = value @property def virtual_network_id(self): """ Get virtual_network_id value. Notes: Auto Generated by VSD. Each overlay mirror destination with redundancy=enable and EndpointType != none will have a globally unique ESI & VNID generated by VSD This attribute is named `virtualNetworkID` in VSD API. """ return self._virtual_network_id @virtual_network_id.setter def virtual_network_id(self, value): """ Set virtual_network_id value. 
Notes: Auto Generated by VSD. Each overlay mirror destination with redundancy=enable and EndpointType != none will have a globally unique ESI & VNID generated by VSD This attribute is named `virtualNetworkID` in VSD API. """ self._virtual_network_id = value @property def embedded_metadata(self): """ Get embedded_metadata value. Notes: Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration. This attribute is named `embeddedMetadata` in VSD API. """ return self._embedded_metadata @embedded_metadata.setter def embedded_metadata(self, value): """ Set embedded_metadata value. Notes: Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration. This attribute is named `embeddedMetadata` in VSD API. """ self._embedded_metadata = value @property def end_point_type(self): """ Get end_point_type value. Notes: EndPointType is an enum. It defines the type of header rewrite and forwarding performed by VRS when the endpoint is used as a mirror destination. Possible value is VIRTUAL_WIRE This attribute is named `endPointType` in VSD API. """ return self._end_point_type @end_point_type.setter def end_point_type(self, value): """ Set end_point_type value. Notes: EndPointType is an enum. It defines the type of header rewrite and forwarding performed by VRS when the endpoint is used as a mirror destination. Possible value is VIRTUAL_WIRE This attribute is named `endPointType` in VSD API. """ self._end_point_type = value @property def entity_scope(self): """ Get entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level This attribute is named `entityScope` in VSD API. """ return self._entity_scope @entity_scope.setter def entity_scope(self, value): """ Set entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level This attribute is named `entityScope` in VSD API. """ self._entity_scope = value @property def creation_date(self): """ Get creation_date value. Notes: Time stamp when this object was created. This attribute is named `creationDate` in VSD API. """ return self._creation_date @creation_date.setter def creation_date(self, value): """ Set creation_date value. Notes: Time stamp when this object was created. This attribute is named `creationDate` in VSD API. """ self._creation_date = value @property def trigger_type(self): """ Get trigger_type value. Notes: Trigger type, THIS IS READ ONLY. Possible values are NONE, GARP. This attribute is named `triggerType` in VSD API. """ return self._trigger_type @trigger_type.setter def trigger_type(self, value): """ Set trigger_type value. Notes: Trigger type, THIS IS READ ONLY. Possible values are NONE, GARP. This attribute is named `triggerType` in VSD API. """ self._trigger_type = value @property def owner(self): """ Get owner value. Notes: Identifies the user that has created this object. """ return self._owner @owner.setter def owner(self, value): """ Set owner value. Notes: Identifies the user that has created this object. """ self._owner = value @property def external_id(self): """ Get external_id value. Notes: External object ID. 
Used for integration with third party systems

                This attribute is named `externalID` in VSD API.
                
        """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                
                This attribute is named `externalID` in VSD API.
                
        """
        self._external_id = value
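
# --- Illustrative usage sketch (not part of the generated SDK) ---
# A minimal, hedged example of driving the class defined above. The keyword
# form comes straight from the constructor docstring; the `data=` form assumes
# that the dictionary keys use the remote (VSD API) attribute names such as
# `endPointType`, which is an assumption about the underlying framework rather
# than something stated in this file. The function name and the sample values
# (`mirror-1`, etc.) are invented for illustration and the function is never
# called.
def _example_usage():  # pragma: no cover - sketch only
    omd = NUOverlayMirrorDestination(name=u'mirror-1')
    omd.end_point_type = u'VIRTUAL_WIRE'                  # required; choices: NONE, VIRTUAL_WIRE
    omd.destination_type = u'OVERLAY_MIRROR_DESTINATION'  # choices: OVERLAY_MIRROR_DESTINATION, REDIRECTION_TARGET

    # Loading from a dictionary, assuming remote attribute names as keys.
    omd_from_dict = NUOverlayMirrorDestination(
        data={u'name': u'mirror-1', u'endPointType': u'VIRTUAL_WIRE'})
    return omd, omd_from_dict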
#!/usr/bin/python # Copyright (c) 2009 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Notes: # # This is all roughly based on the Makefile system used by the Linux # kernel, but is a non-recursive make -- we put the entire dependency # graph in front of make and let it figure it out. # # The code below generates a separate .mk file for each target, but # all are sourced by the top-level Makefile. This means that all # variables in .mk-files clobber one another. Be careful to use := # where appropriate for immediate evaluation, and similarly to watch # that you're not relying on a variable value to last beween different # .mk files. # # TODOs: # # Global settings and utility functions are currently stuffed in the # toplevel Makefile. It may make sense to generate some .mk files on # the side to keep the the files readable. import gyp import gyp.common import os.path # Debugging-related imports -- remove me once we're solid. import code import pprint generator_default_variables = { 'EXECUTABLE_PREFIX': '', 'EXECUTABLE_SUFFIX': '', 'OS': 'linux', 'STATIC_LIB_PREFIX': 'lib', 'SHARED_LIB_PREFIX': 'lib', 'STATIC_LIB_SUFFIX': '.a', 'SHARED_LIB_SUFFIX': '.so', 'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/geni', 'SHARED_INTERMEDIATE_DIR': '$(obj)/gen', 'PRODUCT_DIR': '$(builddir)', 'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)', 'LIB_DIR': '$(obj).$(TOOLSET)', 'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python. 'RULE_INPUT_PATH': '$(abspath $<)', # These appear unused -- ??? 'RULE_INPUT_EXT': 'XXXEXT$(suffix $^)', 'RULE_INPUT_NAME': 'XXXNAME$(notdir $(basename $^)0', 'CONFIGURATION_NAME': '$(BUILDTYPE)', } # Make supports multiple toolsets generator_supports_multiple_toolsets = True def ensure_directory_exists(path): dir = os.path.dirname(path) if dir and not os.path.exists(dir): os.makedirs(dir) # Header of toplevel Makefile. # This should go into the build tree, but it's easier to keep it here for now. SHARED_HEADER = ("""\ # We borrow heavily from the kernel build setup, though we are simpler since # we don't have Kconfig tweaking settings on us. # The implicit make rules have it looking for RCS files, among other things. # We instead explicitly write all the rules we care about. # It's even quicker (saves ~200ms) to pass -r on the command line. MAKEFLAGS=-r # The V=1 flag on command line makes us verbosely print command lines. ifdef V quiet= else quiet=quiet_ endif # Specify BUILDTYPE=Release on the command line for a release build. BUILDTYPE ?= __default_configuration__ # Directory all our build output goes into. # Note that this must be two directories beneath src/ for unit tests to pass, # as they reach into the src/ directory for data with relative paths. builddir ?= $(builddir_name)/$(BUILDTYPE) abs_builddir := $(abspath $(builddir)) # Object output directory. obj := $(builddir)/obj abs_obj := $(abspath $(obj)) # We build up a list of each target that we want to be generated by default. all_targets := # We build up a list of every single one of the targets so we can slurp in the # generated dependency rule Makefiles in one pass. all_deps := # C++ apps need to be linked with g++. Not sure what's appropriate. LINK ?= $(CXX) CC.target ?= $(CC) CXX.target ?= $(CXX) LINK.target ?= $(LINK) AR.target ?= $(AR) RANLIB.target ?= ranlib CC.host ?= gcc CXX.host ?= g++ LINK.host ?= g++ AR.host ?= ar RANLIB.host ?= ranlib # Flags to make gcc output dependency info. 
Note that you need to be # careful here to use the flags that ccache and distcc can understand. # We write to a temporary dep file first and then rename at the end # so we can't end up with a broken dep file. depfile = [email protected] DEPFLAGS = -MMD -MF $(depfile).tmp # We have to fixup the deps output in a few ways. # First, the file output should to mention the proper .o file. # ccache or distcc lose the path to the target, so we convert a rule of # the form: # foobar.o: DEP1 DEP2 # into # path/to/foobar.o: DEP1 DEP2 # Additionally, we want to make missing files not cause us to needlessly # rebuild. We want to rewrite # foobar.o: DEP1 DEP2 \\ # DEP3 # to # DEP1 DEP2: # DEP3: # so if the files are missing, they're just considered phony rules. # We have to do some pretty insane escaping to get those backslashes # and dollar signs past make, the shell, and sed at the same time.""" r""" define fixup_dep sed -i -e "s|^$(notdir $@)|$@|" $(depfile).tmp sed -e "s|^[^:]*: *||" -e "s| *\\\\$$||" -e 's|^ *||' \ -e "/./s|$$|:|" $(depfile).tmp >> $(depfile).tmp cat $(depfile).tmp >> $(depfile) rm -f $(depfile).tmp endef """ """ # Command definitions: # - cmd_foo is the actual command to run; # - quiet_cmd_foo is the brief-output summary of the command. quiet_cmd_cc = CC($(TOOLSET)) $@ cmd_cc = $(CC.$(TOOLSET)) $(CFLAGS) $(DEPFLAGS) -c -o $@ $< quiet_cmd_cxx = CXX($(TOOLSET)) $@ cmd_cxx = $(CXX.$(TOOLSET)) $(CXXFLAGS) $(DEPFLAGS) -c -o $@ $< quiet_cmd_alink = AR+RANLIB($(TOOLSET)) $@ cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) rc $@ $(filter %.o,$^) && $(RANLIB.$(TOOLSET)) $@ quiet_cmd_touch = TOUCH $@ cmd_touch = touch $@ quiet_cmd_copy = COPY $@ cmd_copy = ln -f $< $@ || cp -af $< $@ # Due to circular dependencies between libraries :(, we wrap the # special "figure out circular dependencies" flags around the entire # input list during linking. quiet_cmd_link = LINK($(TOOLSET)) $@ cmd_link = $(LINK.$(TOOLSET)) $(LDFLAGS) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS) # Shared-object link (for generating .so). # Set SONAME to the library filename so our binaries don't reference the local, # absolute paths used on the link command-line. # TODO: perhaps this can share with the LINK command above? quiet_cmd_solink = SOLINK($(TOOLSET)) $@ cmd_solink = $(LINK.$(TOOLSET)) -shared $(LDFLAGS) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS) """ r""" # Define an escape_quotes function to escape single quotes. # This allows us to handle quotes properly as long as we always use # use single quotes and escape_quotes. escape_quotes = $(subst ','\'',$(1)) # This comment is here just to include a ' to unconfuse syntax highlighting. # Define an escape_vars function to escape '$' variable syntax. # This allows us to read/write command lines wth shell variables (e.g. # $LD_LIBRARY_PATH), without triggering make substitution. escape_vars = $(subst $$,$$$$,$(1)) """ """ # Helper to compare the command we're about to run against the command # we logged the last time we ran the command. Produces an empty # string (false) when the commands match. # Tricky point: Make has no string-equality test function. # The kernel uses the following, but it seems like it would have false # positives, where one string reordered its arguments. 
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\ # $(filter-out $(cmd_$@), $(cmd_$(1)))) # We instead substitute each for the empty string into the other, and # say they're equal if both substitutions produce the empty string. command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$@)),\\ $(subst $(cmd_$@),,$(cmd_$(1)))) # Helper that is non-empty when a prerequisite changes. # Normally make does this implicitly, but we force rules to always run # so we can check their command lines. # $? -- new prerequisites # $| -- order-only dependencies prereq_changed = $(filter-out $|,$?) # do_cmd: run a command via the above cmd_foo names, if necessary. # Should always run for a given target to handle command-line changes. # Second argument, if non-zero, makes it do C/C++ dependency munging. define do_cmd $(if $(or $(command_changed),$(prereq_changed)), @echo ' $($(quiet)cmd_$(1))' @mkdir -p $(dir $@) @$(cmd_$(1)) @echo '$(call escape_vars,$(call escape_quotes,cmd_$@ := $(cmd_$(1))))' > $(depfile) @$(if $(2),$(fixup_dep)) ) endef # Declare "all" target first so it is the default, even though we don't have the # deps yet. .PHONY: all all: # make looks for ways to re-generate included makefiles, but in our case, we # don't have a direct way. Explicitly telling make that it has nothing to do # for them makes it go faster. %.d: ; # Use FORCE_DO_CMD to force a target to run. Should be coupled with # do_cmd. .PHONY: FORCE_DO_CMD FORCE_DO_CMD: """) ROOT_HEADER_SUFFIX_RULES = ("""\ # Suffix rules, putting all outputs into $(obj). $(obj).$(TOOLSET)/%.o: $(srcdir)/%.c FORCE_DO_CMD @$(call do_cmd,cc,1) $(obj).$(TOOLSET)/%.o: $(srcdir)/%.s FORCE_DO_CMD @$(call do_cmd,cc) $(obj).$(TOOLSET)/%.o: $(srcdir)/%.S FORCE_DO_CMD @$(call do_cmd,cc) $(obj).$(TOOLSET)/%.o: $(srcdir)/%.cpp FORCE_DO_CMD @$(call do_cmd,cxx,1) $(obj).$(TOOLSET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD @$(call do_cmd,cxx,1) $(obj).$(TOOLSET)/%.o: $(srcdir)/%.cxx FORCE_DO_CMD @$(call do_cmd,cxx,1) # Try building from generated source, too. $(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.c FORCE_DO_CMD @$(call do_cmd,cc,1) $(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.cc FORCE_DO_CMD @$(call do_cmd,cxx,1) $(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.cpp FORCE_DO_CMD @$(call do_cmd,cxx,1) $(obj).$(TOOLSET)/%.o: $(obj)/%.c FORCE_DO_CMD @$(call do_cmd,cc,1) $(obj).$(TOOLSET)/%.o: $(obj)/%.cc FORCE_DO_CMD @$(call do_cmd,cxx,1) $(obj).$(TOOLSET)/%.o: $(obj)/%.cpp FORCE_DO_CMD @$(call do_cmd,cxx,1) """) SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\ # Suffix rules, putting all outputs into $(obj). """) SHARED_HEADER_SUFFIX_RULES_SRCDIR = { '.c': ("""\ $(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.c FORCE_DO_CMD @$(call do_cmd,cc,1) """), '.s': ("""\ $(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.s FORCE_DO_CMD @$(call do_cmd,cc) """), '.S': ("""\ $(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.S FORCE_DO_CMD @$(call do_cmd,cc) """), '.cpp': ("""\ $(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.cpp FORCE_DO_CMD @$(call do_cmd,cxx,1) """), '.cc': ("""\ $(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD @$(call do_cmd,cxx,1) """), '.cxx': ("""\ $(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.cxx FORCE_DO_CMD @$(call do_cmd,cxx,1) """), } SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\ # Try building from generated source, too. 
""") SHARED_HEADER_SUFFIX_RULES_OBJDIR1 = { '.c': ("""\ $(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj).$(TOOLSET)/%.c FORCE_DO_CMD @$(call do_cmd,cc,1) """), '.cc': ("""\ $(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj).$(TOOLSET)/%.cc FORCE_DO_CMD @$(call do_cmd,cxx,1) """), '.cpp': ("""\ $(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj).$(TOOLSET)/%.cpp FORCE_DO_CMD @$(call do_cmd,cxx,1) """), } SHARED_HEADER_SUFFIX_RULES_OBJDIR2 = { '.c': ("""\ $(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj)/%.c FORCE_DO_CMD @$(call do_cmd,cc,1) """), '.cc': ("""\ $(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj)/%.cc FORCE_DO_CMD @$(call do_cmd,cxx,1) """), '.cpp': ("""\ $(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj)/%.cpp FORCE_DO_CMD @$(call do_cmd,cxx,1) """), } SHARED_HEADER_SUFFIX_RULES = ( SHARED_HEADER_SUFFIX_RULES_COMMENT1 + ''.join(SHARED_HEADER_SUFFIX_RULES_SRCDIR.values()) + SHARED_HEADER_SUFFIX_RULES_COMMENT2 + ''.join(SHARED_HEADER_SUFFIX_RULES_OBJDIR1.values()) + ''.join(SHARED_HEADER_SUFFIX_RULES_OBJDIR2.values()) ) # This gets added to the very beginning of the Makefile. SHARED_HEADER_SRCDIR = ("""\ # The source directory tree. srcdir := %s """) SHARED_HEADER_BUILDDIR_NAME = ("""\ # The name of the builddir. builddir_name ?= %s """) SHARED_FOOTER = """\ # Now that we've included the sub-makefiles, we can define the rule depending on # all_targets. all: $(all_targets) # Add in dependency-tracking rules. $(all_deps) is the list of every single # target in our tree. First, only consider targets that already have been # built, as unbuilt targets will be built regardless of dependency info: all_deps := $(wildcard $(sort $(all_deps))) # Of those, only consider the ones with .d (dependency) info: d_files := $(wildcard $(foreach f,$(all_deps),$(f).d)) ifneq ($(d_files),) include $(d_files) endif """ header = """\ # This file is generated by gyp; do not edit. """ def Compilable(filename): """Return true if the file is compilable (should be in OBJS).""" for res in (filename.endswith(e) for e in ['.c', '.cc', '.cpp', '.cxx', '.s', '.S']): if res: return True return False def Target(filename): """Translate a compilable filename to its .o target.""" return os.path.splitext(filename)[0] + '.o' def QuoteIfNecessary(string): if '"' in string: string = '"' + string.replace('"', '\\"') + '"' return string srcdir_prefix = '' def Sourceify(path): """Convert a path to its source directory form.""" if '$(' in path: return path if os.path.isabs(path): return path return srcdir_prefix + path # Map from qualified target to path to output. target_outputs = {} # Map from qualified target to a list of all linker dependencies, # transitively expanded. # Used in building shared-library-based executables. target_link_deps = {} class MakefileWriter: """MakefileWriter packages up the writing of one target-specific foobar.mk. Its only real entry point is Write(), and is mostly used for namespacing. """ def Write(self, qualified_target, base_path, output_filename, spec, configs, part_of_all): """The main entry point: writes a .mk file for a single target. 
Arguments: qualified_target: target we're generating base_path: path relative to source root we're building in, used to resolve target-relative paths output_filename: output .mk file name to write spec, configs: gyp info part_of_all: flag indicating this target is part of 'all' """ print 'Generating %s' % output_filename ensure_directory_exists(output_filename) self.fp = open(output_filename, 'w') self.fp.write(header) self.path = base_path self.target = spec['target_name'] self.type = spec['type'] self.toolset = spec['toolset'] deps, link_deps = self.ComputeDeps(spec) # Some of the generation below can add extra output, sources, or # link dependencies. All of the out params of the functions that # follow use names like extra_foo. extra_outputs = [] extra_sources = [] extra_link_deps = [] self.output = self.ComputeOutput(spec) self._INSTALLABLE_TARGETS = ('executable', 'loadable_module', 'shared_library') if self.type in self._INSTALLABLE_TARGETS: self.alias = os.path.basename(self.output) else: self.alias = self.output self.WriteLn("TOOLSET := " + self.toolset) self.WriteLn("TARGET := " + self.target) # Actions must come first, since they can generate more OBJs for use below. if 'actions' in spec: self.WriteActions(spec['actions'], extra_sources, extra_outputs, part_of_all) # Rules must be early like actions. if 'rules' in spec: self.WriteRules(spec['rules'], extra_sources, extra_outputs, part_of_all) if 'copies' in spec: self.WriteCopies(spec['copies'], extra_outputs, part_of_all) all_sources = spec.get('sources', []) + extra_sources if all_sources: self.WriteSources(configs, deps, all_sources, extra_outputs, extra_link_deps, part_of_all) sources = filter(Compilable, all_sources) if sources: self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1) extensions = set([os.path.splitext(s)[1] for s in sources]) for ext in extensions: if ext in SHARED_HEADER_SUFFIX_RULES_SRCDIR: self.WriteLn(SHARED_HEADER_SUFFIX_RULES_SRCDIR[ext]) self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2) for ext in extensions: if ext in SHARED_HEADER_SUFFIX_RULES_OBJDIR1: self.WriteLn(SHARED_HEADER_SUFFIX_RULES_OBJDIR1[ext]) for ext in extensions: if ext in SHARED_HEADER_SUFFIX_RULES_OBJDIR2: self.WriteLn(SHARED_HEADER_SUFFIX_RULES_OBJDIR2[ext]) self.WriteLn('# End of this set of suffix rules') self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps, extra_outputs, part_of_all) # Update global list of target outputs, used in dependency tracking. target_outputs[qualified_target] = self.alias # Update global list of link dependencies. if self.type == 'static_library': target_link_deps[qualified_target] = [self.output] elif self.type == 'shared_library': # Anyone that uses us transitively depend on all of our link # dependencies. target_link_deps[qualified_target] = [self.output] + link_deps self.fp.close() def WriteActions(self, actions, extra_sources, extra_outputs, part_of_all): """Write Makefile code for any 'actions' from the gyp input. extra_sources: a list that will be filled in with newly generated source files, if any extra_outputs: a list that will be filled in with any outputs of these actions (used to make other pieces dependent on these actions) part_of_all: flag indicating this target is part of 'all' """ for action in actions: name = self.target + '_' + action['action_name'] self.WriteLn('### Rules for action "%s":' % action['action_name']) inputs = action['inputs'] outputs = action['outputs'] # Build up a list of outputs. # Collect the output dirs we'll need. 
dirs = set() for out in outputs: dir = os.path.split(out)[0] if dir: dirs.add(dir) if int(action.get('process_outputs_as_sources', False)): extra_sources += outputs # Write the actual command. command = gyp.common.EncodePOSIXShellList(action['action']) if 'message' in action: self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message'])) else: self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name)) if len(dirs) > 0: command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command # Set LD_LIBRARY_PATH in case the action runs an executable from this # build which links to shared libs from this build. if self.path: cd_action = 'cd %s; ' % Sourceify(self.path) else: cd_action = '' # actions run on the host, so they should in theory only use host # libraries, but until everything is made cross-compile safe, also use # target libraries. # TODO(piman): when everything is cross-compile safe, remove lib.target self.WriteLn('cmd_%s = export LD_LIBRARY_PATH=$(builddir)/lib.host:' '$(builddir)/lib.target:$$LD_LIBRARY_PATH; %s%s' % (name, cd_action, command)) self.WriteLn() outputs = map(self.Absolutify, outputs) # The makefile rules are all relative to the top dir, but the gyp actions # are defined relative to their containing dir. This replaces the obj # variable for the action rule with an absolute version so that the output # goes in the right place. self.WriteMakeRule(outputs, ['obj := $(abs_obj)']) self.WriteMakeRule(outputs, ['builddir := $(abs_builddir)']) self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)), part_of_all=part_of_all, command=name) # Stuff the outputs in a variable so we can refer to them later. outputs_variable = 'action_%s_outputs' % name self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs))) extra_outputs.append('$(%s)' % outputs_variable) self.WriteLn() self.WriteLn() def WriteRules(self, rules, extra_sources, extra_outputs, part_of_all): """Write Makefile code for any 'rules' from the gyp input. extra_sources: a list that will be filled in with newly generated source files, if any extra_outputs: a list that will be filled in with any outputs of these rules (used to make other pieces dependent on these rules) part_of_all: flag indicating this target is part of 'all' """ for rule in rules: name = self.target + '_' + rule['rule_name'] count = 0 self.WriteLn('### Generated for rule %s:' % name) all_outputs = [] for rule_source in rule['rule_sources']: dirs = set() rule_source_basename = os.path.basename(rule_source) (rule_source_root, rule_source_ext) = \ os.path.splitext(rule_source_basename) outputs = [self.ExpandInputRoot(out, rule_source_root) for out in rule['outputs']] for out in outputs: dir = os.path.dirname(out) if dir: dirs.add(dir) if int(rule.get('process_outputs_as_sources', False)): extra_sources.append(out) all_outputs += outputs inputs = map(Sourceify, map(self.Absolutify, [rule_source] + rule.get('inputs', []))) actions = ['$(call do_cmd,%s_%d)' % (name, count)] if name == 'resources_grit': # HACK: This is ugly. Grit intentionally doesn't touch the # timestamp of its output file when the file doesn't change, # which is fine in hash-based dependency systems like scons # and forge, but not kosher in the make world. After some # discussion, hacking around it here seems like the least # amount of pain. 
actions += ['@touch --no-create $@'] self.WriteMakeRule(outputs, ['obj := $(abs_obj)']) self.WriteMakeRule(outputs, ['builddir := $(abs_builddir)']) self.WriteMakeRule(outputs, inputs + ['FORCE_DO_CMD'], actions) if part_of_all: self.WriteLn('all_targets += %s' % ' '.join(outputs)) self.WriteLn('all_deps += %s' % ' '.join(outputs)) action = [self.ExpandInputRoot(ac, rule_source_root) for ac in rule['action']] mkdirs = '' if len(dirs) > 0: mkdirs = 'mkdir -p %s; ' % ' '.join(dirs) if self.path: cd_action = 'cd %s; ' % Sourceify(self.path) else: cd_action = '' self.WriteLn( "cmd_%(name)s_%(count)d = %(cd_action)s%(mkdirs)s%(action)s" % { 'action': gyp.common.EncodePOSIXShellList(action), 'cd_action': cd_action, 'count': count, 'mkdirs': mkdirs, 'name': name, }) self.WriteLn( 'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % { 'count': count, 'name': name, }) self.WriteLn() count += 1 outputs_variable = 'rule_%s_outputs' % name self.WriteList(all_outputs, outputs_variable) extra_outputs.append('$(%s)' % outputs_variable) self.WriteLn('### Finished generating for rule: %s' % name) self.WriteLn() self.WriteLn('### Finished generating for all rules') self.WriteLn('') def WriteCopies(self, copies, extra_outputs, part_of_all): """Write Makefile code for any 'copies' from the gyp input. extra_outputs: a list that will be filled in with any outputs of this action (used to make other pieces dependent on this action) part_of_all: flag indicating this target is part of 'all' """ self.WriteLn('### Generated for copy rule.') variable = self.target + '_copies' outputs = [] for copy in copies: for path in copy['files']: path = Sourceify(self.Absolutify(path)) filename = os.path.split(path)[1] output = Sourceify(self.Absolutify(os.path.join(copy['destination'], filename))) self.WriteDoCmd([output], [path], 'copy', part_of_all) outputs.append(output) self.WriteLn('%s = %s' % (variable, ' '.join(outputs))) extra_outputs.append('$(%s)' % variable) self.WriteLn() def WriteSources(self, configs, deps, sources, extra_outputs, extra_link_deps, part_of_all): """Write Makefile code for any 'sources' from the gyp input. These are source files necessary to build the current target. configs, deps, sources: input from gyp. extra_outputs: a list of extra outputs this action should be dependent on; used to serialize action/rules before compilation extra_link_deps: a list that will be filled in with any outputs of compilation (to be used in link lines) part_of_all: flag indicating this target is part of 'all' """ # Write configuration-specific variables for CFLAGS, etc. 
for configname in sorted(configs.keys()): config = configs[configname] self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D') self.WriteLn("# Flags passed to both C and C++ files."); self.WriteList(config.get('cflags'), 'CFLAGS_%s' % configname) self.WriteLn("# Flags passed to only C (and not C++) files."); self.WriteList(config.get('cflags_c'), 'CFLAGS_C_%s' % configname) self.WriteLn("# Flags passed to only C++ (and not C) files."); self.WriteList(config.get('cflags_cc'), 'CFLAGS_CC_%s' % configname) includes = config.get('include_dirs') if includes: includes = map(Sourceify, map(self.Absolutify, includes)) self.WriteList(includes, 'INCS_%s' % configname, prefix='-I') sources = filter(Compilable, sources) objs = map(self.Objectify, map(self.Absolutify, map(Target, sources))) self.WriteList(objs, 'OBJS') if part_of_all: self.WriteLn('# Add to the list of dependencies for the default target') self.WriteLn('all_targets += $(OBJS)') self.WriteLn() self.WriteLn('# Add to the list of files we specially track ' 'dependencies for.') self.WriteLn('all_deps += $(OBJS)') self.WriteLn() # Make sure our dependencies are built first. if deps: self.WriteMakeRule(['$(OBJS)'], deps, comment = 'Make sure our dependencies are built ' 'before any of us.', order_only = True) # Make sure the actions and rules run first. # If they generate any extra headers etc., the per-.o file dep tracking # will catch the proper rebuilds, so order only is still ok here. if extra_outputs: self.WriteMakeRule(['$(OBJS)'], extra_outputs, comment = 'Make sure our actions/rules run ' 'before any of us.', order_only = True) if objs: extra_link_deps.append('$(OBJS)') self.WriteLn("""\ # CFLAGS et al overrides must be target-local. # See "Target-specific Variable Values" in the GNU Make manual.""") self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)") self.WriteLn("$(OBJS): CFLAGS := $(CFLAGS_$(BUILDTYPE)) " "$(CFLAGS_C_$(BUILDTYPE)) " "$(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE))") self.WriteLn("$(OBJS): CXXFLAGS := $(CFLAGS_$(BUILDTYPE)) " "$(CFLAGS_CC_$(BUILDTYPE)) " "$(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE))") self.WriteLn() def ComputeOutput(self, spec): """Return the 'output' (full output path) of a gyp spec. E.g., the loadable module 'foobar' in directory 'baz' will produce '$(obj)/baz/libfoobar.so' """ output = None target = spec['target_name'] path = spec.get('product_dir', os.path.join('$(obj).' + self.toolset, self.path)) if self.type == 'static_library': target = 'lib%s.a' % (target[:3] == 'lib' and [target[3:]] or [target])[0] elif self.type in ('loadable_module', 'shared_library'): target = 'lib%s.so' % (target[:3] == 'lib' and [target[3:]] or [target])[0] path = spec.get('product_dir', os.path.join('$(builddir)', 'lib.' + self.toolset, self.path)) elif self.type == 'none': target = '%s.stamp' % target elif self.type == 'settings': return None elif self.type == 'executable': target = spec.get('product_name', target) path = spec.get('product_dir', os.path.join('$(builddir)')) else: print ("ERROR: What output file should be generated?", "typ", self.type, "target", target) return os.path.join(path, target) def ComputeDeps(self, spec): """Compute the dependencies of a gyp spec. Returns a tuple (deps, link_deps), where each is a list of filenames that will need to be put in front of make for either building (deps) or linking (link_deps). 
""" deps = [] link_deps = [] if 'dependencies' in spec: deps.extend([target_outputs[dep] for dep in spec['dependencies'] if target_outputs[dep]]) for dep in spec['dependencies']: if dep in target_link_deps: link_deps.extend(target_link_deps[dep]) deps.extend(link_deps) # TODO: It seems we need to transitively link in libraries (e.g. -lfoo)? # This hack makes it work: # link_deps.extend(spec.get('libraries', [])) return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps)) def WriteTarget(self, spec, configs, deps, link_deps, extra_outputs, part_of_all): """Write Makefile code to produce the final target of the gyp spec. spec, configs: input from gyp. deps, link_deps: dependency lists; see ComputeDeps() extra_outputs: any extra outputs that our target should depend on part_of_all: flag indicating this target is part of 'all' """ self.WriteLn('### Rules for final target.') if extra_outputs: self.WriteMakeRule([self.output], extra_outputs, comment = 'Build our special outputs first.', order_only = True) if self.type not in ('settings', 'none'): for configname in sorted(configs.keys()): config = configs[configname] self.WriteList(config.get('ldflags'), 'LDFLAGS_%s' % configname) self.WriteList(spec.get('libraries'), 'LIBS') self.WriteLn('%s: LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' % self.output) self.WriteLn('%s: LIBS := $(LIBS)' % self.output) if self.type == 'executable': self.WriteDoCmd([self.output], link_deps, 'link', part_of_all) elif self.type == 'static_library': self.WriteDoCmd([self.output], link_deps, 'alink', part_of_all) elif self.type in ('loadable_module', 'shared_library'): self.WriteDoCmd([self.output], link_deps, 'solink', part_of_all) elif self.type == 'none': # Write a stamp line. self.WriteDoCmd([self.output], deps, 'touch', part_of_all) elif self.type == 'settings': # Only used for passing flags around. pass else: print "WARNING: no output for", self.type, target # Add an alias for each target (if there are any outputs). if self.output and self.output != self.target: self.WriteMakeRule([self.target], [self.output], comment='Add target alias') # Add special-case rules for our installable targets. # 1) They need to install to the build dir or "product" dir. # 2) They get shortcuts for building (e.g. "make chrome"). # 3) They are part of "make all". if self.type in self._INSTALLABLE_TARGETS: if self.type in ('shared_library'): file_desc = 'shared library' # Install all shared libs into a common directory (per toolset) for # convenient access with LD_LIBRARY_PATH. binpath = '$(builddir)/lib.%s/%s' % (self.toolset, self.alias) else: file_desc = 'executable' binpath = '$(builddir)/' + self.alias installable_deps = [self.output] if binpath != self.output: self.WriteDoCmd([binpath], [self.output], 'copy', comment = 'Copy this to the %s output path.' % file_desc, part_of_all=part_of_all) installable_deps.append(binpath) if self.output != self.alias: self.WriteMakeRule([self.alias], installable_deps, comment = 'Short alias for building this %s.' % file_desc, phony = True) if part_of_all: self.WriteMakeRule(['all'], [binpath], comment = 'Add %s to "all" target.' % file_desc, phony = True) def WriteList(self, list, variable=None, prefix=''): """Write a variable definition that is a list of values. E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out foo = blaha blahb but in a pretty-printed style. 
""" self.fp.write(variable + " := ") if list: list = [QuoteIfNecessary(prefix + l) for l in list] self.fp.write(" \\\n\t".join(list)) self.fp.write("\n\n") def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None): """Write a Makefile rule that uses do_cmd. This makes the outputs dependent on the command line that was run, as well as support the V= make command line flag. """ self.WriteMakeRule(outputs, inputs, actions = ['$(call do_cmd,%s)' % command], comment = comment, force = True) if part_of_all: # Add our outputs to the list of dependencies of the default target self.WriteLn('all_targets += %s' % ' '.join(outputs)) # Add our outputs to the list of targets we read depfiles from. self.WriteLn('all_deps += %s' % ' '.join(outputs)) def WriteMakeRule(self, outputs, inputs, actions=None, comment=None, order_only=False, force=False, phony=False): """Write a Makefile rule, with some extra tricks. outputs: a list of outputs for the rule (note: this is not directly supported by make; see comments below) inputs: a list of inputs for the rule actions: a list of shell commands to run for the rule comment: a comment to put in the Makefile above the rule (also useful for making this Python script's code self-documenting) order_only: if true, makes the dependency order-only force: if true, include FORCE_DO_CMD as an order-only dep phony: if true, the rule does not actually generate the named output, the output is just a name to run the rule """ if comment: self.WriteLn('# ' + comment) if phony: self.WriteLn('.PHONY: ' + ' '.join(outputs)) # TODO(evanm): just make order_only a list of deps instead of these hacks. if order_only: order_insert = '| ' else: order_insert = '' if force: force_append = ' FORCE_DO_CMD' else: force_append = '' if actions: self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0]) self.WriteLn('%s: %s%s%s' % (outputs[0], order_insert, ' '.join(inputs), force_append)) if actions: for action in actions: self.WriteLn('\t%s' % action) if len(outputs) > 1: # If we have more than one output, a rule like # foo bar: baz # that for *each* output we must run the action, potentially # in parallel. That is not what we're trying to write -- what # we want is that we run the action once and it generates all # the files. # http://www.gnu.org/software/hello/manual/automake/Multiple-Outputs.html # discusses this problem and has this solution: # 1) Write the naive rule that would produce parallel runs of # the action. # 2) Make the outputs seralized on each other, so we won't start # a a parallel run until the first run finishes, at which point # we'll have generated all the outputs and we're done. self.WriteLn('%s: %s' % (' '.join(outputs[1:]), outputs[0])) self.WriteLn() def WriteLn(self, text=''): self.fp.write(text + '\n') def Objectify(self, path): """Convert a path to its output directory form.""" if '$(' in path: path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset) return path return '$(obj).%s/$(TARGET)/%s' % (self.toolset, path) def Absolutify(self, path): """Convert a subdirectory-relative path into a base-relative path. Skips over paths that contain variables.""" if '$(' in path: return path return os.path.normpath(os.path.join(self.path, path)) def FixupArgPath(self, arg): if '/' in arg or '.h.' 
in arg: return self.Absolutify(arg) return arg def ExpandInputRoot(self, template, expansion): if '%(INPUT_ROOT)s' not in template: return template path = template % { 'INPUT_ROOT': expansion } if not os.path.dirname(path): # If it's just the file name, turn it into a path so FixupArgPath() # will know to Absolutify() it. path = os.path.join('.', path) return path def GenerateOutput(target_list, target_dicts, data, params): options = params['options'] generator_flags = params.get('generator_flags', {}) builddir_name = generator_flags.get('output_dir', 'out') # TODO: search for the first non-'Default' target. This can go # away when we add verification that all targets have the # necessary configurations. default_configuration = None toolsets = set([target_dicts[target]['toolset'] for target in target_list]) for target in target_list: spec = target_dicts[target] if spec['default_configuration'] != 'Default': default_configuration = spec['default_configuration'] break if not default_configuration: default_configuration = 'Default' srcdir = '.' makefile_name = 'Makefile' + options.suffix makefile_path = os.path.join(options.depth, makefile_name) if options.generator_output: global srcdir_prefix makefile_path = os.path.join(options.generator_output, makefile_path) srcdir = gyp.common.RelativePath(srcdir, options.generator_output) srcdir_prefix = '$(srcdir)/' ensure_directory_exists(makefile_path) root_makefile = open(makefile_path, 'w') root_makefile.write(SHARED_HEADER_SRCDIR % srcdir) root_makefile.write(SHARED_HEADER_BUILDDIR_NAME % builddir_name) root_makefile.write(SHARED_HEADER.replace('__default_configuration__', default_configuration)) for toolset in toolsets: root_makefile.write('TOOLSET := %s\n' % toolset) root_makefile.write(ROOT_HEADER_SUFFIX_RULES) # Find the list of targets that derive from the gyp file(s) being built. needed_targets = set() for build_file in params['build_files']: for target in gyp.common.AllTargets(target_list, target_dicts, build_file): needed_targets.add(target) build_files = set() include_list = [] for qualified_target in target_list: build_file, target, toolset = gyp.common.ParseQualifiedTarget( qualified_target) build_files.add(gyp.common.RelativePath(build_file, options.depth)) included_files = data[build_file]['included_files'] for included_file in included_files: # The included_files entries are relative to the dir of the build file # that included them, so we have to undo that and then make them relative # to the root dir. relative_include_file = gyp.common.RelativePath( gyp.common.UnrelativePath(included_file, build_file), options.depth) abs_include_file = os.path.abspath(relative_include_file) # If the include file is from the ~/.gyp dir, we should use absolute path # so that relocating the src dir doesn't break the path. if (params['home_dot_gyp'] and abs_include_file.startswith(params['home_dot_gyp'])): build_files.add(abs_include_file) else: build_files.add(relative_include_file) # Paths in gyp files are relative to the .gyp file, but we want # paths relative to the source root for the master makefile. Grab # the path of the .gyp file as the base to relativize against. # E.g. "foo/bar" when we're constructing targets found "foo/bar/baz.gyp". base_path = gyp.common.RelativePath(os.path.dirname(build_file), options.depth) # We write the .mk file in the base_path directory. output_file = os.path.join(options.depth, base_path, target + '.' 
+ toolset + options.suffix + '.mk') if options.generator_output: output_file = os.path.join(options.generator_output, output_file) spec = target_dicts[qualified_target] configs = spec['configurations'] writer = MakefileWriter() writer.Write(qualified_target, base_path, output_file, spec, configs, part_of_all=qualified_target in needed_targets) # Our root_makefile lives at the source root. Compute the relative path # from there to the output_file for including. submakefile_path = gyp.common.RelativePath(output_file, os.path.dirname(makefile_path)) include_list.append('include ' + submakefile_path + '\n') # Write out the sorted list of includes. include_list.sort() root_makefile.write('\n') for include in include_list: root_makefile.write(include) root_makefile.write('\n') # Write the target to regenerate the Makefile. if generator_flags.get('auto_regeneration', True): build_files_args = [gyp.common.RelativePath(filename, options.depth) for filename in params['build_files_arg']] root_makefile.write("%s: %s\n\t%s\n" % ( makefile_name, ' '.join(map(Sourceify, build_files)), gyp.common.EncodePOSIXShellList( [gyp.common.FixIfRelativePath(params['gyp_binary'], options.depth), '-fmake'] + gyp.RegenerateFlags(options) + build_files_args))) root_makefile.write(SHARED_FOOTER) root_makefile.close()
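
# --- Illustrative self-checks (not part of the original generator) ---
# A small, hedged sketch of what the module-level helpers defined above are
# expected to produce, plus a loose Python analogue of the `command_changed`
# string-equality trick documented in SHARED_HEADER. The function names and
# the sample paths below are invented for illustration; nothing here is
# invoked by the generator itself.
def _selfcheck_helpers():
  # Compilable()/Target() classify sources and map them to their .o targets.
  assert Compilable('foo/bar.cc') and not Compilable('foo/bar.h')
  assert Target('foo/bar.cc') == 'foo/bar.o'
  # QuoteIfNecessary() wraps and escapes strings containing double quotes.
  assert QuoteIfNecessary('say "hi"') == '"say \\"hi\\""'
  # Paths that already contain make variables pass through Sourceify untouched.
  assert Sourceify('$(obj)/gen/x.cc') == '$(obj)/gen/x.cc'

def _strings_equal_like_make(a, b):
  # Loose analogue of command_changed: since make has no string-equality
  # function, two strings are treated as equal iff substituting each for the
  # empty string in the other leaves nothing behind.
  return not (a.replace(b, '') or b.replace(a, ''))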
# # Copyright (c) SAS Institute Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import json import os from testutils import mock from jobslave.generators import docker from jobslave_test.jobslave_helper import JobSlaveHelper import logging class DockerTest(JobSlaveHelper): def setUp(self): super(DockerTest, self).setUp() self.origLogHandlers = logging.root.handlers logging.root.handlers = [] self.logFile = os.path.join(self.workDir, "build.log") logging.basicConfig(filename=self.logFile, filemode="w", format="%(filename)s/%(funcName)s: %(message)s", level=logging.DEBUG) def tearDown(self): for handler in logging.root.handlers: handler.close() logging.root.handlers = self.origLogHandlers super(DockerTest, self).tearDown() def _mock(self, img, dockerBuildTree, withURLOpener=False): self.data['data'].update(dockerBuildTree=json.dumps(dockerBuildTree)) self.slaveCfg.conaryProxy = "http://[fe80::250:56ff:fec0:1]/conary" origLogCall = docker.logCall self.logCallArgs = logCallArgs = [] def mockLogCall(cmd, **kw): logCallArgs.append((cmd, kw)) if cmd[0].startswith('mount') or cmd[0].startswith('umount'): return return origLogCall(cmd, **kw) self.mock(docker, 'logCall', mockLogCall) mock.mockMethod(img.downloadChangesets) mock.mockMethod(img.postOutput) mock.mockMethod(img.status) mock.mockMethod(img.installFilesInExistingTree) imgNames = [] stack = [ dockerBuildTree ] while stack: top = stack.pop() if top.get('url') is None: imgNames.append(img.sanitizeBaseFileName(top['buildData']['name'])) stack.extend(top.get('children', [])) imgNames.sort() tarballs = [ os.path.join(docker.constants.finishedDir, img.UUID, x + '.tar.gz') for x in imgNames ] if withURLOpener: self._mockURLOpener(img, dockerBuildTree) return tarballs def _mockURLOpener(self, img, dockerBuildTree): extractedLayerDir = os.path.join(self.workDir, "tests", "uncompressed-layer") layersDir = os.path.join(self.workDir, 'tests', 'layers') docker.util.mkdirChain(extractedLayerDir) file(os.path.join(extractedLayerDir, "dummy"), "w").write("dummy") dockerImageIds = [ dockerBuildTree['dockerImageId'] ] fakeParents = [(x['id'], x) for x in dockerBuildTree.get('_fakeParents', [])] dockerImageIds.extend(x[0] for x in fakeParents) fakeParents = dict(fakeParents) fakeParents[dockerBuildTree['dockerImageId']] = dict( manifest=dockerBuildTree.pop('manifest', {})) dockerImageIds.reverse() parent = None repos = {} for i, dockerImageId in enumerate(dockerImageIds): layerDir = os.path.join(layersDir, dockerImageId) docker.util.mkdirChain(layerDir) docker.logCall(["tar", "-C", extractedLayerDir, "-cf", os.path.join(layerDir, "layer.tar"), "."]) # If a manifest is present in the "fake" parents, use it meta = fakeParents.get(dockerImageId, {}).get('manifest', {}) if parent is not None: meta['parent'] = parent json.dump(meta, file(os.path.join(layerDir, 'json'), "w")) repos['my-super-repo/img-%d' % i] = { 'latest' : dockerImageId } repos['my-lame-repo/img-%d' % (100+i)] = { "tag-%02d" % i : dockerImageId } # Same name with different tags for different images 
repos.setdefault('my-release-repo/conflict', {})['image-%02d' % i] = dockerImageId parent = dockerImageId json.dump(repos, file(os.path.join(layersDir, 'repositories'), "w")) parentImage = os.path.join(self.workDir, "tests", "parent.tar.gz") docker.logCall(["tar", "-C", layersDir, "-zcf", parentImage, 'repositories', ] + dockerImageIds) def getImage(url): f = file(parentImage) # Make sure we don't download it again os.unlink(parentImage) return f img.response.getImage = getImage def testBaseImage(self): dockerBuildTree = dict( nvf="group-foo=/my.example.com@ns:1/12345.67:1-1-1[is: x86_64]", buildData=self.Data, ) img = docker.DockerImage(self.slaveCfg, self.data) tarballs = self._mock(img, dockerBuildTree) img.write() self.assertEquals( [x[0][0] for x in img.installFilesInExistingTree._mock.calls], [img.workDir + '/docker-image/unpacked/131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800']) self.assertEquals([ sorted(x.name for x in docker.tarfile.open(t)) for t in tarballs ], [[ '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800', '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800/VERSION', '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800/json', '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800/layer.tar', 'repositories', ]]) def testChildImage(self): dockerBuildTree = dict( nvf="group-foo=/my.example.com@ns:1/12345.67:1-1-1[is: x86_64]", url="http://example.com/downloadFile?id=123", dockerImageId="131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800", buildData=self.Data, children=[ dict( nvf="group-bar=/my.example.com@ns:1/12345.67:2-1-1[is: x86_64]", buildData=dict( buildId=1001, name='bar-64bit', outputToken='OUTPUT-TOKEN-bar', ), ), ], ) img = docker.DockerImage(self.slaveCfg, self.data) tarballs = self._mock(img, dockerBuildTree, withURLOpener=True) img.write() self.assertEquals( [x[0][0] for x in img.installFilesInExistingTree._mock.calls], [ img.workDir + '/docker-image/unpacked/5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972.ovffs', ]) self.assertEquals([sorted(x.name for x in docker.tarfile.open(t)) for t in tarballs ], [[ '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800', '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800/json', '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800/layer.tar', '5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972', '5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972/VERSION', '5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972/json', '5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972/layer.tar', 'repositories', ]]) # Call again, just to make sure we don't re-download img.write() def testDeepChildHierarchy(self): dockerBuildTree = dict( nvf="group-foo=/my.example.com@ns:1/12345.67:1-1-1[is: x86_64]", url="http://example.com/downloadFile?id=123", _fakeParents = [ dict( id="dockerImageIdFakeParent-1", manifest=dict( config=dict( Env=["FP1=1"], ExposedPorts={"21/ssh":{}}), ), ), dict( id="dockerImageIdFakeParent-0", ), ], manifest=dict(config=dict(Entrypoint=["/bin/bash"], Cmd="-x", Env=["FP1=1"], ExposedPorts={"22/ssh":{}})), dockerImageId="131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800", buildData=self.Data, children=[ dict( nvf="group-bar=/my.example.com@ns:1/12345.67:2-1-1[is: x86_64]", buildData=dict( buildId=1001, name='bar-64bit', outputToken='OUTPUT-TOKEN-bar', ), children=[ dict( 
nvf="group-baz=/my.example.com@ns:1/12345.67:3-1-1[is: x86_64]", buildData=dict( buildId=1002, name='baz-64bit', outputToken='OUTPUT-TOKEN-baz', ), ), ], ), ], ) img = docker.DockerImage(self.slaveCfg, self.data) tarballs = self._mock(img, dockerBuildTree, withURLOpener=True) img.write() self.assertEquals( [x[0][0] for x in img.installFilesInExistingTree._mock.calls], [ os.path.join(img.workDir, x) for x in [ 'docker-image/unpacked/5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972.ovffs', 'docker-image/unpacked/18723084021be3ea9dd7cc38b91714d34fb9faa464ea19c77294adc8f8453313.ovffs', ]]) self.assertEquals([sorted(x.name for x in docker.tarfile.open(t)) for t in tarballs ], [ [ '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800', '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800/json', '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800/layer.tar', '5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972', '5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972/VERSION', '5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972/json', '5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972/layer.tar', 'dockerImageIdFakeParent-0', 'dockerImageIdFakeParent-0/json', 'dockerImageIdFakeParent-0/layer.tar', 'dockerImageIdFakeParent-1', 'dockerImageIdFakeParent-1/json', 'dockerImageIdFakeParent-1/layer.tar', 'repositories', ], [ '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800', '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800/json', '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800/layer.tar', '18723084021be3ea9dd7cc38b91714d34fb9faa464ea19c77294adc8f8453313', '18723084021be3ea9dd7cc38b91714d34fb9faa464ea19c77294adc8f8453313/VERSION', '18723084021be3ea9dd7cc38b91714d34fb9faa464ea19c77294adc8f8453313/json', '18723084021be3ea9dd7cc38b91714d34fb9faa464ea19c77294adc8f8453313/layer.tar', '5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972', '5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972/VERSION', '5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972/json', '5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972/layer.tar', 'dockerImageIdFakeParent-0', 'dockerImageIdFakeParent-0/json', 'dockerImageIdFakeParent-0/layer.tar', 'dockerImageIdFakeParent-1', 'dockerImageIdFakeParent-1/json', 'dockerImageIdFakeParent-1/layer.tar', 'repositories', ], ], ) self.assertEquals( [x[0] for x in img.postOutput._mock.calls], [ ((('%s/%s/bar-64bit.tar.gz' % (docker.constants.finishedDir, img.UUID), 'Tar File'),),), ((('%s/%s/baz-64bit.tar.gz' % (docker.constants.finishedDir, img.UUID), 'Tar File'),),), ] ) calls = img.postOutput._mock.calls # Remove created time, since that breaks our comparisons for call in calls: manif = json.loads(call[1][0][1]['manifest']) del manif['created'] call[1][0][1]['manifest'] = json.dumps(manif, sort_keys=True) self.assertEquals( [x[1] for x in img.postOutput._mock.calls], [ ( ('attributes', {'docker_image_id': '5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972', 'installed_size': 40960, 'manifest' : '{"Architecture": "amd64", "Comment": "Created by Conary command: conary update \'group-bar=/my.example.com@ns:1/2-1-1[is: x86_64]\'", "Size": 10240, "config": {"Cmd": "-x", "Entrypoint": ["/bin/bash"], "Env": ["FP1=1", "PATH=/usr/sbin:/usr/bin:/sbin:/bin"], "ExposedPorts": {"22/ssh": {}}}, "container_config": {}, "docker_version": "1.8.1", "id": 
"5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972", "os": "linux", "parent": "131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800"}'}), ('forJobData', dockerBuildTree['children'][0]['buildData']), ), ( ('attributes', {'docker_image_id': '18723084021be3ea9dd7cc38b91714d34fb9faa464ea19c77294adc8f8453313', 'installed_size': 51200, 'manifest' : '{"Architecture": "amd64", "Comment": "Created by Conary command: conary update \'group-baz=/my.example.com@ns:1/3-1-1[is: x86_64]\'", "Size": 10240, "config": {"Cmd": "-x", "Entrypoint": ["/bin/bash"], "Env": ["FP1=1", "PATH=/usr/sbin:/usr/bin:/sbin:/bin"], "ExposedPorts": {"22/ssh": {}}}, "container_config": {}, "docker_version": "1.8.1", "id": "18723084021be3ea9dd7cc38b91714d34fb9faa464ea19c77294adc8f8453313", "os": "linux", "parent": "5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972"}'}), ('forJobData', dockerBuildTree['children'][0]['children'][0]['buildData']), ), ]) self.assertEquals( [x[0] for x in img.status._mock.calls], [ ('Downloading parent image',), ('Unpacking parent image',), ('Creating layer',), ('Creating manifest',), ('Packaging layers',), ('Build done', 300), ('Creating layer',), ('Creating manifest',), ('Packaging layers',), ('Build done', 300), ]) self.assertEquals( [x[1] for x in img.status._mock.calls], [ (), (), (), (), (), (('forJobData', dockerBuildTree['children'][0]['buildData']),), (), (), (), (('forJobData', dockerBuildTree['children'][0]['children'][0]['buildData']),), ]) repos = json.load(file(img.workDir + '/docker-image/layers/repositories')) self.assertEquals(repos, { 'my-super-repo/img-0': {'latest': 'dockerImageIdFakeParent-0'}, 'appeng/foo': {'1-1-1': '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800'}, 'my-super-repo/img-1': {'latest': 'dockerImageIdFakeParent-1'}, 'appeng/baz': {'3-1-1': '18723084021be3ea9dd7cc38b91714d34fb9faa464ea19c77294adc8f8453313'}, 'my-super-repo/img-2': {'latest': '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800'}, 'appeng/bar': {'2-1-1': '5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972'}, 'my-lame-repo/img-102': {'tag-02': '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800'}, 'my-lame-repo/img-101': {'tag-01': 'dockerImageIdFakeParent-1'}, 'my-lame-repo/img-100': {'tag-00': 'dockerImageIdFakeParent-0'}, 'my-release-repo/conflict' : { 'image-00' : 'dockerImageIdFakeParent-0', 'image-01' : 'dockerImageIdFakeParent-1', 'image-02' : '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800', } }) manif = json.load(file(img.workDir + '/docker-image/layers/5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972/json')) self.assertEquals(manif['config']['Entrypoint'], ['/bin/bash']) self.assertEquals(manif['config']['Env'], ['FP1=1', 'PATH=/usr/sbin:/usr/bin:/sbin:/bin']) self.assertEquals(manif['config']['ExposedPorts'], {'22/ssh': {}}) manif = json.load(file(img.workDir + '/docker-image/layers/18723084021be3ea9dd7cc38b91714d34fb9faa464ea19c77294adc8f8453313/json')) self.assertEquals(manif['config']['Env'], ['FP1=1', 'PATH=/usr/sbin:/usr/bin:/sbin:/bin']) self.assertEquals(manif['config']['ExposedPorts'], {'22/ssh': {}}) def testOverlayfsLimits(self): # APPENG-3414 dockerBuildTree = dict( nvf="group-foo=/my.example.com@ns:1/12345.67:1-1-1[is: x86_64]", url="http://example.com/downloadFile?id=123", _fakeParents = [dict(id="dockerImageIdFakeParent-2"), dict(id="dockerImageIdFakeParent-1")], dockerImageId="DockerImageIdFakeParent-0", buildData=self.Data, children=[ dict( 
nvf="group-A=/my.example.com@ns:1/12345.67:2-1-1[is: x86_64]", buildData=dict( buildId=10, name='A-64bit', outputToken='OUTPUT-TOKEN-A', ), children=[ dict( nvf="group-AA=/my.example.com@ns:1/12345.67:3-1-1[is: x86_64]", buildData=dict( buildId=100, name='AA-64bit', outputToken='OUTPUT-TOKEN-AA', ), children = [ dict( nvf="group-AAA=/my.example.com@ns:1/12345.67:3-1-1[is: x86_64]", buildData=dict( buildId=1000, name='AAA-64bit', outputToken='OUTPUT-TOKEN-AAA', ), children = [ dict( nvf="group-AAAA=/my.example.com@ns:1/12345.67:3-1-1[is: x86_64]", buildData=dict( buildId=10000, name='AAAA-64bit', outputToken='OUTPUT-TOKEN-AAAA', ), ), dict( nvf="group-AAAB=/my.example.com@ns:1/12345.67:3-1-1[is: x86_64]", buildData=dict( buildId=10001, name='AAAB-64bit', outputToken='OUTPUT-TOKEN-AAAB', ), ), ], ), dict( nvf="group-AAB=/my.example.com@ns:1/12345.67:3-1-1[is: x86_64]", buildData=dict( buildId=1001, name='AAB-64bit', outputToken='OUTPUT-TOKEN-AAB', ), ), ], ), dict( nvf="group-AB=/my.example.com@ns:1/12345.67:3-1-1[is: x86_64]", buildData=dict( buildId=101, name='AB-64bit', outputToken='OUTPUT-TOKEN-AB', ), children = [ dict( nvf="group-ABA=/my.example.com@ns:1/12345.67:3-1-1[is: x86_64]", buildData=dict( buildId=1010, name='ABA-64bit', outputToken='OUTPUT-TOKEN-ABA', ), children = [ dict( nvf="group-ABAA=/my.example.com@ns:1/12345.67:3-1-1[is: x86_64]", buildData=dict( buildId=10100, name='ABAA-64bit', outputToken='OUTPUT-TOKEN-ABAA', ), ), dict( nvf="group-ABAB=/my.example.com@ns:1/12345.67:3-1-1[is: x86_64]", buildData=dict( buildId=10101, name='ABAB-64bit', outputToken='OUTPUT-TOKEN-ABAB', ), ), ], ), ], ), ], ), ], ) img = docker.DockerImage(self.slaveCfg, self.data) tarballs = self._mock(img, dockerBuildTree, withURLOpener=True) img.write() lines = [ x for x in file(self.logFile) if x.startswith('docker.py/') ] self.assertEquals(''.join(lines), """\ docker.py/_downloadParentImage: Downloading parent image DockerImageIdFakeParent-0 docker.py/_downloadParentImage: Unpacking parent image as .../unpacked/DockerImageIdFakeParent-0 docker.py/_downloadParentImage: Extracting parent layer dockerImageIdFakeParent-1 on .../unpacked/DockerImageIdFakeParent-0 docker.py/_downloadParentImage: Extracting parent layer dockerImageIdFakeParent-2 on .../unpacked/DockerImageIdFakeParent-0 docker.py/_downloadParentImage: Extracting parent layer DockerImageIdFakeParent-0 on .../unpacked/DockerImageIdFakeParent-0 docker.py/writeChild: Building child image A-64bit, layer c6026eee80a1779cf07c46d57e628cf324bbc59f30f01b2c5828a7f7cc80a957 docker.py/mountOverlayFs: Mounting layer c6026eee80a1779cf07c46d57e628cf324bbc59f30f01b2c5828a7f7cc80a957 on top of .../unpacked/DockerImageIdFakeParent-0 docker.py/writeChild: Installing group-A=/my.example.com@ns:1/2-1-1[is: x86_64] into .../unpacked/c6026eee80a1779cf07c46d57e628cf324bbc59f30f01b2c5828a7f7cc80a957.ovffs docker.py/writeChild: Building child image AA-64bit, layer d42a05e6082c032fe6f3316848be3e6bb4918147860f3c2da0f9b0957048bc8c docker.py/writeChild: Extracting layer c6026eee80a1779cf07c46d57e628cf324bbc59f30f01b2c5828a7f7cc80a957 on .../unpacked/DockerImageIdFakeParent-0 docker.py/mountOverlayFs: Mounting layer d42a05e6082c032fe6f3316848be3e6bb4918147860f3c2da0f9b0957048bc8c on top of .../unpacked/DockerImageIdFakeParent-0 docker.py/writeChild: Installing group-AA=/my.example.com@ns:1/3-1-1[is: x86_64] into .../unpacked/d42a05e6082c032fe6f3316848be3e6bb4918147860f3c2da0f9b0957048bc8c.ovffs docker.py/writeChild: Building child image AAA-64bit, layer 
ba3eb7b677b067cc3e9d96b9ae4cd7a26147bfcafd279216920ac93140a7c8b3 docker.py/mountOverlayFs: Mounting layer d42a05e6082c032fe6f3316848be3e6bb4918147860f3c2da0f9b0957048bc8c on top of .../unpacked/DockerImageIdFakeParent-0 docker.py/mountOverlayFs: Mounting layer ba3eb7b677b067cc3e9d96b9ae4cd7a26147bfcafd279216920ac93140a7c8b3 on top of .../unpacked/d42a05e6082c032fe6f3316848be3e6bb4918147860f3c2da0f9b0957048bc8c.ovffs docker.py/writeChild: Installing group-AAA=/my.example.com@ns:1/3-1-1[is: x86_64] into .../unpacked/ba3eb7b677b067cc3e9d96b9ae4cd7a26147bfcafd279216920ac93140a7c8b3.ovffs docker.py/writeChild: Building child image AAAA-64bit, layer 4d23ee7e0cf3673936bae6e79573d66ee5c8b6eda70bf2cb8d4660b6c446d5d9 docker.py/mountOverlayFs: Mounting layer d42a05e6082c032fe6f3316848be3e6bb4918147860f3c2da0f9b0957048bc8c on top of .../unpacked/DockerImageIdFakeParent-0 docker.py/writeChild: Extracting layer ba3eb7b677b067cc3e9d96b9ae4cd7a26147bfcafd279216920ac93140a7c8b3 on .../unpacked/d42a05e6082c032fe6f3316848be3e6bb4918147860f3c2da0f9b0957048bc8c.ovffs docker.py/mountOverlayFs: Mounting layer 4d23ee7e0cf3673936bae6e79573d66ee5c8b6eda70bf2cb8d4660b6c446d5d9 on top of .../unpacked/d42a05e6082c032fe6f3316848be3e6bb4918147860f3c2da0f9b0957048bc8c.ovffs docker.py/writeChild: Installing group-AAAA=/my.example.com@ns:1/3-1-1[is: x86_64] into .../unpacked/4d23ee7e0cf3673936bae6e79573d66ee5c8b6eda70bf2cb8d4660b6c446d5d9.ovffs docker.py/writeChild: Building child image AAAB-64bit, layer 50923db68d510024b722c3f35508636d5bbafe66ff937a530846b1c1e185d4c9 docker.py/mountOverlayFs: Mounting layer d42a05e6082c032fe6f3316848be3e6bb4918147860f3c2da0f9b0957048bc8c on top of .../unpacked/DockerImageIdFakeParent-0 docker.py/writeChild: Extracting layer ba3eb7b677b067cc3e9d96b9ae4cd7a26147bfcafd279216920ac93140a7c8b3 on .../unpacked/d42a05e6082c032fe6f3316848be3e6bb4918147860f3c2da0f9b0957048bc8c.ovffs docker.py/mountOverlayFs: Mounting layer 50923db68d510024b722c3f35508636d5bbafe66ff937a530846b1c1e185d4c9 on top of .../unpacked/d42a05e6082c032fe6f3316848be3e6bb4918147860f3c2da0f9b0957048bc8c.ovffs docker.py/writeChild: Installing group-AAAB=/my.example.com@ns:1/3-1-1[is: x86_64] into .../unpacked/50923db68d510024b722c3f35508636d5bbafe66ff937a530846b1c1e185d4c9.ovffs docker.py/writeChild: Building child image AAB-64bit, layer 48c4efe6960fa63f9b180dfaa9c07f1022da209358dd346958a6d35f6ad51b3b docker.py/mountOverlayFs: Mounting layer d42a05e6082c032fe6f3316848be3e6bb4918147860f3c2da0f9b0957048bc8c on top of .../unpacked/DockerImageIdFakeParent-0 docker.py/mountOverlayFs: Mounting layer 48c4efe6960fa63f9b180dfaa9c07f1022da209358dd346958a6d35f6ad51b3b on top of .../unpacked/d42a05e6082c032fe6f3316848be3e6bb4918147860f3c2da0f9b0957048bc8c.ovffs docker.py/writeChild: Installing group-AAB=/my.example.com@ns:1/3-1-1[is: x86_64] into .../unpacked/48c4efe6960fa63f9b180dfaa9c07f1022da209358dd346958a6d35f6ad51b3b.ovffs docker.py/writeChild: Building child image AB-64bit, layer de97564729559f69824a96665ddb4698982408dd13db98f4616c2fb5ffec0a99 docker.py/mountOverlayFs: Mounting layer de97564729559f69824a96665ddb4698982408dd13db98f4616c2fb5ffec0a99 on top of .../unpacked/DockerImageIdFakeParent-0 docker.py/writeChild: Installing group-AB=/my.example.com@ns:1/3-1-1[is: x86_64] into .../unpacked/de97564729559f69824a96665ddb4698982408dd13db98f4616c2fb5ffec0a99.ovffs docker.py/writeChild: Building child image ABA-64bit, layer 89e7b6f17c593889b68757edd7bdd8efdf373f337c1d275f68faaa38d6133601 docker.py/writeChild: Extracting layer 
de97564729559f69824a96665ddb4698982408dd13db98f4616c2fb5ffec0a99 on .../unpacked/DockerImageIdFakeParent-0 docker.py/mountOverlayFs: Mounting layer 89e7b6f17c593889b68757edd7bdd8efdf373f337c1d275f68faaa38d6133601 on top of .../unpacked/DockerImageIdFakeParent-0 docker.py/writeChild: Installing group-ABA=/my.example.com@ns:1/3-1-1[is: x86_64] into .../unpacked/89e7b6f17c593889b68757edd7bdd8efdf373f337c1d275f68faaa38d6133601.ovffs docker.py/writeChild: Building child image ABAA-64bit, layer 8089ffec4b48c8ac03e31974edfd3df6886920fbce323130debf15f23095bd80 docker.py/writeChild: Extracting layer 89e7b6f17c593889b68757edd7bdd8efdf373f337c1d275f68faaa38d6133601 on .../unpacked/DockerImageIdFakeParent-0 docker.py/mountOverlayFs: Mounting layer 8089ffec4b48c8ac03e31974edfd3df6886920fbce323130debf15f23095bd80 on top of .../unpacked/DockerImageIdFakeParent-0 docker.py/writeChild: Installing group-ABAA=/my.example.com@ns:1/3-1-1[is: x86_64] into .../unpacked/8089ffec4b48c8ac03e31974edfd3df6886920fbce323130debf15f23095bd80.ovffs docker.py/writeChild: Building child image ABAB-64bit, layer 380be0952033bb90c7f7924f1461fc746fea8461f09866e272d9874439cee697 docker.py/mountOverlayFs: Mounting layer 380be0952033bb90c7f7924f1461fc746fea8461f09866e272d9874439cee697 on top of .../unpacked/DockerImageIdFakeParent-0 docker.py/writeChild: Installing group-ABAB=/my.example.com@ns:1/3-1-1[is: x86_64] into .../unpacked/380be0952033bb90c7f7924f1461fc746fea8461f09866e272d9874439cee697.ovffs """) def testDeepHierarchy(self): dockerBuildTree = dict( nvf="group-foo=/my.example.com@ns:1/12345.67:1-1-1[is: x86_64]", dockerImageId="131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800", buildData=self.Data, children=[ dict( nvf="group-bar=/my.example.com@ns:1/12345.67:2-1-1[is: x86_64]", buildData=dict( buildId=1001, name='bar-64bit', outputToken='OUTPUT-TOKEN-bar', data=dict( dockerRepositoryName='repository-for-bar', dockerfile=""" MAINTAINER [email protected] EXPOSE 80 ENTRYPOINT [ "/usr/bin/a" ] CMD [ "-d" ]""", ) ), children=[ dict( nvf="group-baz=/my.example.com@ns:1/12345.67:3-1-1[is: x86_64]", buildData=dict( buildId=1002, name='baz-64bit', outputToken='OUTPUT-TOKEN-baz', data = dict( dockerRepositoryName='repository-for-baz', dockerfile=""" EXPOSE 443 ENTRYPOINT [ "/usr/bin/b" ] CMD [ "-d" ]""",) ), ), ], ), ], ) img = docker.DockerImage(self.slaveCfg, self.data) tarballs = self._mock(img, dockerBuildTree, withURLOpener=True) img.write() self.assertEquals( [x[0][0] for x in img.installFilesInExistingTree._mock.calls], [ os.path.join(img.workDir, x) for x in [ 'docker-image/unpacked/131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800', 'docker-image/unpacked/5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972.ovffs', 'docker-image/unpacked/18723084021be3ea9dd7cc38b91714d34fb9faa464ea19c77294adc8f8453313.ovffs', ]]) # Look at the json files manifest = json.load(file(img.workDir + '/docker-image/layers/18723084021be3ea9dd7cc38b91714d34fb9faa464ea19c77294adc8f8453313/json')) self.assertEquals(manifest.get('author'), None) self.assertEquals(manifest['config']['ExposedPorts'], {'443/tcp' : {}, '80/tcp' : {}}) self.assertEquals(manifest['Comment'], "Created by Conary command: conary update 'group-baz=/my.example.com@ns:1/3-1-1[is: x86_64]'") manifest = json.load(file(img.workDir + '/docker-image/layers/5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972/json')) self.assertEquals(manifest.get('author'), '[email protected]') 
self.assertEquals(manifest['config']['ExposedPorts'], {'80/tcp': {}}) self.assertEquals(manifest['Comment'], "Created by Conary command: conary update 'group-bar=/my.example.com@ns:1/2-1-1[is: x86_64]'") repos = json.load(file(img.workDir + '/docker-image/layers/repositories')) self.assertEquals(repos, { 'repository-for-bar/bar': { '2-1-1': '5414b567e26c01f2032e41e62a449fd2781f26011721b2b7cb947434c080c972', }, 'appeng/foo': { '1-1-1': '131ae464fe41edbb2cea58d9b67245482b7ac5d06fd72e44a9d62f6e49bac800', }, 'repository-for-baz/baz': { '3-1-1': '18723084021be3ea9dd7cc38b91714d34fb9faa464ea19c77294adc8f8453313', }, }) def testWriteLayerWithDeletions(self): img = docker.DockerImage(self.slaveCfg, self.data) mock.mockMethod(img.status) layersDir = os.path.join(img.workDir, "docker-image/layers") unpackDir = os.path.join(img.workDir, "docker-image/unpacked") explodedDir = os.path.join(self.workDir, "exploded") docker.util.mkdirChain(explodedDir) regularFilePath = os.path.join(explodedDir, 'regular-file') deletedFilePath = os.path.join(explodedDir, 'deleted-file') symlinkFile = os.path.join(explodedDir, 'symlink-file') file(regularFilePath, "w").write('a') file(deletedFilePath, "w") os.symlink("regular-file", symlinkFile) unpackDir2 = os.path.join(self.workDir, 'unpacked') docker.util.mkdirChain(os.path.join(unpackDir2, 'deleted-file')) origStat = os.lstat def mockStat(fname): if fname.endswith('deleted-file'): obj = mock.MockObject(st_mode=docker.stat.S_IFCHR, st_rdev=0) return obj return origStat(fname) self.mock(os, 'lstat', mockStat) origOpen = os.open def mockOpen(fname, mode, perms=0644): if fname.endswith('deleted-file'): perms = 0755 return origOpen(fname, mode, perms) self.mock(os, 'open', mockOpen) def mockMknod(fname, mode, dev): file(fname, "w").write("Mocked") self.mock(os, 'mknod', mockMknod) imgSpec = docker.ImageSpec(name='foo', dockerImageId='aaaa-bb-cc', nvf=docker.TroveTuple('group-foo=/cny.tv@ns:1/123.45:1-2-3[is: x86_64]')) img.writeLayer(explodedDir, layersDir, imgSpec, withDeletions=True) # Make sure we've reverted back to the ovf-marked deleted file self.assertEqual(file(deletedFilePath).read(), "Mocked") # Make sure the tarball has a .wh.deleted-file tarfilePath = os.path.join(layersDir, imgSpec.dockerImageId, 'layer.tar') tf = docker.tarfile.open(tarfilePath) deleted = [ x for x in tf if x.name == './.wh.deleted-file' ] self.assertEqual([ x.mode for x in deleted ], [ 0755 ]) # Now try to extract the layer self.unmock() mockTF = mock.MockObject() mockTF._mock._dict[0] = mock.MockObject(name='./regular-file') mockTF._mock._dict[1] = mock.MockObject(name='./.wh.deleted-file', mode=0) def mockOpen(fpath): return mockTF self.mock(docker.tarfile, 'open', mockOpen) img._extractLayer(unpackDir2, tarfilePath) # We should have only the regular file self.assertEqual(sorted(os.listdir(unpackDir2)), [ 'regular-file', 'symlink-file', ]) class DockerfileTest(JobSlaveHelper): def testParseDockerFile(self): txt = """ # THIS is a comment FROM aaa MAINTAINER [email protected] CMD /usr/sbin/httpd -X EXPOSE 80 EXPOSE "443/tcp" EXPOSE 53/udp 211/tcp """ df = docker.Dockerfile() df.parse(txt) manif = docker.Manifest() df.toManifest(manif) self.assertEquals(manif, docker.Manifest( config=docker.Manifest( Cmd=['/bin/sh', '-c', '/usr/sbin/httpd -X'], ExposedPorts={'211/tcp': {}, '443/tcp': {}, '80/tcp': {}, '53/udp': {}}, ), author="[email protected]", )) def testParseDockerFile2(self): txt = """ # THIS is a comment FROM aaa MAINTAINER <[email protected]> ENTRYPOINT ["/usr/sbin/httpd", "-D", "a b", 
] CMD -X EXPOSE 80 EXPOSE "443/tcp" EXPOSE 53/udp 211/tcp ENV a=1 b=2 \\ c=3 ENV d=4 # With a comment ENV e 5 ENV f=6 c 33 g=two\ words h=localhost:80 """ df = docker.Dockerfile() df.parse(txt) self.assertEquals(df.environment, ['a=1', 'b=2', 'c=33', 'd=4', 'e=5', 'f=6', 'g=two words', 'h=localhost:80']) self.assertEquals(df.exposedPorts, ['211/tcp', '443/tcp', '53/udp', '80/tcp']) self.assertEquals(df.entrypoint, ["/usr/sbin/httpd", "-D", "a b",]) def testCmdAndEntrypoint(self): txt = """ ENTRYPOINT [ "/usr/sbin/httpd" , ] CMD ["-X"] """ df = docker.Dockerfile().parse(txt) self.assertEquals(df.entrypoint, [ "/usr/sbin/httpd" ]) self.assertEquals(df.cmd, ['-X']) def testCmdAndEntrypointInvalid(self): txt = """ ENTRYPOINT [ "/usr/sbin/httpd" , ] CMD "-X" """ df = docker.Dockerfile().parse(txt) self.assertEquals(df.entrypoint, [ "/usr/sbin/httpd" ]) self.assertEquals(df.cmd, ['/bin/sh', '-c', '-X']) def testDockerFileMerge(self): txt = """ MAINTAINER [email protected] ENTRYPOINT [ "/usr/sbin/httpd" ] CMD [ "-X" ] EXPOSE 80 EXPOSE "443/tcp" ENV a=1 b=2 """ df1 = docker.Dockerfile() df1.parse(txt) txt = """ MAINTAINER [email protected] CMD /usr/sbin/a EXPOSE "443/tcp" EXPOSE 211 ENV b=3 c=4 """ df2 = docker.Dockerfile() df2.parse(txt) child = df1.toManifest() child.merge(df2.toManifest()) self.assertEquals(child.exposedPorts, {'211/tcp':{}, '443/tcp':{}, '80/tcp':{}, }) self.assertEquals(child.author, '[email protected]') self.assertEqual(child, docker.Manifest( config = docker.Manifest({ 'Cmd': ['-X'], 'Entrypoint': ['/usr/sbin/httpd'], 'ExposedPorts': { '211/tcp' : {}, '443/tcp' : {}, '80/tcp' : {}}, 'Env' : [ "a=1", "b=2", "c=4" ], }), author = '[email protected]', )) def testDockerFileMerge_Entrypoint_CMD(self): # Parent dockerfile, child dockerfile, expected entrypoint, expected cmd combinations = [ ('ENTRYPOINT [ "/bin/echo", "foo" ]', 'CMD [ "/bin/bash" ]', [ "/bin/echo", "foo" ], [ "/bin/bash" ]), ('CMD [ "/bin/bash" ]', 'ENTRYPOINT [ "/bin/echo", "foo" ]', [ "/bin/echo", "foo" ], None), ('ENTRYPOINT [ "/bin/echo" ]\nCMD ["foo"]', 'CMD [ "/bin/bash" ]', [ "/bin/echo" ], [ "/bin/bash" ]), ('ENTRYPOINT [ "/bin/echo" ]\nCMD ["foo"]', 'ENTRYPOINT [ "/bin/echo" ]\nCMD [ "/bin/bash" ]', [ "/bin/echo" ], ["/bin/bash" ]), ('ENTRYPOINT [ "/bin/echo" ]\nCMD ["foo"]', 'ENTRYPOINT [ "/bin/bash" ]\nCMD [ "-x" ]', [ "/bin/bash" ], [ "-x" ]), # c- ('CMD [ "cparent" ]', '', None, ["cparent" ]), # c-c ('CMD [ "c" ]', 'CMD ["c"]', None, ["c"]), # c-C ('CMD [ "cparent" ]', 'CMD ["cchild"]', None, ["cchild"]), # c-E ('CMD [ "cparent" ]', 'ENTRYPOINT ["echild"]', ["echild"], None), # c-Ec ('CMD [ "c" ]', 'ENTRYPOINT ["echild"]\nCMD ["c"]', ["echild"], ["c"]), # c-EC ('CMD [ "cparent" ]', 'ENTRYPOINT ["echild"]\nCMD ["cchild"]', ["echild"], ["cchild"]), # - ('', '', None, None), # -C ('', 'CMD ["cchild"]', None, ["cchild"]), # -E ('', 'ENTRYPOINT ["echild"]', ["echild"], None), # -EC ('', 'ENTRYPOINT ["echild"]\nCMD ["cchild"]', ["echild"], ["cchild"]), # e- ('ENTRYPOINT [ "eparent" ]', '', ["eparent"], None), # e-C ('ENTRYPOINT [ "eparent" ]', 'CMD ["cchild"]', ["eparent"], ["cchild"]), # e-e ('ENTRYPOINT [ "e" ]', 'ENTRYPOINT ["e"]', ["e"], None), # e-E ('ENTRYPOINT [ "eparent" ]', 'ENTRYPOINT ["echild"]', ["echild"], None), # e-EC ('ENTRYPOINT [ "eparent" ]', 'ENTRYPOINT ["echild"]\nCMD ["cchild"]', ["echild"], ["cchild"]), # e-eC ('ENTRYPOINT [ "e" ]', 'ENTRYPOINT ["e"]\nCMD ["cchild"]', ["e"], ["cchild"]), # ec- ('ENTRYPOINT [ "eparent" ]\nCMD["cparent"]', '', ["eparent"], ["cparent"]), # ec-c 
('ENTRYPOINT [ "eparent" ]\nCMD["c"]', 'CMD ["c"]', ["eparent"], ["c"]), # ec-C ('ENTRYPOINT [ "eparent" ]\nCMD["cparent"]', 'CMD ["cchild"]', ["eparent"], ["cchild"]), # ec-e ('ENTRYPOINT [ "e" ]\nCMD["cparent"]', 'ENTRYPOINT ["e"]', ["e"], None), # ec-E ('ENTRYPOINT [ "eparent" ]\nCMD["cparent"]', 'ENTRYPOINT ["echild"]', ["echild"], None), # ec-Ec ('ENTRYPOINT [ "eparent" ]\nCMD["c"]', 'ENTRYPOINT ["echild"]\nCMD ["c"]', ["echild"], ["c"]), # ec-eC ('ENTRYPOINT [ "e" ]\nCMD["cparent"]', 'ENTRYPOINT ["e"]\nCMD ["cchild"]', ["e"], ["cchild"]), # ec-EC ('ENTRYPOINT [ "eparent" ]\nCMD["cparent"]', 'ENTRYPOINT ["echild"]\nCMD ["cchild"]', ["echild"], ["cchild"]), # ec-ec ('ENTRYPOINT [ "e" ]\nCMD["c"]', 'ENTRYPOINT ["e"]\nCMD ["c"]', ["e"], ["c"]), ] for parentDF, childDF, expEntrypoint, expCmd in combinations: parent = docker.Dockerfile() parent.parse(parentDF) pManif = parent.toManifest() child = docker.Dockerfile() child.parse(childDF) cManif = child.toManifest() cManif.merge(pManif) self.assertEquals(cManif.entrypoint, expEntrypoint) self.assertEquals(cManif.cmd, expCmd) txt = """ CMD [ "/bin/bash" ] """ df1 = docker.Dockerfile() df1.parse(txt) txt = """ ENTRYPOINT [ "/bin/echo", "foo" ] """ df2 = docker.Dockerfile() df2.parse(txt) self.assertEquals(df1.cmd, ['/bin/bash', ]) self.assertEquals(df1.entrypoint, None) child = df1.toManifest() child.merge(df2.toManifest()) self.assertEquals(child.cmd, ['/bin/bash', ]) self.assertEquals(child.entrypoint, [ "/bin/echo", "foo", ])
#!/usr/bin/env python import ROOT ROOT.gROOT.SetBatch() import AtlasStyle from ploter import Ploter import os from array import array class Comparison: def __init__(self): self.out_root_name = "histograms.root" self.ps = Ploter() if os.path.exists(self.out_root_name): self.fout = ROOT.TFile.Open(self.out_root_name) else: self.fout = None self.cuts_on_weight = {} self.cuts_on_weight[364250] = 20 self.cuts_on_weight[364256] = 30 self.cuts_on_weight[364257] = 20 self.cuts_on_weight[364258] = 40 self.cuts_on_weight[364259] = 100 self.cuts_on_weight[364260] = 25 def has_large_weight(self, tree): try: return tree.w_MCw > self.cuts_on_weight[tree.run] except KeyError: return False def get_hist(self, tree, name, cut): if self.fout: return self.fout.Get(name) #h1 = ROOT.TH1F(name, name, 70, 130, 1530) # define a histogram with various bin-width bin_list = [] mass = 200 while mass <= 1500: bin_list.append(mass) if mass < 300: mass += 20 elif mass < 600: mass += 25 elif mass < 100: mass += 50 else: mass += 100 h1 = ROOT.TH1F(name, name, len(bin_list)-1, array('f', bin_list)) tree.SetBranchStatus("*", 0) tree.SetBranchStatus("run", 1) tree.SetBranchStatus("w_MCw", 1) tree.SetBranchStatus("higgs_m_fidBorn_4lsel", 1) tree.SetBranchStatus("dijet_m_fidBorn_4lsel", 1) tree.SetBranchStatus("dijet_deltaeta_fidBorn_4lsel", 1) tree.SetBranchStatus("event_type_fidBorn_4lsel", 1) total_weight = 0 for ientry in xrange(tree.GetEntries()): tree.GetEntry(ientry) if self.has_large_weight(tree): continue total_weight += tree.w_MCw if tree.higgs_m_fidBorn_4lsel == -999: continue pass_VBF = tree.dijet_m_fidBorn_4lsel > 400 and abs(tree.dijet_deltaeta_fidBorn_4lsel) > 3.3 pass_cuts = False event_type = tree.event_type_fidBorn_4lsel #if cut == 1 and not pass_VBF and event_type == 0: # # ggF 4mu # pass_cuts = True #elif cut == 2 and not pass_VBF and event_type == 1: # # ggF 4e # pass_cuts = True #elif cut == 3 and not pass_VBF and (event_type == 2 or event_type == 3): # # ggF 2mu2e # pass_cuts = True #elif cut == 4 and pass_VBF: # pass_cuts = True #elif cut == -1: # pass_cuts = True #else: # pass if pass_VBF: h1.Fill(tree.higgs_m_fidBorn_4lsel, tree.w_MCw) print "total weight:", total_weight if h1.GetEntries() == 0 or h1.GetIntegral() == 0: print h1.GetName(),"is empty!" 
exit(1) h1.Scale(1./total_weight) return h1 def go(self): base_dir = "/afs/cern.ch/atlas/groups/HSG2/H4l/run2/2016/MiniTrees/Prod_v12/mc/TheorySystematics/fidBorn/" tree_name = "tree_incl_all" f1_name = "mc15_13TeV.364250.Sherpa_222_NNPDF30NNLO_llll.root" f1_1_name = "mc15_13TeV.364251.Sherpa_222_NNPDF30NNLO_llll_m4l100_300_filt100_150.root" f1_2_name = "mc15_13TeV.364252.Sherpa_222_NNPDF30NNLO_llll_m4l300.root" f2_name = "mc15_13TeV.364256.Sherpa_222_NNPDF30NNLO_llll_CKKW15.root" f3_name = "group.phys-higgs.user.sabidi.364257.Sherpa_222_NNPDF30NNLO_llll_CKKW30.TRUTH4.root" f4_name = "group.phys-higgs.user.sabidi.364258.Sherpa_222_NNPDF30NNLO_llll_QSF025.TRUTH4.root" f5_name = "mc15_13TeV.364259.Sherpa_222_NNPDF30NNLO_llll_QSF4.root" f6_name = "group.phys-higgs.user.sabidi.364260.Sherpa_222_NNPDF30NNLO_llll_CSSKIN.TRUTH4.root" #f1 = ROOT.TFile.Open(base_dir+f1_name) #t1 = f1.Get(tree_name) t1 = ROOT.TChain(tree_name, tree_name) t1.Add(base_dir+f1_name) #t1.Add(base_dir+f1_1_name) #t1.Add(base_dir+f1_2_name) f2 = ROOT.TFile.Open(base_dir+f2_name) t2 = f2.Get(tree_name) f3 = ROOT.TFile.Open(base_dir+f3_name) t3 = f3.Get(tree_name) f4 = ROOT.TFile.Open(base_dir+f4_name) t4 = f4.Get(tree_name) f5 = ROOT.TFile.Open(base_dir+f5_name) t5 = f5.Get(tree_name) f6 = ROOT.TFile.Open(base_dir+f6_name) t6 = f6.Get(tree_name) cuts_dic = {} #cuts_dic["inc"] = -1 #cuts_dic["4mu"] = 1 #cuts_dic["4e"] = 2 #cuts_dic["2e2mu"] = 3 cuts_dic["VBF"] = 4 all_histograms = [] for key,value in cuts_dic.iteritems(): print key,value h1 = self.get_hist(t1, 'h_nominal_'+key, value) h2 = self.get_hist(t2, 'h_ckkw15_'+key, value) h3 = self.get_hist(t3, 'h_ckkw30_'+key, value) h4 = self.get_hist(t4, 'h_qsf0p25_'+key, value) h5 = self.get_hist(t5, 'h_qsf4_'+key, value) h6 = self.get_hist(t6, 'h_csskin_'+key, value) all_histograms.append(h1) all_histograms.append(h2) all_histograms.append(h3) all_histograms.append(h4) all_histograms.append(h5) all_histograms.append(h6) for hist in all_histograms: hist.Rebin(4) hist.Scale(1./hist.Integral()) list_ckkw = [h1, h2, h3] tag_ckkw = ["Nominal", "ckkw 15", "ckkw 30"] opt = {} opt['out_name'] = "sys_ckkw_"+key opt['add_yields'] = True opt['no_fill'] = True opt['ratio_title'] = "Variation/Nominal" self.ps.compare_hists(list_ckkw, tag_ckkw, **opt) #Ressummation scale list_qfs = [h1, h4, h5] tag_qfs = ["Nominal", "QSF 1/4", "QSF 4"] opt['out_name']= 'sys_qsf_'+key self.ps.compare_hists(list_qfs, tag_qfs, **opt) #Catani-Seymour shower option list_csskin = [h1, h6] tag_csskin = ["Nominal", "CSSKIN"] opt['out_name']= 'sys_csskin_'+key self.ps.compare_hists(list_csskin, tag_csskin, **opt) #everything list_all = [h1, h2, h3, h4, h5, h6] tag_all = ["Nominal", "ckkw 15", "ckkw 30", "QSF 1/4", "QSF 4", "CSSKIN"] opt['out_name']= 'sys_all_'+key self.ps.compare_hists(list_all, tag_all, **opt) if not self.fout: self.fout = ROOT.TFile.Open(self.out_root_name, "recreate") for hist in all_histograms: hist.Write() self.fout.Close() #f1.Close() f2.Close() f3.Close() f4.Close() f5.Close() f6.Close() if __name__ == "__main__": cmp = Comparison() cmp.go()
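
# A self-contained sketch of the variable-width binning that get_hist() above
# builds by hand. Note that the original loop's `elif mass < 100` branch can
# never fire (mass starts at 200); this sketch assumes the intended bound was
# 1000 GeV, giving 50 GeV bins between 600 and 1000 GeV. It needs a PyROOT
# environment; the histogram name is arbitrary.
from array import array

import ROOT


def make_variable_bin_hist(name):
    """TH1F with 20/25/50/100 GeV wide bins between 200 and 1500 GeV."""
    edges = []
    mass = 200
    while mass <= 1500:
        edges.append(mass)
        if mass < 300:
            mass += 20
        elif mass < 600:
            mass += 25
        elif mass < 1000:  # assumed intent; the loop above compares against 100
            mass += 50
        else:
            mass += 100
    return ROOT.TH1F(name, name, len(edges) - 1, array('f', edges))


if __name__ == '__main__':
    h = make_variable_bin_hist('h_m4l_demo')
    print('%d bins, first edge %.0f, last edge %.0f' % (
        h.GetNbinsX(), h.GetXaxis().GetXmin(), h.GetXaxis().GetXmax()))
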
# -*- coding: utf-8 -*- # JN 2016-01-12 from __future__ import print_function, division, absolute_import import os import csv import glob import numpy as np import tables from .man_spikes import SpikeManager from .tools import expandts, debug from .. import make_attrs FNAME_H5META = 'h5meta.txt' class H5Manager(object): """ Backend for h5 files containing continuously sampled data """ def __init__(self, files, modify=False, load_events=True): """ Initialize with given files """ self.timesteps = {} self.qs = {} self.entnames = {} self.bitvolts = {} self.fid = {} self.spm = None self.time_factors = {} self.events = {} self.modify = modify self.folder = os.path.dirname(files[0]) self.init_meta() if modify: mode = 'r+' else: mode = 'r' # open the requested files for fname in files: fid = tables.open_file(fname, mode) key = os.path.basename(fname)[:-6] if key in self.entnames: entname = self.entnames[key] self.fid[entname] = fid # check if events exist if load_events: event_fname = os.path.join(self.folder, key + '_events.h5') if os.path.exists(event_fname): debug('Loading ' + event_fname) self.events[entname] = tables.open_file(event_fname, 'r') self.chs = sorted(self.fid.keys()) if not len(self.chs): raise(ValueError('No channels found')) def __del__(self): """ close the h5 files """ if hasattr(self, 'spm'): del self.spm if hasattr(self, 'fid'): for fid in self.fid.values(): fid.close() def init_spikes(self, sign, label): """ initialize unsorted or sorted spike data """ self.spm = SpikeManager() for key, entname in self.entnames.items(): self.spm.check_add(key, entname) def init_meta(self): """ initialize the meta information for all channels """ if os.path.exists(os.path.join(self.folder, FNAME_H5META)): with open(FNAME_H5META, 'r') as fid: reader = csv.reader(fid, delimiter=';') metad = list(reader) else: cand = glob.glob(os.path.join(self.folder, '*_ds.h5')) metad = make_attrs(cand) effective_ts = {} for flds in metad: key = flds[0] entname = flds[1] adbitvolts = float(flds[2]) self.entnames[key] = entname self.bitvolts[entname] = adbitvolts self.qs[entname] = int(flds[3]) self.timesteps[entname] = float(flds[4])/1e3 effective_ts[entname] = self.qs[entname] * self.timesteps[entname] # calculate the relative sampling rates min_effective_ts = min(effective_ts.values()) for name, ts in effective_ts.items(): rel = ts/min_effective_ts if rel - int(rel) > 1e-6: raise Warning("Relative sampling rates have to be integers") # for each channel, store the multiplication factor self.time_factors[name] = int(rel) debug((self.entnames, self.bitvolts, self.qs, self.timesteps)) debug(self.time_factors) def _mod_times(self, q, start, stop): """ helper function for time stamp conversion """ start *= q stop *= q tstart = int(start/512) tstop = int(stop/512) + 1 shift = int((start % 512)/q) if tstart > tstop: raise Warning('Time stamp conversion failure!') return tstart, tstop, shift def translate(self, ch, sample): """ transform samples according to relative times """ factor = self.time_factors[ch] return int(sample/factor) def get_time(self, ch, start, stop): """ return times, by upscaling according to each channel's individual q factor """ q = self.qs[ch] ts = self.timesteps[ch] tstart, tstop, shift = self._mod_times(q, start, stop) obj = self.fid[ch] timeraw = obj.root.time[tstart:tstop+512/q] tar = np.array(timeraw, 'float64')/1000 # time needs to be shifted time = expandts(tar, ts, q)[shift:stop-start+shift] assert(time.shape[0] == (stop - start)) return time def get_data(self, ch, start, stop, 
traces=[]): """ read data """ adbitvolts = self.bitvolts[ch] # make it an array of columns here temp = [] # debug(self.fid.keys()) obj = self.fid[ch] for trace in traces: try: temp_d = obj.get_node('/data', trace)[start:stop] temp.append(temp_d) except tables.NoSuchNodeError as error: debug(error) data = np.vstack(temp) return data, adbitvolts def get_events(self, ch, start, stop, trace): """ read events in the given window """ if ch in self.events: obj = self.events[ch] else: return [] try: # this is a bit unefficient because we just need # certain parts, but it's not easy to do better temp_d = obj.get_node('/', trace)[:, :] except tables.NoSuchNodeError: return [] idx = (temp_d[:, 0] >= start) & (temp_d[:, 1] <= stop) return temp_d[idx, :] def add_trace(self, ch, trace_name): """ add a trace """ size = self.fid[ch].root.data.rawdata.shape[0] zeros = np.zeros(size, np.int16) if not self.modify: print('Cannot add trace because modify is False') return try: self.fid[ch].create_array('/data', trace_name, zeros) except tables.exceptions.NodeError as error: print(error) print('Not re-creating') def test(): """ simple test function """ from argparse import ArgumentParser import matplotlib.pyplot as mpl parser = ArgumentParser() parser.add_argument('fnames', nargs='+') args = parser.parse_args() h5man = H5Manager(args.fnames) chs = h5man.chs start = 10000 nblocks = 20000 # this is to test referencing ch = 'Cb1' start_ch = h5man.translate(ch, start) stop_ch = start_ch + h5man.translate(ch, nblocks) ref, adbitvolts = h5man.get_data(ch, start_ch, stop_ch, ['rawdata']) for i, ch in enumerate(chs): print(ch) start_ch = h5man.translate(ch, start) stop_ch = start_ch + h5man.translate(ch, nblocks) d, adbitvolts = h5man.get_data(ch, start_ch, stop_ch, ['rawdata']) time = h5man.get_time(ch, start_ch, stop_ch) print('Plotting {} seconds of data'.format((time[-1] - time[0])/1e3)) mpl.plot(time, (d - ref) * adbitvolts + 100*i, 'darkblue') mpl.text(time[0], i*100, ch, backgroundcolor='w') mpl.show() del h5man
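
# A dependency-free illustration of the _mod_times() arithmetic above: a
# downsampled sample range is scaled by the channel's q factor, mapped onto
# 512-sample timestamp blocks, and `shift` is the offset back into the
# expanded time array. The q and sample values below are made up for
# illustration only.

def mod_times(q, start, stop):
    start *= q
    stop *= q
    tstart = int(start / 512)
    tstop = int(stop / 512) + 1
    shift = int((start % 512) / q)
    return tstart, tstop, shift


if __name__ == '__main__':
    q = 16                    # hypothetical decimation factor for one channel
    start, stop = 1000, 1500  # downsampled sample indices
    print(mod_times(q, start, stop))
    # 1000 * 16 = 16000 raw samples -> block 31; (16000 % 512) / 16 = 8
    assert mod_times(q, start, stop) == (31, 47, 8)
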
import Queue import errno import logging import os import random import re import select import socket import time from collections import defaultdict, deque from threading import Lock from lbryum import __version__ as LBRYUM_VERSION from lbryum.constants import COIN, BLOCKS_PER_CHUNK, DEFAULT_PORTS, proxy_modes from lbryum.constants import SERVER_RETRY_INTERVAL, NODES_RETRY_INTERVAL, NETWORK_TIMEOUT from lbryum.util import DaemonThread, normalize_version from lbryum.blockchain import get_blockchain from lbryum.interface import Connection, Interface from lbryum.simple_config import SimpleConfig from lbryum.version import PROTOCOL_VERSION log = logging.getLogger(__name__) def is_online(host, ports): ip = socket.gethostbyname(host) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(2) result = sock.connect_ex((ip, int(ports['t']))) sock.close() if result == 0: log.info("%s:%s is online", host, ports['t']) return True return False def parse_servers(result): """ parse servers list into dict format""" servers = {} for item in result: host = item[1] out = {} version = None pruning_level = '-' if len(item) > 2: for v in item[2]: if re.match("[stgh]\d*", v): protocol, port = v[0], v[1:] if port == '': port = DEFAULT_PORTS[protocol] out[protocol] = port elif re.match("v(.?)+", v): version = v[1:] elif re.match("p\d*", v): pruning_level = v[1:] if pruning_level == '': pruning_level = '0' try: is_recent = cmp(normalize_version(version), normalize_version(PROTOCOL_VERSION)) >= 0 except Exception: is_recent = False if out and is_recent: out['pruning'] = pruning_level servers[host] = out return servers def filter_protocol(hostmap, protocol='s'): """Filters the hostmap for those implementing protocol. The result is a list in serialized form.""" eligible = [] for host, portmap in hostmap.items(): port = portmap.get(protocol) if port: eligible.append(serialize_server(host, port, protocol)) return eligible # noinspection PyPep8 def pick_random_server(hostmap, protocol='t', exclude_set=set()): eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set) return random.choice(eligible) if eligible else None def serialize_proxy(p): if type(p) != dict: return None return ':'.join([p.get('mode'), p.get('host'), p.get('port')]) def deserialize_proxy(s): if type(s) not in [str, unicode]: return None if s.lower() == 'none': return None proxy = {"mode": "socks5", "host": "localhost"} args = s.split(':') n = 0 if proxy_modes.count(args[n]) == 1: proxy["mode"] = args[n] n += 1 if len(args) > n: proxy["host"] = args[n] n += 1 if len(args) > n: proxy["port"] = args[n] else: proxy["port"] = "8080" if proxy["mode"] == "http" else "1080" return proxy def deserialize_server(server_str): host, port, protocol = str(server_str).split(':') assert protocol in 'st' int(port) # Throw if cannot be converted to int return host, port, protocol def serialize_server(host, port, protocol): return str(':'.join([host, port, protocol])) class Network(DaemonThread): """The Network class manages a set of connections to remote lbryum servers, each connected socket is handled by an Interface() object. Connections are initiated by a Connection() thread which stops once the connection succeeds or fails. Our external API: - Member functions get_header(), get_interfaces(), get_local_height(), get_parameters(), get_server_height(), get_status_value(), is_connected(), set_parameters(), stop() """ def __init__(self, config=None): if config is None: config = {} # Do not use mutables as default values! 
DaemonThread.__init__(self) self.config = SimpleConfig(config) if isinstance(config, dict) else config self.num_server = 8 if not self.config.get('oneserver') else 0 self.blockchain = get_blockchain(self.config, self) # A deque of interface header requests, processed left-to-right self.bc_requests = deque() # Server for addresses and transactions self.default_server = self.config.get('server') # Sanitize default server try: deserialize_server(self.default_server) except: self.default_server = None if not self.default_server: default_servers = self.config.get('default_servers') if not default_servers: raise ValueError('No servers have been specified') self.default_server = pick_random_server(default_servers) self.lock = Lock() self.pending_sends = [] self.message_id = 0 self.debug = False self.irc_servers = {} # returned by interface (list from irc) self.banner = '' self.fee = None self.relay_fee = None self.heights = {} self.merkle_roots = {} self.utxo_roots = {} # catchup counter, used to track catchup progress before chain is verified and headers saved self.catchup_progress = 0 # callbacks passed with subscriptions self.subscriptions = defaultdict(list) self.sub_cache = {} self.callbacks = defaultdict(list) dir_path = os.path.join(self.config.path, 'certs') if not os.path.exists(dir_path): os.mkdir(dir_path) # subscriptions and requests self.subscribed_addresses = set() # Requests from client we've not seen a response to self.unanswered_requests = {} # retry times self.server_retry_time = time.time() self.nodes_retry_time = time.time() # kick off the network. interface is the main server we are currently # communicating with. interfaces is the set of servers we are connecting # to or have an ongoing connection with self.interface = None self.interfaces = {} self.auto_connect = self.config.get('auto_connect', False) self.connecting = set() self.socket_queue = Queue.Queue() self.online_servers = {} self._set_online_servers() self.start_network(deserialize_server(self.default_server)[2], deserialize_proxy(self.config.get('proxy'))) def register_callback(self, callback, events): with self.lock: for event in events: self.callbacks[event].append(callback) def unregister_callback(self, callback): with self.lock: for callbacks in self.callbacks.values(): if callback in callbacks: callbacks.remove(callback) def trigger_callback(self, event, *args): with self.lock: callbacks = self.callbacks[event][:] for callback in callbacks: callback(event, *args) def get_server_height(self): return self.heights.get(self.default_server, 0) def server_is_lagging(self): sh = self.get_server_height() if not sh: log.info('no height for main interface') return True lh = self.get_local_height() result = (lh - sh) > 1 if result: log.info('%s is lagging (%d vs %d)', self.default_server, sh, lh) return result def set_status(self, status): self.connection_status = status self.notify('status') def is_connected(self): return self.interface is not None def is_connecting(self): return self.connection_status == 'connecting' def is_up_to_date(self): return self.unanswered_requests == {} def queue_request(self, method, params, interface=None): # If you want to queue a request on any interface it must go # through this function so message ids are properly tracked if interface is None: interface = self.interface message_id = self.message_id self.message_id += 1 if self.debug: log.debug('%s --> %s, %s, %s', interface.host, method, params, message_id) interface.queue_request(method, params, message_id) return message_id def 
send_subscriptions(self): log.info( 'sending subscriptions to %s. Unanswered requests: %s, Subscribed addresses: %s', self.interface.server, len(self.unanswered_requests), len(self.subscribed_addresses)) self.sub_cache.clear() # Resend unanswered requests requests = self.unanswered_requests.values() self.unanswered_requests = {} for request in requests: message_id = self.queue_request(request[0], request[1]) self.unanswered_requests[message_id] = request for addr in self.subscribed_addresses: self.queue_request('blockchain.address.subscribe', [addr]) self.queue_request('server.banner', []) self.queue_request('server.peers.subscribe', []) self.queue_request('blockchain.estimatefee', [2]) self.queue_request('blockchain.relayfee', []) def get_status_value(self, key): if key == 'status': value = self.connection_status elif key == 'banner': value = self.banner elif key == 'fee': value = self.fee elif key == 'updated': value = (self.get_local_height(), self.get_server_height()) elif key == 'servers': value = self.get_servers() elif key == 'interfaces': value = self.get_interfaces() return value def notify(self, key): if key in ['status', 'updated']: self.trigger_callback(key) else: self.trigger_callback(key, self.get_status_value(key)) def get_parameters(self): host, port, protocol = deserialize_server(self.default_server) return host, port, protocol, self.auto_connect def get_interfaces(self): """The interfaces that are in connected state""" return self.interfaces.keys() # Do an initial pruning of lbryum servers that don't have the specified port open def _set_online_servers(self): servers = self.config.get('default_servers', {}).iteritems() self.online_servers = { host: ports for host, ports in servers if is_online(host, ports) } def get_servers(self): if self.irc_servers: out = self.irc_servers else: out = self.online_servers return out def start_interface(self, server): if server not in self.interfaces and server not in self.connecting: if server == self.default_server: log.info("connecting to %s as new interface", server) self.set_status('connecting') self.connecting.add(server) c = Connection(server, self.socket_queue, self.config.path) def start_random_interface(self): exclude_set = self.disconnected_servers.union(set(self.interfaces)) server = pick_random_server(self.get_servers(), self.protocol, exclude_set) if server: self.start_interface(server) def start_interfaces(self): self.start_interface(self.default_server) for i in range(self.num_server - 1): self.start_random_interface() def start_network(self, protocol, proxy): assert not self.interface and not self.interfaces assert not self.connecting and self.socket_queue.empty() log.info('starting network') self.disconnected_servers = set([]) self.protocol = protocol self.start_interfaces() def stop_network(self): log.info("stopping network") for interface in self.interfaces.values(): self.close_interface(interface) assert self.interface is None assert not self.interfaces self.connecting = set() # Get a new queue - no old pending connections thanks! 
self.socket_queue = Queue.Queue() def set_parameters(self, host, port, protocol, proxy, auto_connect): proxy_str = serialize_proxy(proxy) server = serialize_server(host, port, protocol) self.config.set_key('auto_connect', auto_connect, False) self.config.set_key("proxy", proxy_str, False) self.config.set_key("server", server, True) # abort if changes were not allowed by config if self.config.get('server') != server or self.config.get('proxy') != proxy_str: return self.auto_connect = auto_connect if self.default_server != server: self.switch_to_interface(server) else: self.switch_lagging_interface() def switch_to_random_interface(self): '''Switch to a random connected server other than the current one''' servers = self.get_interfaces() # Those in connected state if self.default_server in servers: servers.remove(self.default_server) if servers: self.switch_to_interface(random.choice(servers)) def switch_lagging_interface(self, suggestion=None): '''If auto_connect and lagging, switch interface''' if self.server_is_lagging() and self.auto_connect: if suggestion and self.protocol == deserialize_server(suggestion)[2]: self.switch_to_interface(suggestion) else: self.switch_to_random_interface() def switch_to_interface(self, server): '''Switch to server as our interface. If no connection exists nor being opened, start a thread to connect. The actual switch will happen on receipt of the connection notification. Do nothing if server already is our interface.''' self.default_server = server if server not in self.interfaces: self.interface = None self.start_interface(server) return i = self.interfaces[server] if self.interface != i: log.info("switching to %s", server) # stop any current interface in order to terminate subscriptions self.close_interface(self.interface) self.interface = i self.send_subscriptions() self.set_status('connected') self.notify('updated') def close_interface(self, interface): if interface: self.interfaces.pop(interface.server) if interface.server == self.default_server: self.interface = None interface.close() def process_response(self, interface, response, callbacks): if self.debug: log.debug("<-- %s", response) error = response.get('error') result = response.get('result') method = response.get('method') params = response.get('params') # We handle some responses; return the rest to the client. 
if method == 'server.version': interface.server_version = result elif method == 'blockchain.headers.subscribe': if error is None: self.on_header(interface, result) elif method == 'server.peers.subscribe': if error is None: self.irc_servers = parse_servers(result) self.notify('servers') elif method == 'server.banner': if error is None: self.banner = result self.notify('banner') elif method == 'blockchain.estimatefee': if error is None: self.fee = int(result * COIN) log.info("recommended fee %s", self.fee) self.notify('fee') elif method == 'blockchain.relayfee': if error is None: self.relay_fee = int(result * COIN) log.info("relayfee %s", self.relay_fee) elif method == 'blockchain.block.get_chunk': self.on_get_chunk(interface, response) elif method == 'blockchain.block.get_header': self.on_get_header(interface, response) for callback in callbacks: callback(response) def get_index(self, method, params): """ hashable index for subscriptions and cache""" return str(method) + (':' + str(params[0]) if params else '') def process_responses(self, interface): responses = interface.get_responses() for request, response in responses: if request: method, params, message_id = request k = self.get_index(method, params) # client requests go through self.send() with a # callback, are only sent to the current interface, # and are placed in the unanswered_requests dictionary client_req = self.unanswered_requests.pop(message_id, None) if client_req: assert interface == self.interface callbacks = [client_req[2]] else: callbacks = [] # Copy the request method and params to the response response['method'] = method response['params'] = params # Only once we've received a response to an addr subscription # add it to the list; avoids double-sends on reconnection if method == 'blockchain.address.subscribe': self.subscribed_addresses.add(params[0]) else: if not response: # Closed remotely / misbehaving self.connection_down(interface.server) break # Rewrite response shape to match subscription request response method = response.get('method') params = response.get('params') k = self.get_index(method, params) if method == 'blockchain.headers.subscribe': response['result'] = params[0] response['params'] = [] elif method == 'blockchain.address.subscribe': response['params'] = [params[0]] # addr response['result'] = params[1] callbacks = self.subscriptions.get(k, []) # update cache if it's a subscription if method.endswith('.subscribe'): self.sub_cache[k] = response # Response is now in canonical form self.process_response(interface, response, callbacks) def send(self, messages, callback): '''Messages is a list of (method, params) tuples''' with self.lock: self.pending_sends.append((messages, callback)) def process_pending_sends(self): # Requests needs connectivity. If we don't have an interface, # we cannot process them. 
if not self.interface: return with self.lock: sends = self.pending_sends self.pending_sends = [] for messages, callback in sends: for method, params in messages: r = None if method.endswith('.subscribe'): k = self.get_index(method, params) # add callback to list l = self.subscriptions.get(k, []) if callback not in l: l.append(callback) self.subscriptions[k] = l # check cached response for subscriptions r = self.sub_cache.get(k) if r is not None: log.warning("cache hit: %s", k) callback(r) else: message_id = self.queue_request(method, params) self.unanswered_requests[message_id] = method, params, callback def unsubscribe(self, callback): '''Unsubscribe a callback to free object references to enable GC.''' # Note: we can't unsubscribe from the server, so if we receive # subsequent notifications process_response() will emit a harmless # "received unexpected notification" warning with self.lock: for v in self.subscriptions.values(): if callback in v: v.remove(callback) def connection_down(self, server): '''A connection to server either went down, or was never made. We distinguish by whether it is in self.interfaces.''' self.disconnected_servers.add(server) if server == self.default_server: self.set_status('disconnected') if server in self.interfaces: self.close_interface(self.interfaces[server]) self.heights.pop(server, None) self.notify('interfaces') def new_interface(self, server, socket): self.interfaces[server] = interface = Interface(server, socket) self.queue_request('blockchain.headers.subscribe', [], interface) if server == self.default_server: self.switch_to_interface(server) self.notify('interfaces') def maintain_sockets(self): '''Socket maintenance.''' # Responses to connection attempts? while not self.socket_queue.empty(): server, socket = self.socket_queue.get() self.connecting.remove(server) if socket: self.new_interface(server, socket) else: self.connection_down(server) # Send pings and shut down stale interfaces for interface in self.interfaces.values(): if interface.has_timed_out(): self.connection_down(interface.server) elif interface.ping_required(): params = [LBRYUM_VERSION, PROTOCOL_VERSION] self.queue_request('server.version', params, interface) now = time.time() # nodes if len(self.interfaces) + len(self.connecting) < self.num_server: self.start_random_interface() if now - self.nodes_retry_time > NODES_RETRY_INTERVAL: log.info('network: retrying connections') self.disconnected_servers = set([]) self.nodes_retry_time = now # main interface if not self.is_connected(): if self.auto_connect: if not self.is_connecting(): self.switch_to_random_interface() else: if self.default_server in self.disconnected_servers: if now - self.server_retry_time > SERVER_RETRY_INTERVAL: self.disconnected_servers.remove(self.default_server) self.server_retry_time = now else: self.switch_to_interface(self.default_server) def request_chunk(self, interface, data, idx): log.debug("requesting chunk %d" % idx) self.queue_request('blockchain.block.get_chunk', [idx], interface) data['chunk_idx'] = idx data['req_time'] = time.time() def _caught_up_to_interface(self, data): return self.get_local_height() >= data['if_height'] def _need_chunk_from_interface(self, data): return self.get_local_height() + BLOCKS_PER_CHUNK <= data['if_height'] def on_get_chunk(self, interface, response): """Handle receiving a chunk of block headers""" if self.bc_requests: req_if, data = self.bc_requests[0] req_idx = data.get('chunk_idx') # Ignore unsolicited chunks if req_if == interface and req_idx == response['params'][0]: 
idx = self.blockchain.connect_chunk(req_idx, response['result']) if idx < 0 or self._caught_up_to_interface(data): self.bc_requests.popleft() self.notify('updated') elif self._need_chunk_from_interface(data): self.request_chunk(interface, data, idx) else: self.request_header(interface, data, data['if_height']) def request_header(self, interface, data, height): log.debug("requesting header %d" % height) self.queue_request('blockchain.block.get_header', [height], interface) data['header_height'] = height data['req_time'] = time.time() if 'chain' not in data: data['chain'] = [] def on_get_header(self, interface, response): """Handle receiving a single block header""" if self.bc_requests: req_if, data = self.bc_requests[0] req_height = data.get('header_height', -1) # Ignore unsolicited headers if req_if == interface and req_height == response['params'][0]: next_height = self.blockchain.connect_header(data['chain'], response['result']) self.catchup_progress += 1 # If not finished, get the next header if next_height is True or next_height is False: self.catchup_progress = 0 self.bc_requests.popleft() if next_height: self.switch_lagging_interface(interface.server) self.notify('updated') else: log.warning("header didn't connect, dismissing interface") interface.close() else: self.request_header(interface, data, next_height) def bc_request_headers(self, interface, data): """Send a request for the next header, or a chunk of them, if necessary. """ local_height, if_height = self.get_local_height(), data['if_height'] if if_height < local_height: return False elif if_height > local_height + BLOCKS_PER_CHUNK: self.request_chunk(interface, data, (local_height + 1) / BLOCKS_PER_CHUNK) else: self.request_header(interface, data, if_height) return True def handle_bc_requests(self): """Work through each interface that has notified us of a new header. Send it requests if it is ahead of our blockchain object. """ while self.bc_requests: interface, data = self.bc_requests.popleft() # If the connection was lost move on if interface not in self.interfaces.values(): continue req_time = data.get('req_time') if not req_time: # No requests sent yet. This interface has a new height. # Request headers if it is ahead of our blockchain if not self.bc_request_headers(interface, data): continue elif time.time() - req_time > NETWORK_TIMEOUT: if interface.has_timed_out(): # disconnect only if responses are really not being received log.error("blockchain request timed out") self.connection_down(interface.server) continue # Put updated request state back at head of deque self.bc_requests.appendleft((interface, data)) break def wait_on_sockets(self): # Python docs say Windows doesn't like empty selects. 
# Sleep to prevent busy looping if not self.interfaces: time.sleep(0.1) return rin = [i for i in self.interfaces.values()] win = [i for i in self.interfaces.values() if i.unsent_requests] failed = False try: rout, wout, xout = select.select(rin, win, [], 0.2) except select.error as (code, msg): if code == errno.EINTR: return failed = True if failed or xout: for interface in self.interfaces.values(): self.connection_down(interface.server) return for interface in wout: interface.send_requests() for interface in rout: self.process_responses(interface) def run(self): log.info('Initializing the blockchain') self.blockchain.init() log.info('Blockchain initialized, starting run loop') while self.is_running(): self.maintain_sockets() self.wait_on_sockets() self.handle_bc_requests() self.run_jobs() # Synchronizer and Verifier self.process_pending_sends() log.info('Stopping network') self.stop_network() log.info("stopped") def on_header(self, i, header): height = header.get('block_height') if not height: return self.heights[i.server] = height self.merkle_roots[i.server] = header.get('merkle_root') self.utxo_roots[i.server] = header.get('utxo_root') # Queue this interface's height for asynchronous catch-up self.bc_requests.append((i, {'if_height': height})) if i == self.interface: self.switch_lagging_interface() self.notify('updated') def get_header(self, tx_height): return self.blockchain.read_header(tx_height) def get_local_height(self): return self.blockchain.height() def get_blocks_behind(self): """An estimate of the number of blocks remaing to download from the server""" local_height = self.get_local_height() remote_height = self.get_server_height() if remote_height < local_height: return None diff = remote_height - local_height if diff > 0: return diff - self.catchup_progress else: return 0 def synchronous_get(self, request, timeout=NETWORK_TIMEOUT): queue = Queue.Queue() self.send([request], queue.put) try: r = queue.get(True, timeout) except Queue.Empty: msg = 'Failed to get response from server within timeout of {}'.format(timeout) raise BaseException(msg) if r.get('error'): raise BaseException(r.get('error')) return r.get('result')
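
# A small, standalone sketch of the proxy string format used by
# serialize_proxy()/deserialize_proxy() above: "mode:host:port", where a
# missing port falls back to 8080 for http proxies and 1080 otherwise. It
# mirrors the module's behaviour for illustration rather than importing it,
# and it assumes proxy_modes contains 'socks4', 'socks5' and 'http'; the
# host names below are placeholders.

def proxy_from_string(s):
    if s is None or s.lower() == 'none':
        return None
    proxy = {'mode': 'socks5', 'host': 'localhost'}
    args = s.split(':')
    n = 0
    if args[n] in ('socks4', 'socks5', 'http'):  # assumed proxy_modes
        proxy['mode'] = args[n]
        n += 1
    if len(args) > n:
        proxy['host'] = args[n]
        n += 1
    if len(args) > n:
        proxy['port'] = args[n]
    else:
        proxy['port'] = '8080' if proxy['mode'] == 'http' else '1080'
    return proxy


if __name__ == '__main__':
    assert proxy_from_string('http:proxy.example.invalid') == {
        'mode': 'http', 'host': 'proxy.example.invalid', 'port': '8080'}
    assert proxy_from_string('none') is None
    print(proxy_from_string('socks5:127.0.0.1:9050'))
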
from time import sleep, strftime from os import system, remove from subprocess import Popen from traceback import print_exc from msvcrt import getch class AllFunctions(): def load(FileName): #Open text file and return todo list and pri list ToList = [] State = open(FileName, "r") for Line in State: Line = Line.replace("\n", "") ToList.append(Line) PList = [] TempList = [] Lenght = len(ToList) C = 0 while C < Lenght: CUT = ToList[C].split("PRI") TempList.append(CUT[0]) PList.append(CUT[1]) C = C+1 ToList = [] ToList = TempList return(ToList, PList) def save(FileName, ToList, PList): #Update text file to current lsit state remove(FileName) State = open(FileName, "a") C = 0 for Item in ToList: State.write(ToList[C]+"PRI"+PList[C]+"\n") C = C+1 def post(ToList, PList, PriOneCode, PriTwoCode, PriThreeCode, PriZeroCode): #Post list to screen with colors system("cls") print(" ") print("\t"+"\t"+"\033[1;37;40m "+"\t--Todo List--") print(" ") C = 0 Max = len(ToList) while C < Max: Coef = str(C+1) if PList[C] == "1": print("\t"+"\033[1;"+PriOneCode+";40m "+Coef+"\t"+ ToList[C]) elif PList[C] == "2": print("\t"+"\033[1;"+PriTwoCode+";40m "+Coef+"\t"+ ToList[C]) elif PList[C] == "3": print("\t"+"\033[1;"+PriThreeCode+";40m "+Coef+"\t"+ ToList[C]) else: print("\t"+"\033[1;"+PriZeroCode+";40m "+Coef+"\t"+ToList[C]) C = C+1 print("\033[1;37;40m "+" ") if len(ToList) == 0: print("\t"+"\033[1;35;40m "+"\tNothing To Do !!") print("\033[1;37;40m "+" ") def add(Item, ToList, PList): #add item to list ToList.append(Item) PList.append("0") return(ToList, PList) def pri(Num, NP, PList): #chage pri of item in list Max = len(PList) C = int(Num)-1 if C >= Max or C < 0: return if int(NP) > 3 or int(NP) < 0: return PList[C] = NP return(PList) def done(Num, ToList, PList): #remove item from list Max = len(ToList) C = int(Num)-1 if C >= Max or C < 0: return del ToList[C] del PList[C] return(ToList, PList) def switch(NumO, NumT, ToList, PList): #witch position of two items in list CO = int(NumO)-1 CT = int(NumT)-1 Max = len(ToList) if CO >= Max or CT >= Max or CO < 0 or CT < 0: return TempVal = "" TempVal = ToList[CO] ToList[CO] = ToList[CT] ToList[CT] = TempVal TempVal = "0" TempVal = PList[CO] PList[CO] = PList[CT] PList[CT] = TempVal return(ToList, PList) def fliter(TEXT, ToList, PList): #filter by a search term FlitList = [] FLP = [] C = 0 for Item in ToList: if TEXT in Item: FlitList.append(Item) FLP.append(PList[C]) C = C+1 return(FlitList, FLP) def fliterpri(Num, ToList, PList): #filter by a priority level FlitList = [] FLP = [] C = 0 for Item in PList: if Num in Item: FlitList.append(ToList[C]) FLP.append(Item) C = C+1 return(FlitList, FLP) def BulkAdd(ToList, PList): #open text flie to bulk add items Info = open("BulkAdd.txt", "a") Info.write("--- Write Tasks Line by Line below ---\n+XX @XX/XX/2017 TEXT") Info.close() p = Popen(('notepad.exe', 'BulkAdd.txt')) p.wait() Info = open("BulkAdd.txt", "r+") C = 0 for Line in Info: if "\n" in Line: Line = Line.replace("\n", "") if C > 0: ToList, PList = AllFunctions.add(Line, ToList, PList) C = C+1 Info.close() remove("BulkAdd.txt") return(ToList, PList) def BulkWork(FileName): #open text file to bulk edit items p = Popen(('notepad.exe', FileName)) p.wait() ToList, PList = AllFunctions.load(FileName) return(ToList, PList) def SortPri(ToList, PList, PriOneCode, PriTwoCode, PriThreeCode, PriZeroCode, S): #buble sort by priority if S == 0: system("cls") print("Sorting...") Lenght = len(PList) Cur = 0 C = 1 while AllFunctions.OrderCheck(PList): while C < Lenght: if 
int(PList[Cur]) <= int(PList[(Cur+1)]): ToList, PList = AllFunctions.switch((Cur+1), (Cur+2), ToList, PList) Cur = Cur+1 C = C+1 if S == 1: AllFunctions.post(ToList, PList, PriOneCode, PriTwoCode, PriThreeCode, PriZeroCode) sleep(0.02) C = 1 Cur = 0 return(ToList, PList) def OrderCheck(L): #check order of items in list Lenght = len(L) Cur = 0 C = 1 while C < Lenght: if int(L[Cur]) >= int(L[(Cur+1)]): Cur = Cur+1 else: return True C = C+1 return False def SortDate(ToList, PList, PriOneCode, PriTwoCode, PriThreeCode, PriZeroCode, S): #buble sort by date DateList = [] for Item in ToList: if "@" in Item: day = Item[(Item.index("@")+1)]+Item[(Item.index("@")+2)] month = Item[(Item.index("@")+4)]+Item[(Item.index("@")+5)] year = Item[(Item.index("@")+7)]+Item[(Item.index("@")+8)]+Item[(Item.index("@")+9)]+Item[(Item.index("@")+10)] Date = (int(day)*86164)+(int(month)*2592000)+(int(year)*31563000) DateList.append(Date) else: DateList.append(0) if S == 0: system("cls") print("Sorting...") Lenght = len(ToList) Cur = 0 C = 1 while AllFunctions.OrderCheck(DateList): while C < Lenght: if int(DateList[Cur]) <= int(DateList[(Cur+1)]): ToList, PList = AllFunctions.switch((Cur+1), (Cur+2), ToList, PList) temp = DateList[Cur] DateList[Cur] = DateList[(Cur+1)] DateList[(Cur+1)] = temp Cur = Cur+1 C = C+1 if S == 1: AllFunctions.post(ToList, PList, PriOneCode, PriTwoCode, PriThreeCode, PriZeroCode) sleep(0.02) C = 1 Cur = 0 ToList.reverse() PList.reverse() return(ToList, PList) def PriByDate(ToList, PList): #prooritiz by date DateList = [] for Item in ToList: if "@" in Item: day = Item[(Item.index("@")+1)]+Item[(Item.index("@")+2)] month = Item[(Item.index("@")+4)]+Item[(Item.index("@")+5)] year = Item[(Item.index("@")+7)]+Item[(Item.index("@")+8)]+Item[(Item.index("@")+9)]+Item[(Item.index("@")+10)] Date = (int(day)*86164)+(int(month)*2592000)+(int(year)*31536000) DateList.append(Date) else: DateList.append(0) TDay = int(strftime("%d")) TMonth = int(strftime("%m")) TYear = int(strftime("%y"))+2000 TDate = (TDay*86164)+(TMonth*2592000)+(TYear*31536000) C = 0 for Item in ToList: if DateList[C] <= TDate: PList[C] = "3" elif DateList[C] < (TDate+86400): PList[C] = "3" elif DateList[C] <= (TDate+172800): PList[C] = "2" elif DateList[C] <= (TDate+604800): PList[C] = "1" else: PList[C] = "0" C = C+1 return(ToList, PList) def Export(ToList, PList, ToFile): #export and move using a bat file Ex = open(ToFile, "w") C = 0 Ex.write("\t\t -- Todo -- \n") Ex.write("\n") for Item in ToList: Ex.write("PRI: "+PList[C]+"\t"+ToList[C]+"\n") C = C+1 Ex.close() system("start MoveList.bat") def UpCACHE(LineNum, Cvar, ToFile): LineNum = LineNum-1 with open(ToFile, 'r+') as UP: LineLenghts = [] for Line in UP: LineLenghts.append((len(Line)+1)) C = 0 Cur = 0 while C < LineNum: Cur = Cur+LineLenghts[C] C = C+1 UP.seek(Cur) PrintLen = len(str(Cvar)) Offset = 10 - PrintLen while Offset > 0: UP.write("0") Offset = Offset-1 UP.write(str(Cvar)) def ReCACHE(StartUpCount, ErrorCount, TaskCount, ToFile): StartUpCount = 0 ErrorCount = 0 TaskCount = 0 AllFunctions.UpCACHE(1, StartUpCount, ToFile) AllFunctions.UpCACHE(2, ErrorCount, ToFile) AllFunctions.UpCACHE(3, TaskCount, ToFile) def LoadCACHE(ToFile): with open(ToFile, "r+") as CACHE: CACHEList = [] for Line in CACHE: CACHEList.append(Line[0:10]) StartUpCount = int(CACHEList[0]) ErrorCount = int(CACHEList[1]) TaskCount = int(CACHEList[2]) PO = CACHEList[3] PTW = CACHEList[4] PTH = CACHEList[5] PZ = CACHEList[6] CACHEList = [] return(StartUpCount, ErrorCount, TaskCount, PO, PTW, PTH, PZ) def 
edit(text): C = len(text) A = 0 while True: system("cls") print(text[:C]+"|"+text[C:]) Ent = getch() if Ent == b'\x08': if C > 0: C = C-1 text = text[:C]+text[(C+1):] elif Ent == b'\r': system("cls") return(text) elif Ent == b'\xe0': A = 1 print(A) elif Ent == b'K' and A == 1: if C > 0: A = 0 C = C-1 elif Ent == b'M' and A == 1: if C < len(text): A = 0 C = C+1 elif Ent == b'S' and A == 1: if C < len(text): A = 0 text = text[:C]+text[(C+1):] elif A == 1: A = 0 pass else: Ent = str(Ent) Ent = Ent[2:] Ent = Ent[:1] text = text[:C]+Ent+text[C:] C = C+1 def template(): system("cls") print(" ") print("Templates Menu") print(" ") print("T - Open template file") print("S - switch back to TodoTXT") print(" ") Input = input("Choose an option: ") if Input == "T" or Input == "t": FileName = input("Enter template file name: ") if ".txt" in FileName: pass else: FileName = FileName+".txt" FileCreate = open(FileName, "a") FileCreate.close() return(FileName) elif Input == "S" or Input == "s": return("TodoTXT.txt") def color(ToFile, Pone, Ptwo, Pthree, Pzero): system("cls") print(" ") print("chage color for priority 0, 1, 2, or 3 ?") print(" ") Ent = input(":") if Ent == "0": P = 0 elif Ent == "1": P = 1 elif Ent == "2": P = 2 elif Ent == "3": P = 3 else: return C = 0 A = 0 while True: system("cls") print(" ") print("\033[1;37;40m Pick a color, use the up down arrow keys") print(" ") if C == 0: print("\033[1;37;40m White") elif C == 1: print("\033[1;36;40m Cyan") elif C == 2: print("\033[1;35;40m Purple") elif C == 3: print("\033[1;34;40m Blue") elif C == 4: print("\033[1;33;40m Yellow") elif C == 5: print("\033[1;32;40m Green") elif C == 6: print("\033[1;31;40m Red") Ent = getch() if Ent == b'\r': system("cls") if C == 0: Col = 37 elif C == 1: Col = 36 elif C == 2: Col = 35 elif C == 3: Col = 34 elif C == 4: Col = 33 elif C == 5: Col = 32 elif C == 6: Col = 31 if P == 0: AllFunctions.UpCACHE(7, Col, ToFile) return(Pone, Ptwo, Pthree, str(Col)) elif P == 1: AllFunctions.UpCACHE(4, Col, ToFile) return(str(Col), Ptwo, Pthree, Pzero) elif P == 2: AllFunctions.UpCACHE(5, Col, ToFile) return(Pone, str(Col), Pthree, Pzero) elif P == 3: AllFunctions.UpCACHE(6, Col, ToFile) return(Pone, Ptwo, str(Col), Pzero) elif Ent == b'\xe0': A = 1 print(A) elif Ent == b'P' and A == 1: A = 0 if C > 0: C = C-1 else: C = 6 elif Ent == b'H' and A == 1: A = 0 if C < 6: C = C+1 else: C = 0 else: A = 0 pass
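# The sorting and prioritising routines above recover a due date from a
# "@DD/MM/YYYY" tag by index arithmetic and turn it into a sortable number with
# hand-picked per-unit second multipliers (one year multiplier, 31563000,
# disagrees with the 31536000 used elsewhere). A minimal standalone sketch of
# the same idea using the standard library follows; the tag layout is inferred
# from the index offsets in SortDate/PriByDate, and the helper names
# parse_due_date/priority_for are illustrative, not part of AllFunctions.
from datetime import date, timedelta


def parse_due_date(item):
    """Return the date encoded as '@DD/MM/YYYY' inside item, or None."""
    at = item.find("@")
    if at == -1:
        return None
    day = int(item[at + 1:at + 3])
    month = int(item[at + 4:at + 6])
    year = int(item[at + 7:at + 11])
    return date(year, month, day)


def priority_for(due, today=None):
    """Map a due date onto the '0'..'3' priority codes used by PriByDate."""
    today = today or date.today()
    if due is None:
        return "0"
    if due <= today + timedelta(days=1):
        return "3"
    if due <= today + timedelta(days=2):
        return "2"
    if due <= today + timedelta(days=7):
        return "1"
    return "0"

# Example: priority_for(parse_due_date("pay rent @01/03/2024")) returns "3"
# when run on or after 29/02/2024.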
import argparse from yaml import load, dump from itertools import groupby import features_pb2 from pandas import DataFrame from ckm import * import glob import logging import collections import os import subprocess from tabulate import tabulate import time import re import StringIO import csv from softmax import * from sklearn import metrics from sklearn.linear_model import SGDClassifier ''' Driver for Convolutional Kernel Machine experiments ''' def main(): # First parse arguments for the CKM parser = argparse.ArgumentParser(description='Convolutional Kernel Machine') parser.add_argument('config', help='path to config file for experiment') parser.add_argument('-b', '--benchmark', action='store_const', const=True) args = parser.parse_args() exp = parse_experiment(args.config) conf_file = open("/tmp/ckn_" + str(time.time()), "w+") dump(exp, conf_file) conf_file.close() logging.info('Experiment mode: {0}'.format(exp.get("mode"))) if (exp.get("mode") == "python"): results = python_run(exp) elif (exp.get("mode") == "scala"): results = scala_run(exp, args.config, args.benchmark) if (not (results is None)): print results.to_csv(header=True) def flatten_dict(d, parent_key='', sep='_'): ''' Borrowed from: http://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys ''' items = [] for k, v in d.items(): if isinstance(v, collections.MutableMapping): items.extend(flatten_dict(v, k, sep=sep).items()) else: items.append((k, v)) return dict(items) def python_run(exp): if (exp.get("seed") == None): exp["seed"] = int(random.random*(2*32)) start_time = time.time() dataset = exp["dataset"] seed = exp["seed"] verbose = exp["verbose"] center = exp.get("center") (X_train, y_train), (X_test, y_test) = load_data(dataset, center) if (verbose): print "Data loaded Train shape: {0}, Test Shape: {1}, Train Labels shape: {2}, \ Test Labels shape {3}".format(X_train.shape, X_test.shape, y_train.shape, y_test.shape) X_train_lift, X_test_lift = gen_features(exp, X_train, X_test, seed) if (verbose): print "Data Featurized Train shape: {0}, Test Shape: {1}, Train Labels shape: {2}, \ Test Labels shape {3}".format(X_train_lift.shape, X_test_lift.shape, y_train.shape, y_test.shape) y_train_pred, y_test_pred = solve(exp, X_train_lift, y_train, X_test_lift, y_test, seed) runtime = time.time() - start_time results = compute_metrics(exp, y_train, y_train_pred, y_test, y_test_pred) results.insert(len(results.columns), "runtime", runtime) return results def scala_run(exp, yaml_path, benchmark): start_time = time.time() expid = exp["expid"] config_yaml = yaml_path env = os.environ.copy() # sanity check before running the process # if not os.path.isdir(outdir): # raise ValueError("output dir must exist") logfile = expid + ".spark.log" # if os.path.exists(logfile) and output_sanity_check: # raise ValueError("output dir has logfile, should be empty") if (benchmark): pipelineClass="pipelines.Benchmark" else: pipelineClass="pipelines.CKM" pipelineJar = "/home/eecs/vaishaal/ckm/keystone_pipeline/target/scala-2.10/ckm-assembly-0.1.jar" if not os.path.exists(pipelineJar): raise ValueError("Cannot find pipeline jar") # basically capturing the popen output and saving it to disk and # printing it to screen are a multithreaded nightmare, so we let # tee do the work for us yarn = exp.get("yarn") if (yarn): p = subprocess.Popen(" ".join(["./keystone_pipeline/bin/run-pipeline-yarn.sh", pipelineClass, pipelineJar, config_yaml]), shell=True, executable='/bin/bash') else: p = subprocess.Popen(" 
".join(["./keystone_pipeline/bin/run-pipeline.sh", pipelineClass, pipelineJar, config_yaml]), shell=True, executable='/bin/bash') #p = subprocess.Popen(cmd, shell=True, executable='/bin/bash', env = env) p.wait() if p.returncode != 0: raise Exception("invocation terminated with non-zero exit status") if (exp["solve"]): y_train, y_train_weights, ids_train = load_scala_results("/tmp/ckm_train_results") y_test, y_test_weights, ids_test = load_scala_results("/tmp/ckm_test_results") # TODO: Do more interesting things here if exp.get('augment'): y_train_weights, y_test_weights = augmented_eval(y_train_weights, y_test_weights, ids_train, ids_test) y_train_pred = np.argmax(y_train_weights, axis=1) y_test_pred = np.argmax(y_test_weights, axis=1) runtime = time.time() - start_time results = compute_metrics(exp, y_train, y_train_pred, y_test, y_test_pred) results.insert(len(results.columns), "runtime", runtime) if (exp.get("numClasses", 2) >= 5): top5_train, top5_test = compute_top5_acc(y_train, y_train_weights, y_test, y_test_weights) results.insert(len(results.columns), "top5_train_acc", top5_train) results.insert(len(results.columns), "top5_test_acc", top5_test) return results else: return None def augmented_eval(y_train_weights, y_test_weights, ids_train, ids_test): y_train_weights_avg = np.array(map(lambda x: np.average(np.array(map(lambda y: y[1], x[1])), axis=0), groupby(zip(ids_train, y_train_weights), lambda z: z[0]))) y_test_weights_avg = np.array(map(lambda x: np.average(np.array(map(lambda y: y[1], x[1])), axis=0), groupby(zip(ids_test, y_test_weights), lambda z: z[0]))) return y_train_weights_avg, y_test_weights_avg def gen_features(exp, X_train, X_test, seed): ckm_run = build_ckm(exp, seed) X_train_lift, X_test_lift = ckm_run(X_train, X_test) X_train = X_train_lift.reshape(X_train.shape[0], -1) X_test = X_test_lift.reshape(X_test.shape[0], -1) return X_train, X_test def save_features_python(X, y, name): X = X.reshape(X.shape[0], -1) dataset = features_pb2.Dataset() dataset.name = name for i in range(len(y)): datum = dataset.data.add() datum.label = int(y[i]) datum.data.extend(X[i, 0:].tolist()) f = open(dataset.name + ".bin", "wb") f.write(dataset.SerializeToString()) f.close() def load_features_python(name): dataset = features_pb2.Dataset() f = open("{0}.bin".format(name), "rb") dataset.ParseFromString(f.read()) f.close() X = [] y = [] for datum in dataset.data: x_i = list(datum.data) y_i = datum.label X.append(x_i) y.append(y_i) return np.array(X), np.array(y) def load_scala_results(name): f = open(name, "r") result_lines = f.readlines() results = np.array(map(lambda x: map(lambda y: float(y), x.split(",")), result_lines)) labels = results[:, 1] ids = results[:,0] weights = results[:, 2:] return labels, weights, ids def load_all_features_from_dir(dirname): files = glob.glob(dirname + "/part*") all_features = [] all_labels = [] i = 0 for f in files: print "Part {0}".format(i) features, labels = load_features_from_text(f) all_features.extend(features) all_labels.extend(labels) i += 1 return np.vstack(all_features), np.hstack(all_labels) def load_features_from_text(fname): ts = time.time() with open(fname) as open_file: csv_string = open_file.read() csvstring_stripped = re.sub('\(|\)', '', csv_string) f = StringIO.StringIO(csvstring_stripped) reader = csv.reader(f, delimiter=',') lst = list(reader) array = np.array(lst) Y = array[:,-1].astype('int32') X = array[:,:-1].astype('float64') print "Loading file took " + str(time.time() - ts) return X, Y def 
save_text_features_as_npz(fname_train, fname_test): print("Loading Train Features") X_train, y_train = load_all_features_from_dir(fname_train) print("Loading Test Features") X_test, y_test = load_all_features_from_dir(fname_test) train_file = open(fname_train + ".npz", "w+") test_file = open(fname_test + ".npz", "w+") print("Saving Train Features") np.savez(train_file, X_train=X_train, y_train=y_train) print("Saving Test Features") np.savez(test_file, X_test=X_test, y_test=y_test) def scp_features_to_c78(fname_train, fname_test, path="/work/vaishaal"): save_text_features_as_npz(fname_train, fname_test) print("Moving features to c78") p = subprocess.Popen(" ".join(["scp", fname_train +".npz", "c78.millennium.berkeley.edu:{0}".format(path)]), shell=True, executable='/bin/bash') p.wait() p = subprocess.Popen(" ".join(["scp", fname_test +".npz", "c78.millennium.berkeley.edu:{0}".format(path)]), shell=True, executable='/bin/bash') p.wait() if p.returncode != 0: raise Exception("invocation terminated with non-zero exit status") def compute_metrics(exp, y_train, y_train_pred, y_test, y_test_pred): exp_flatten = flatten_dict(exp) exp_flatten = dict(map(lambda x: (x[0], [str(x[1])]), exp_flatten.items())) df = DataFrame.from_dict(exp_flatten) train_acc = metrics.accuracy_score(y_train, y_train_pred) test_acc = metrics.accuracy_score(y_test, y_test_pred) df.insert(len(df.columns), "train_acc", train_acc) df.insert(len(df.columns), "test_acc", test_acc) return df def compute_top5_acc(y_train, y_train_weights, y_test, y_test_weights): top5_train = y_train_weights.argsort(axis=1)[:,-5:] top5_test = y_test_weights.argsort(axis=1)[:,-5:] train_res = np.any(np.equal(y_train[:,np.newaxis],top5_train), axis=1) test_res = np.any(np.equal(y_test[:,np.newaxis],top5_test), axis=1) test_acc = test_res.sum()/(1.0*len(test_res)) train_acc = train_res.sum()/(1.0*len(train_res)) return train_acc, test_acc def build_ckm(exp, seed): layers = exp.get("layers") filters = exp.get("filters") bandwidth = exp.get("bandwidth") patch_sizes = exp.get("patch_sizes") verbose = exp.get("verbose") pool = exp.get("pool") channels = exp.get("numChannels", 1) def ckm_run(X_train, X_test): for i in range(layers): if (i == 0 and exp.get("whiten")): whiten = True else: whiten = False patch_shape = (patch_sizes[i], patch_sizes[i]) X_train, X_test = ckm_apply(X_train, X_test, patch_shape, bandwidth[i], filters[i], pool=pool[i], random_state=(seed+i), whiten=whiten, numChannels=channels) return X_train, X_test return ckm_run def solve(exp, X_train, y_train, X_test, y_test, seed): X_train = X_train.reshape(X_train.shape[0], -1) X_test = X_test.reshape(X_test.shape[0], -1) loss = exp["loss"] reg = exp["reg"] verbose = exp["verbose"] if (loss == "softmax"): y_train_pred, y_test_pred = softmax_gn(X_train, y_train, X_test, y_test, reg, verbose=True) else: clf = SGDClassifier(loss=loss, random_state=RANDOM_STATE, alpha=reg, verbose=int(verbose)) clf.fit(X_train, y_train) y_train_pred = clf.predict(X_train) y_test_pred = clf.predict(X_test) return y_train_pred, y_test_pred def parse_experiment(config_file): return load(open(config_file)) if __name__ == "__main__": main()
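# compute_top5_acc above implements a top-k accuracy check by argsort-ing the
# per-class weight matrix and testing label membership with a broadcast
# equality. A small self-contained NumPy sketch of that test follows; the
# (n_samples, n_classes) shape and the name top_k_accuracy are assumptions for
# illustration, not part of this driver.
import numpy as np


def top_k_accuracy(y_true, scores, k=5):
    # Indices of the k highest-scoring classes for every sample.
    top_k = scores.argsort(axis=1)[:, -k:]
    # A sample counts as correct if its true label is among those k indices.
    hits = np.any(np.equal(y_true[:, np.newaxis], top_k), axis=1)
    return hits.sum() / (1.0 * len(hits))

# Example:
#   top_k_accuracy(np.array([3, 2]),
#                  np.array([[0.1, 0.5, 0.2, 0.9],
#                            [0.7, 0.1, 0.1, 0.1]]), k=2)  -> 0.5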
#!/usr/bin/python # Software License Agreement (BSD License) # # Copyright (c) 2012, Willow Garage, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Willow Garage, Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. '''This file generates shell code for the setup.SHELL scripts to set environment variables''' from __future__ import print_function import argparse import copy import errno import os import platform import sys CATKIN_MARKER_FILE = '.catkin' system = platform.system() IS_DARWIN = (system == 'Darwin') IS_WINDOWS = (system == 'Windows') # subfolder of workspace prepended to CMAKE_PREFIX_PATH ENV_VAR_SUBFOLDERS = { 'CMAKE_PREFIX_PATH': '', 'CPATH': 'include', 'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')], 'PATH': 'bin', 'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')], 'PYTHONPATH': 'lib/python2.7/dist-packages', } def rollback_env_variables(environ, env_var_subfolders): ''' Generate shell code to reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH. This does not cover modifications performed by environment hooks. ''' lines = [] unmodified_environ = copy.copy(environ) for key in sorted(env_var_subfolders.keys()): subfolders = env_var_subfolders[key] if not isinstance(subfolders, list): subfolders = [subfolders] for subfolder in subfolders: value = _rollback_env_variable(unmodified_environ, key, subfolder) if value is not None: environ[key] = value lines.append(assignment(key, value)) if lines: lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH')) return lines def _rollback_env_variable(environ, name, subfolder): ''' For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder. :param subfolder: str '' or subfoldername that may start with '/' :returns: the updated value of the environment variable. 
''' value = environ[name] if name in environ else '' env_paths = [path for path in value.split(os.pathsep) if path] value_modified = False if subfolder: if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)): subfolder = subfolder[1:] if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)): subfolder = subfolder[:-1] for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True): path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path path_to_remove = None for env_path in env_paths: env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path if env_path_clean == path_to_find: path_to_remove = env_path break if path_to_remove: env_paths.remove(path_to_remove) value_modified = True new_value = os.pathsep.join(env_paths) return new_value if value_modified else None def _get_workspaces(environ, include_fuerte=False, include_non_existing=False): ''' Based on CMAKE_PREFIX_PATH return all catkin workspaces. :param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool`` ''' # get all cmake prefix paths env_name = 'CMAKE_PREFIX_PATH' value = environ[env_name] if env_name in environ else '' paths = [path for path in value.split(os.pathsep) if path] # remove non-workspace paths workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))] return workspaces def prepend_env_variables(environ, env_var_subfolders, workspaces): ''' Generate shell code to prepend environment variables for the all workspaces. ''' lines = [] lines.append(comment('prepend folders of workspaces to environment variables')) paths = [path for path in workspaces.split(os.pathsep) if path] prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '') lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix)) for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']): subfolder = env_var_subfolders[key] prefix = _prefix_env_variable(environ, key, paths, subfolder) lines.append(prepend(environ, key, prefix)) return lines def _prefix_env_variable(environ, name, paths, subfolders): ''' Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items. 
''' value = environ[name] if name in environ else '' environ_paths = [path for path in value.split(os.pathsep) if path] checked_paths = [] for path in paths: if not isinstance(subfolders, list): subfolders = [subfolders] for subfolder in subfolders: path_tmp = path if subfolder: path_tmp = os.path.join(path_tmp, subfolder) # exclude any path already in env and any path we already added if path_tmp not in environ_paths and path_tmp not in checked_paths: checked_paths.append(path_tmp) prefix_str = os.pathsep.join(checked_paths) if prefix_str != '' and environ_paths: prefix_str += os.pathsep return prefix_str def assignment(key, value): if not IS_WINDOWS: return 'export %s="%s"' % (key, value) else: return 'set %s=%s' % (key, value) def comment(msg): if not IS_WINDOWS: return '# %s' % msg else: return 'REM %s' % msg def prepend(environ, key, prefix): if key not in environ or not environ[key]: return assignment(key, prefix) if not IS_WINDOWS: return 'export %s="%s$%s"' % (key, prefix, key) else: return 'set %s=%s%%%s%%' % (key, prefix, key) def find_env_hooks(environ, cmake_prefix_path): ''' Generate shell code with found environment hooks for the all workspaces. ''' lines = [] lines.append(comment('found environment hooks in workspaces')) generic_env_hooks = [] generic_env_hooks_workspace = [] specific_env_hooks = [] specific_env_hooks_workspace = [] generic_env_hooks_by_filename = {} specific_env_hooks_by_filename = {} generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh' specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None # remove non-workspace paths workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))] for workspace in reversed(workspaces): env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d') if os.path.isdir(env_hook_dir): for filename in sorted(os.listdir(env_hook_dir)): if filename.endswith('.%s' % generic_env_hook_ext): # remove previous env hook with same name if present if filename in generic_env_hooks_by_filename: i = generic_env_hooks.index(generic_env_hooks_by_filename[filename]) generic_env_hooks.pop(i) generic_env_hooks_workspace.pop(i) # append env hook generic_env_hooks.append(os.path.join(env_hook_dir, filename)) generic_env_hooks_workspace.append(workspace) generic_env_hooks_by_filename[filename] = generic_env_hooks[-1] elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext): # remove previous env hook with same name if present if filename in specific_env_hooks_by_filename: i = specific_env_hooks.index(specific_env_hooks_by_filename[filename]) specific_env_hooks.pop(i) specific_env_hooks_workspace.pop(i) # append env hook specific_env_hooks.append(os.path.join(env_hook_dir, filename)) specific_env_hooks_workspace.append(workspace) specific_env_hooks_by_filename[filename] = specific_env_hooks[-1] env_hooks = generic_env_hooks + specific_env_hooks env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace count = len(env_hooks) lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count)) for i in range(count): lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i])) lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i])) return lines def _parse_arguments(args=None): parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.') 
    parser.add_argument('--extend', action='store_true',
                        help='Skip unsetting previous environment variables to extend context')
    return parser.parse_known_args(args=args)[0]


if __name__ == '__main__':
    try:
        try:
            args = _parse_arguments()
        except Exception as e:
            print(e, file=sys.stderr)
            sys.exit(1)

        # environment at generation time
        CMAKE_PREFIX_PATH = '/home/perdziu/catkin_ws/devel;/opt/ros/indigo'.split(';')
        # prepend current workspace if not already part of CPP
        base_path = os.path.dirname(__file__)
        if base_path not in CMAKE_PREFIX_PATH:
            CMAKE_PREFIX_PATH.insert(0, base_path)
        CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)

        environ = dict(os.environ)
        lines = []
        if not args.extend:
            lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
        lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
        lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
        print('\n'.join(lines))

        # need to explicitly flush the output
        sys.stdout.flush()
    except IOError as e:
        # and catch potential "broken pipe" if stdout is not writable
        # which can happen when piping the output to a file but the disk is full
        if e.errno == errno.EPIPE:
            print(e, file=sys.stderr)
            sys.exit(2)
        raise

    sys.exit(0)
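# A minimal standalone sketch of the shell-code emission pattern implemented by
# assignment() and prepend() above: build an 'export NAME=...' line that puts
# new paths in front of an existing variable and let the sourcing shell apply
# it. This is a simplified POSIX-only illustration (the real helpers also emit
# Windows 'set' lines and take their prefix from _prefix_env_variable); the
# name emit_prepend and the example paths are assumptions, not part of the
# generated script.
import os


def emit_prepend(name, new_paths, current_value):
    """Return a POSIX shell line prepending new_paths to the variable name."""
    prefix = os.pathsep.join(new_paths)
    if not current_value:
        return 'export %s="%s"' % (name, prefix)
    return 'export %s="%s%s$%s"' % (name, prefix, os.pathsep, name)

# Example (on POSIX, where os.pathsep is ':'):
#   emit_prepend('CMAKE_PREFIX_PATH', ['/opt/ros/indigo'], '')
#   -> 'export CMAKE_PREFIX_PATH="/opt/ros/indigo"'
#   emit_prepend('PATH', ['/opt/ros/indigo/bin'], '/usr/bin')
#   -> 'export PATH="/opt/ros/indigo/bin:$PATH"'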
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """File processing utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import errno import functools import glob as _glob import os import shutil import threading import six class _GFileBase(six.Iterator): """Base I/O wrapper class. Similar semantics to Python's file object.""" # pylint: disable=protected-access def _synchronized(fn): """Synchronizes file I/O for methods in GFileBase.""" @functools.wraps(fn) def sync(self, *args, **kwargs): # Sometimes a GFileBase method is called before the instance # has been properly initialized. Check that _locker is available. if hasattr(self, '_locker'): self._locker.lock() try: return fn(self, *args, **kwargs) finally: if hasattr(self, '_locker'): self._locker.unlock() return sync # pylint: enable=protected-access def __init__(self, name, mode, locker): """Create the GFileBase object with the given filename, mode, and locker. Args: name: string, the filename. mode: string, the mode to open the file with (e.g. "r", "w", "a+"). locker: the thread locking object (e.g. _PythonLocker) for controlling thread access to the I/O methods of this class. """ self._name = name self._mode = mode self._locker = locker self._fp = open(name, mode) def __enter__(self): """Make GFileBase usable with "with" statement.""" return self def __exit__(self, unused_type, unused_value, unused_traceback): """Make GFileBase usable with "with" statement.""" self.close() @_synchronized def __del__(self): # __del__ is sometimes called before initialization, in which # case the object is not fully constructed. Check for this here # before trying to close the file handle. if hasattr(self, '_fp'): self._fp.close() @_synchronized def flush(self): """Flush the underlying file handle.""" return self._fp.flush() @property @_synchronized def closed(self): """Returns "True" if the file handle is closed. Otherwise False.""" return self._fp.closed @_synchronized def write(self, data): """Write data to the underlying file handle. Args: data: The string to write to the file handle. """ self._fp.write(data) @_synchronized def writelines(self, seq): """Write a sequence of strings to the underlying file handle.""" self._fp.writelines(seq) @_synchronized def tell(self): """Return the location from the underlying file handle. Returns: An integer location (which can be used in e.g., seek). """ return self._fp.tell() @_synchronized def seek(self, offset, whence=0): """Seek to offset (conditioned on whence) in the underlying file handle. Args: offset: int, the offset within the file to seek to. whence: 0, 1, or 2. See python's seek() documentation for details. """ self._fp.seek(offset, whence) @_synchronized def truncate(self, new_size=None): """Truncate the underlying file handle to new_size. Args: new_size: Size after truncation. 
If None, the file handle is truncated to 0 bytes. """ self._fp.truncate(new_size) @_synchronized def readline(self, max_length=-1): """Read a single line (up to max_length) from the underlying file handle. Args: max_length: The maximum number of chsaracters to read. Returns: A string, including any newline at the end, or empty string if at EOF. """ return self._fp.readline(max_length) @_synchronized def readlines(self, sizehint=None): """Read lines from the underlying file handle. Args: sizehint: See the python file.readlines() documentation. Returns: A list of strings from the underlying file handle. """ if sizehint is not None: return self._fp.readlines(sizehint) else: return self._fp.readlines() def __iter__(self): """Enable line iteration on the underlying handle (not synchronized).""" return self # Not synchronized def __next__(self): """Enable line iteration on the underlying handle (not synchronized). Returns: An line iterator from the underlying handle. Example: # read a file's lines by consuming the iterator with a list with open("filename", "r") as fp: lines = list(fp) """ return next(self._fp) @_synchronized def Size(self): # pylint: disable=invalid-name """Get byte size of the file from the underlying file handle.""" cur = self.tell() try: self.seek(0, 2) size = self.tell() finally: self.seek(cur) return size @_synchronized def read(self, n=-1): """Read n bytes from the underlying file handle. Args: n: Number of bytes to read (if negative, read to end of file handle.) Returns: A string of the bytes read, up to the end of file. """ return self._fp.read(n) @_synchronized def close(self): """Close the underlying file handle.""" self._fp.close() # Declare wrappers as staticmethods at the end so that we can # use them as decorators. _synchronized = staticmethod(_synchronized) class GFile(_GFileBase): """File I/O wrappers with thread locking.""" def __init__(self, name, mode='r'): super(GFile, self).__init__(name, mode, _Pythonlocker()) class FastGFile(_GFileBase): """File I/O wrappers without thread locking.""" def __init__(self, name, mode='r'): super(FastGFile, self).__init__(name, mode, _Nulllocker()) # locker classes. Note that locks must be reentrant, so that multiple # lock() calls by the owning thread will not block. class _Pythonlocker(object): """A locking strategy that uses standard locks from the thread module.""" def __init__(self): self._lock = threading.RLock() def lock(self): self._lock.acquire() def unlock(self): self._lock.release() class _Nulllocker(object): """A locking strategy where lock() and unlock() methods are no-ops.""" def lock(self): pass def unlock(self): pass def Exists(path): # pylint: disable=invalid-name """Returns True iff "path" exists (as a dir, file, non-broken symlink).""" return os.path.exists(path) def IsDirectory(path): # pylint: disable=invalid-name """Return True iff "path" exists and is a directory.""" return os.path.isdir(path) def Glob(glob): # pylint: disable=invalid-name """Return a list of filenames matching the glob "glob".""" return _glob.glob(glob) def MkDir(path, mode=0o755): # pylint: disable=invalid-name """Create the directory "path" with the given mode. Args: path: The directory path mode: The file mode for the directory Returns: None Raises: OSError: if the path already exists """ os.mkdir(path, mode) def MakeDirs(path, mode=0o755): # pylint: disable=invalid-name """Recursively create the directory "path" with the given mode. Args: path: The directory path. 
mode: The file mode for the created directories Raises: OSError: if the path already exists """ # NOTE(mrry): MakeDirs("") should be a no-op to match other # implementations of tf.gfile. if path: try: os.makedirs(path, mode) except OSError as e: if e.errno != errno.EEXIST: raise def RmDir(directory): # pylint: disable=invalid-name """Removes the directory "directory" iff the directory is empty. Args: directory: The directory to remove. Raises: OSError: If the directory does not exist or is not empty. """ os.rmdir(directory) def Remove(path): # pylint: disable=invalid-name """Delete the (non-directory) file "path". Args: path: The file to remove. Raises: OSError: If "path" does not exist, is a directory, or cannot be deleted. """ os.remove(path) def Rename(oldpath, newpath, overwrite=False): """Rename or move a file, or a local directory. Args: oldpath: string; a pathname of a file. newpath: string; a pathname to which the file will be moved. overwrite: boolean; if false, it is an error for newpath to be occupied by an existing file. Raises: OSError: If "newpath" is occupied by an existing file and overwrite=False. """ if not overwrite and Exists(newpath) and not IsDirectory(newpath): raise OSError(errno.EEXIST, os.strerror(errno.EEXIST), newpath) os.rename(oldpath, newpath) def DeleteRecursively(path): # pylint: disable=invalid-name """Delete the file or directory "path" recursively. Args: path: The path to remove (may be a non-empty directory). Raises: OSError: If the path does not exist or cannot be deleted. """ if IsDirectory(path): shutil.rmtree(path) else: Remove(path) def ListDirectory(directory, return_dotfiles=False): # pylint: disable=invalid-name """Returns a list of files in dir. As with the standard os.listdir(), the filenames in the returned list will be the basenames of the files in dir (not absolute paths). To get a list of absolute paths of files in a directory, a client could do: file_list = gfile.ListDir(my_dir) file_list = [os.path.join(my_dir, f) for f in file_list] (assuming that my_dir itself specified an absolute path to a directory). Args: directory: the directory to list return_dotfiles: if True, dotfiles will be returned as well. Even if this arg is True, '.' and '..' will not be returned. Returns: ['list', 'of', 'files']. The entries '.' and '..' are never returned. Other entries starting with a dot will only be returned if return_dotfiles is True. Raises: OSError: if there is an error retrieving the directory listing. """ files = os.listdir(directory) if not return_dotfiles: files = [f for f in files if not f.startswith('.')] return files def Walk(top, topdown=1, onerror=None): """Recursive directory tree generator. Args: top: string, a pathname. topdown: bool, should traversal be pre-order (True) or post-order (False) onerror: function, optional callback for errors. By default, errors that occur when listing a directory are ignored. (This is the same semantics as Python's os.walk() generator.) If the optional argument "onerror" is specified, it should be a function. It will be called with one argument, an os.error instance. It can return to continue with the walk, or reraise the exception to abort the walk. By default, the walk follows symlinks that resolve into directories. Yields: # Each yield is a 3-tuple: the pathname of a directory, followed # by lists of all its subdirectories and leaf files. 
    (dirname, [subdirname, subdirname, ...], [filename, filename, ...])
  """
  return os.walk(top, topdown=topdown, onerror=onerror, followlinks=True)


def Stat(path):  # pylint: disable=invalid-name
  """Gets the status of a file.

  Args:
    path: The file to call Stat() on.

  Does the equivalent of Stat() on the specified "path" and return file
  properties.

  Returns:
    An object whose attributes give information on the file.

  Raises:
    OSError: If "path" does not exist.
  """
  statinfo = os.stat(path)
  filestat = collections.namedtuple('FileStat', ['mtime'])
  filestat.mtime = statinfo.st_mtime
  return filestat


def Copy(oldpath, newpath, overwrite=False):
  """Copy a file.

  Args:
    oldpath: string; a pathname of a file.
    newpath: string; a pathname to which the file will be copied.
    overwrite: boolean; if false, it is an error for newpath to be
      occupied by an existing file.

  Raises:
    OSError: If "newpath" is occupied by an existing file and
      overwrite=False, or any error thrown by shutil.copy.
  """
  if not overwrite and Exists(newpath):
    raise OSError(errno.EEXIST, os.strerror(errno.EEXIST), newpath)
  shutil.copy(oldpath, newpath)


def Open(name, mode='r'):
  """Exact API match to the standard open.

  Args:
    name: a file name, either local or a gfile compatible.
    mode: for example "w" to open the file for writing.

  Returns:
    A threadsafe gfile.GFile object.
  """
  return GFile(name, mode=mode)
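# The GFile wrappers above serialise access to the underlying file object with
# a method decorator (_synchronized) backed by a reentrant lock. A minimal
# standalone sketch of that pattern, independent of the gfile API, is shown
# below; the names synchronized and LockedWriter are illustrative only.
import functools
import threading


def synchronized(method):
  """Run method while holding the instance's reentrant lock."""
  @functools.wraps(method)
  def wrapper(self, *args, **kwargs):
    with self._lock:
      return method(self, *args, **kwargs)
  return wrapper


class LockedWriter(object):
  """Appends to a file; calls from several threads do not interleave mid-call."""

  def __init__(self, path):
    self._lock = threading.RLock()
    self._fp = open(path, 'a')

  @synchronized
  def write(self, data):
    self._fp.write(data)

  @synchronized
  def close(self):
    self._fp.close()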
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # Copyright (c) 2012 VMware, Inc. # Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test suite for VMwareAPI. """ import collections import contextlib import copy import datetime import time import mock import mox from oslo.config import cfg import suds from nova import block_device from nova.compute import api as compute_api from nova.compute import power_state from nova.compute import task_states from nova.compute import vm_states from nova import context from nova import exception from nova.openstack.common import jsonutils from nova.openstack.common import timeutils from nova.openstack.common import units from nova.openstack.common import uuidutils from nova import test from nova.tests import fake_instance import nova.tests.image.fake from nova.tests import matchers from nova.tests import test_flavors from nova.tests import utils from nova.tests.virt.vmwareapi import stubs from nova import utils as nova_utils from nova.virt import driver as v_driver from nova.virt import fake from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import ds_util from nova.virt.vmwareapi import error_util from nova.virt.vmwareapi import fake as vmwareapi_fake from nova.virt.vmwareapi import imagecache from nova.virt.vmwareapi import vim from nova.virt.vmwareapi import vim_util from nova.virt.vmwareapi import vm_util from nova.virt.vmwareapi import vmops from nova.virt.vmwareapi import vmware_images from nova.virt.vmwareapi import volume_util from nova.virt.vmwareapi import volumeops CONF = cfg.CONF CONF.import_opt('host', 'nova.netconf') CONF.import_opt('remove_unused_original_minimum_age_seconds', 'nova.virt.imagecache') class fake_vm_ref(object): def __init__(self): self.value = 4 self._type = 'VirtualMachine' class fake_service_content(object): def __init__(self): self.ServiceContent = vmwareapi_fake.DataObject() self.ServiceContent.fake = 'fake' class VMwareSudsTest(test.NoDBTestCase): def setUp(self): super(VMwareSudsTest, self).setUp() def new_client_init(self, url, **kwargs): return mock.patch.object(suds.client.Client, '__init__', new=new_client_init).start() self.vim = self._vim_create() self.addCleanup(mock.patch.stopall) def _vim_create(self): def fake_retrieve_service_content(fake): return fake_service_content() self.stubs.Set(vim.Vim, 'retrieve_service_content', fake_retrieve_service_content) return vim.Vim() def test_exception_with_deepcopy(self): self.assertIsNotNone(self.vim) self.assertRaises(error_util.VimException, copy.deepcopy, self.vim) class VMwareSessionTestCase(test.NoDBTestCase): def _fake_is_vim_object(self, module): return True @mock.patch('time.sleep') def test_call_method_vim_fault(self, mock_sleep): def _fake_create_session(self): session = vmwareapi_fake.DataObject() session.key = 'fake_key' session.userName = 'fake_username' self._session = session def _fake_session_is_active(self): return False with contextlib.nested( 
mock.patch.object(driver.VMwareAPISession, '_is_vim_object', self._fake_is_vim_object), mock.patch.object(driver.VMwareAPISession, '_create_session', _fake_create_session), mock.patch.object(driver.VMwareAPISession, '_session_is_active', _fake_session_is_active) ) as (_fake_vim, _fake_create, _fake_is_active): api_session = driver.VMwareAPISession() args = () kwargs = {} self.assertRaises(error_util.VimFaultException, api_session._call_method, stubs, 'fake_temp_method_exception', *args, **kwargs) def test_call_method_vim_empty(self): def _fake_create_session(self): session = vmwareapi_fake.DataObject() session.key = 'fake_key' session.userName = 'fake_username' self._session = session def _fake_session_is_active(self): return True with contextlib.nested( mock.patch.object(driver.VMwareAPISession, '_is_vim_object', self._fake_is_vim_object), mock.patch.object(driver.VMwareAPISession, '_create_session', _fake_create_session), mock.patch.object(driver.VMwareAPISession, '_session_is_active', _fake_session_is_active) ) as (_fake_vim, _fake_create, _fake_is_active): api_session = driver.VMwareAPISession() args = () kwargs = {} res = api_session._call_method(stubs, 'fake_temp_method_exception', *args, **kwargs) self.assertEqual([], res) @mock.patch('time.sleep') def test_call_method_session_exception(self, mock_sleep): def _fake_create_session(self): session = vmwareapi_fake.DataObject() session.key = 'fake_key' session.userName = 'fake_username' self._session = session with contextlib.nested( mock.patch.object(driver.VMwareAPISession, '_is_vim_object', self._fake_is_vim_object), mock.patch.object(driver.VMwareAPISession, '_create_session', _fake_create_session), ) as (_fake_vim, _fake_create): api_session = driver.VMwareAPISession() args = () kwargs = {} self.assertRaises(error_util.SessionConnectionException, api_session._call_method, stubs, 'fake_temp_session_exception', *args, **kwargs) def test_call_method_session_file_exists_exception(self): def _fake_create_session(self): session = vmwareapi_fake.DataObject() session.key = 'fake_key' session.userName = 'fake_username' self._session = session with contextlib.nested( mock.patch.object(driver.VMwareAPISession, '_is_vim_object', self._fake_is_vim_object), mock.patch.object(driver.VMwareAPISession, '_create_session', _fake_create_session), ) as (_fake_vim, _fake_create): api_session = driver.VMwareAPISession() args = () kwargs = {} self.assertRaises(error_util.FileAlreadyExistsException, api_session._call_method, stubs, 'fake_session_file_exception', *args, **kwargs) def test_call_method_session_no_permission_exception(self): def _fake_create_session(self): session = vmwareapi_fake.DataObject() session.key = 'fake_key' session.userName = 'fake_username' self._session = session with contextlib.nested( mock.patch.object(driver.VMwareAPISession, '_is_vim_object', self._fake_is_vim_object), mock.patch.object(driver.VMwareAPISession, '_create_session', _fake_create_session), ) as (_fake_vim, _fake_create): api_session = driver.VMwareAPISession() args = () kwargs = {} e = self.assertRaises(error_util.NoPermissionException, api_session._call_method, stubs, 'fake_session_permission_exception', *args, **kwargs) fault_string = 'Permission to perform this operation was denied.' 
details = {'privilegeId': 'Resource.AssignVMToPool', 'object': 'domain-c7'} exception_string = '%s %s' % (fault_string, details) self.assertEqual(exception_string, str(e)) class VMwareAPIConfTestCase(test.NoDBTestCase): """Unit tests for VMWare API configurations.""" def setUp(self): super(VMwareAPIConfTestCase, self).setUp() vm_util.vm_refs_cache_reset() def tearDown(self): super(VMwareAPIConfTestCase, self).tearDown() def test_configure_without_wsdl_loc_override(self): # Test the default configuration behavior. By default, # use the WSDL sitting on the host we are talking to in # order to bind the SOAP client. wsdl_loc = cfg.CONF.vmware.wsdl_location self.assertIsNone(wsdl_loc) wsdl_url = vim.Vim.get_wsdl_url("https", "www.example.com") url = vim.Vim.get_soap_url("https", "www.example.com") self.assertEqual("https://www.example.com/sdk/vimService.wsdl", wsdl_url) self.assertEqual("https://www.example.com/sdk", url) def test_configure_without_wsdl_loc_override_using_ipv6(self): # Same as above but with ipv6 based host ip wsdl_loc = cfg.CONF.vmware.wsdl_location self.assertIsNone(wsdl_loc) wsdl_url = vim.Vim.get_wsdl_url("https", "::1") url = vim.Vim.get_soap_url("https", "::1") self.assertEqual("https://[::1]/sdk/vimService.wsdl", wsdl_url) self.assertEqual("https://[::1]/sdk", url) def test_configure_with_wsdl_loc_override(self): # Use the setting vmwareapi_wsdl_loc to override the # default path to the WSDL. # # This is useful as a work-around for XML parsing issues # found when using some WSDL in combination with some XML # parsers. # # The wsdl_url should point to a different host than the one we # are actually going to send commands to. fake_wsdl = "https://www.test.com/sdk/foo.wsdl" self.flags(wsdl_location=fake_wsdl, group='vmware') wsdl_loc = cfg.CONF.vmware.wsdl_location self.assertIsNotNone(wsdl_loc) self.assertEqual(fake_wsdl, wsdl_loc) wsdl_url = vim.Vim.get_wsdl_url("https", "www.example.com") url = vim.Vim.get_soap_url("https", "www.example.com") self.assertEqual(fake_wsdl, wsdl_url) self.assertEqual("https://www.example.com/sdk", url) class VMwareAPIVMTestCase(test.NoDBTestCase): """Unit tests for Vmware API connection calls.""" def setUp(self): super(VMwareAPIVMTestCase, self).setUp() vm_util.vm_refs_cache_reset() self.context = context.RequestContext('fake', 'fake', is_admin=False) self.flags(host_ip='test_url', host_username='test_username', host_password='test_pass', datastore_regex='.*', api_retry_count=1, use_linked_clone=False, group='vmware') self.flags(vnc_enabled=False, image_cache_subdirectory_name='vmware_base', my_ip='') self.user_id = 'fake' self.project_id = 'fake' self.node_name = 'test_url' self.ds = 'ds1' self.context = context.RequestContext(self.user_id, self.project_id) stubs.set_stubs(self.stubs) vmwareapi_fake.reset() self.conn = driver.VMwareESXDriver(fake.FakeVirtAPI) # NOTE(vish): none of the network plugging code is actually # being tested self.network_info = utils.get_test_network_info() self.image = { 'id': 'c1c8ce3d-c2e0-4247-890c-ccf5cc1c004c', 'disk_format': 'vmdk', 'size': 512, } nova.tests.image.fake.stub_out_image_service(self.stubs) self.vnc_host = 'test_url' self._set_exception_vars() def tearDown(self): super(VMwareAPIVMTestCase, self).tearDown() vmwareapi_fake.cleanup() nova.tests.image.fake.FakeImageService_reset() def _set_exception_vars(self): self.wait_task = self.conn._session._wait_for_task self.call_method = self.conn._session._call_method self.task_ref = None self.exception = False def test_driver_capabilities(self): 
self.assertTrue(self.conn.capabilities['has_imagecache']) self.assertFalse(self.conn.capabilities['supports_recreate']) def test_login_retries(self): self.attempts = 0 self.login_session = vmwareapi_fake.FakeVim()._login() def _fake_login(_self): self.attempts += 1 if self.attempts == 1: raise exception.NovaException('Here is my fake exception') return self.login_session def _fake_check_session(_self): return True self.stubs.Set(vmwareapi_fake.FakeVim, '_login', _fake_login) self.stubs.Set(time, 'sleep', lambda x: None) self.stubs.Set(vmwareapi_fake.FakeVim, '_check_session', _fake_check_session) self.conn = driver.VMwareAPISession() self.assertEqual(self.attempts, 2) def test_wait_for_task_exception(self): self.flags(task_poll_interval=1, group='vmware') self.login_session = vmwareapi_fake.FakeVim()._login() self.stop_called = 0 def _fake_login(_self): return self.login_session self.stubs.Set(vmwareapi_fake.FakeVim, '_login', _fake_login) def fake_poll_task(task_ref, done): done.send_exception(exception.NovaException('fake exception')) def fake_stop_loop(loop): self.stop_called += 1 return loop.stop() self.conn = driver.VMwareAPISession() self.stubs.Set(self.conn, "_poll_task", fake_poll_task) self.stubs.Set(self.conn, "_stop_loop", fake_stop_loop) self.assertRaises(exception.NovaException, self.conn._wait_for_task, 'fake-ref') self.assertEqual(self.stop_called, 1) def _get_instance_type_by_name(self, type): for instance_type in test_flavors.DEFAULT_FLAVORS: if instance_type['name'] == type: return instance_type if type == 'm1.micro': return {'memory_mb': 128, 'root_gb': 0, 'deleted_at': None, 'name': 'm1.micro', 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None, 'disabled': False, 'vcpus': 1, 'extra_specs': {}, 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '1', 'vcpu_weight': None, 'id': 2} def _create_instance(self, node=None, set_image_ref=True, uuid=None, instance_type='m1.large'): if not node: node = self.node_name if not uuid: uuid = uuidutils.generate_uuid() self.type_data = self._get_instance_type_by_name(instance_type) values = {'name': 'fake_name', 'id': 1, 'uuid': uuid, 'project_id': self.project_id, 'user_id': self.user_id, 'kernel_id': "fake_kernel_uuid", 'ramdisk_id': "fake_ramdisk_uuid", 'mac_address': "de:ad:be:ef:be:ef", 'flavor': instance_type, 'node': node, 'memory_mb': self.type_data['memory_mb'], 'root_gb': self.type_data['root_gb'], 'ephemeral_gb': self.type_data['ephemeral_gb'], 'vcpus': self.type_data['vcpus'], 'swap': self.type_data['swap'], } if set_image_ref: values['image_ref'] = "fake_image_uuid" self.instance_node = node self.uuid = uuid self.instance = fake_instance.fake_instance_obj( self.context, **values) def _create_vm(self, node=None, num_instances=1, uuid=None, instance_type='m1.large'): """Create and spawn the VM.""" if not node: node = self.node_name self._create_instance(node=node, uuid=uuid, instance_type=instance_type) self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid)) self.conn.spawn(self.context, self.instance, self.image, injected_files=[], admin_password=None, network_info=self.network_info, block_device_info=None) self._check_vm_record(num_instances=num_instances) self.assertIsNotNone(vm_util.vm_ref_cache_get(self.uuid)) def _check_vm_record(self, num_instances=1): """Check if the spawned VM's properties correspond to the instance in the db. 
""" instances = self.conn.list_instances() self.assertEqual(len(instances), num_instances) # Get Nova record for VM vm_info = self.conn.get_info({'uuid': self.uuid, 'name': 1, 'node': self.instance_node}) # Get record for VM vms = vmwareapi_fake._get_objects("VirtualMachine") for vm in vms.objects: if vm.get('name') == self.uuid: break # Check that m1.large above turned into the right thing. mem_kib = long(self.type_data['memory_mb']) << 10 vcpus = self.type_data['vcpus'] self.assertEqual(vm_info['max_mem'], mem_kib) self.assertEqual(vm_info['mem'], mem_kib) self.assertEqual(vm.get("summary.config.instanceUuid"), self.uuid) self.assertEqual(vm.get("summary.config.numCpu"), vcpus) self.assertEqual(vm.get("summary.config.memorySizeMB"), self.type_data['memory_mb']) self.assertEqual( vm.get("config.hardware.device")[2].device.obj_name, "ns0:VirtualE1000") # Check that the VM is running according to Nova self.assertEqual(vm_info['state'], power_state.RUNNING) # Check that the VM is running according to vSphere API. self.assertEqual(vm.get("runtime.powerState"), 'poweredOn') found_vm_uuid = False found_iface_id = False for c in vm.get("config.extraConfig").OptionValue: if (c.key == "nvp.vm-uuid" and c.value == self.instance['uuid']): found_vm_uuid = True if (c.key == "nvp.iface-id.0" and c.value == "vif-xxx-yyy-zzz"): found_iface_id = True self.assertTrue(found_vm_uuid) self.assertTrue(found_iface_id) def _check_vm_info(self, info, pwr_state=power_state.RUNNING): """Check if the get_info returned values correspond to the instance object in the db. """ mem_kib = long(self.type_data['memory_mb']) << 10 self.assertEqual(info["state"], pwr_state) self.assertEqual(info["max_mem"], mem_kib) self.assertEqual(info["mem"], mem_kib) self.assertEqual(info["num_cpu"], self.type_data['vcpus']) def test_list_instances(self): instances = self.conn.list_instances() self.assertEqual(len(instances), 0) def test_list_instances_1(self): self._create_vm() instances = self.conn.list_instances() self.assertEqual(len(instances), 1) def test_list_instance_uuids(self): self._create_vm() uuids = self.conn.list_instance_uuids() self.assertEqual(len(uuids), 1) def test_list_instance_uuids_invalid_uuid(self): self._create_vm(uuid='fake_id') uuids = self.conn.list_instance_uuids() self.assertEqual(len(uuids), 0) def _cached_files_exist(self, exists=True): cache = ('[%s] vmware_base/fake_image_uuid/fake_image_uuid.vmdk' % self.ds) if exists: self.assertTrue(vmwareapi_fake.get_file(cache)) else: self.assertFalse(vmwareapi_fake.get_file(cache)) def test_instance_dir_disk_created(self): """Test image file is cached when even when use_linked_clone is False """ self._create_vm() inst_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid) cache = ('[%s] vmware_base/fake_image_uuid/fake_image_uuid.vmdk' % self.ds) self.assertTrue(vmwareapi_fake.get_file(inst_file_path)) self._cached_files_exist() def test_cache_dir_disk_created(self): """Test image disk is cached when use_linked_clone is True.""" self.flags(use_linked_clone=True, group='vmware') self._create_vm() file = ('[%s] vmware_base/fake_image_uuid/fake_image_uuid.vmdk' % self.ds) root = ('[%s] vmware_base/fake_image_uuid/fake_image_uuid.80.vmdk' % self.ds) self.assertTrue(vmwareapi_fake.get_file(file)) self.assertTrue(vmwareapi_fake.get_file(root)) def _iso_disk_type_created(self, instance_type='m1.large'): self.image['disk_format'] = 'iso' self._create_vm(instance_type=instance_type) file = ('[%s] vmware_base/fake_image_uuid/fake_image_uuid.iso' % self.ds) 
self.assertTrue(vmwareapi_fake.get_file(file)) def test_iso_disk_type_created(self): self._iso_disk_type_created() vmdk_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid) self.assertTrue(vmwareapi_fake.get_file(vmdk_file_path)) def test_iso_disk_type_created_with_root_gb_0(self): self._iso_disk_type_created(instance_type='m1.micro') vmdk_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid) self.assertFalse(vmwareapi_fake.get_file(vmdk_file_path)) def test_iso_disk_cdrom_attach(self): self.iso_path = ( '[%s] vmware_base/fake_image_uuid/fake_image_uuid.iso' % self.ds) def fake_attach_cdrom(vm_ref, instance, data_store_ref, iso_uploaded_path): self.assertEqual(iso_uploaded_path, self.iso_path) self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm", fake_attach_cdrom) self.image['disk_format'] = 'iso' self._create_vm() def test_iso_disk_cdrom_attach_with_config_drive(self): self.flags(force_config_drive=True) self.iso_path = [ ('[%s] vmware_base/fake_image_uuid/fake_image_uuid.iso' % self.ds), '[%s] fake-config-drive' % self.ds] self.iso_unit_nos = [0, 1] self.iso_index = 0 def fake_create_config_drive(instance, injected_files, password, data_store_name, folder, uuid, cookies): return 'fake-config-drive' def fake_attach_cdrom(vm_ref, instance, data_store_ref, iso_uploaded_path): self.assertEqual(iso_uploaded_path, self.iso_path[self.iso_index]) self.iso_index += 1 self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm", fake_attach_cdrom) self.stubs.Set(self.conn._vmops, '_create_config_drive', fake_create_config_drive) self.image['disk_format'] = 'iso' self._create_vm() self.assertEqual(self.iso_index, 2) def test_cdrom_attach_with_config_drive(self): self.flags(force_config_drive=True) self.iso_path = '[%s] fake-config-drive' % self.ds self.cd_attach_called = False def fake_create_config_drive(instance, injected_files, password, data_store_name, folder, uuid, cookies): return 'fake-config-drive' def fake_attach_cdrom(vm_ref, instance, data_store_ref, iso_uploaded_path): self.assertEqual(iso_uploaded_path, self.iso_path) self.cd_attach_called = True self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm", fake_attach_cdrom) self.stubs.Set(self.conn._vmops, '_create_config_drive', fake_create_config_drive) self._create_vm() self.assertTrue(self.cd_attach_called) def test_spawn(self): self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def _spawn_with_delete_exception(self, fault=None): def fake_call_method(module, method, *args, **kwargs): task_ref = self.call_method(module, method, *args, **kwargs) if method == "DeleteDatastoreFile_Task": self.exception = True task_mdo = vmwareapi_fake.create_task(method, "error", error_fault=fault) return task_mdo.obj return task_ref with ( mock.patch.object(self.conn._session, '_call_method', fake_call_method) ): if fault: self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) else: self.assertRaises(error_util.VMwareDriverException, self._create_vm) self.assertTrue(self.exception) def test_spawn_with_delete_exception_not_found(self): self._spawn_with_delete_exception(vmwareapi_fake.FileNotFound()) def test_spawn_with_delete_exception_file_fault(self): self._spawn_with_delete_exception(vmwareapi_fake.FileFault()) def test_spawn_with_delete_exception_cannot_delete_file(self): self._spawn_with_delete_exception(vmwareapi_fake.CannotDeleteFile()) def 
test_spawn_with_delete_exception_file_locked(self): self._spawn_with_delete_exception(vmwareapi_fake.FileLocked()) def test_spawn_with_delete_exception_general(self): self._spawn_with_delete_exception() def test_spawn_disk_extend(self): self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk') requested_size = 80 * units.Mi self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(), requested_size, mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_spawn_disk_extend_exists(self): root = ('[%s] vmware_base/fake_image_uuid/fake_image_uuid.80.vmdk' % self.ds) self.root = root def _fake_extend(instance, requested_size, name, dc_ref): vmwareapi_fake._add_file(self.root) self.stubs.Set(self.conn._vmops, '_extend_virtual_disk', _fake_extend) self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.assertTrue(vmwareapi_fake.get_file(root)) def test_spawn_disk_extend_sparse(self): self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties') result = [1024, {"vmware_ostype": "otherGuest", "vmware_adaptertype": "lsiLogic", "vmware_disktype": "sparse"}] vmware_images.get_vmdk_size_and_properties( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(result) self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk') requested_size = 80 * units.Mi self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(), requested_size, mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_spawn_disk_extend_insufficient_disk_space(self): self.flags(use_linked_clone=True, group='vmware') self.wait_task = self.conn._session._wait_for_task self.call_method = self.conn._session._call_method self.task_ref = None id = 'fake_image_uuid' cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds, id, id) tmp_file = '[%s] vmware_base/%s/%s.80-flat.vmdk' % (self.ds, id, id) def fake_wait_for_task(task_ref): if task_ref == self.task_ref: self.task_ref = None self.assertTrue(vmwareapi_fake.get_file(cached_image)) self.assertTrue(vmwareapi_fake.get_file(tmp_file)) raise exception.NovaException('No space!') return self.wait_task(task_ref) def fake_call_method(module, method, *args, **kwargs): task_ref = self.call_method(module, method, *args, **kwargs) if method == "ExtendVirtualDisk_Task": self.task_ref = task_ref return task_ref self.stubs.Set(self.conn._session, "_call_method", fake_call_method) self.stubs.Set(self.conn._session, "_wait_for_task", fake_wait_for_task) self.assertRaises(exception.NovaException, self._create_vm) self.assertFalse(vmwareapi_fake.get_file(cached_image)) self.assertFalse(vmwareapi_fake.get_file(tmp_file)) def test_spawn_disk_invalid_disk_size(self): self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties') result = [82 * units.Gi, {"vmware_ostype": "otherGuest", "vmware_adaptertype": "lsiLogic", "vmware_disktype": "sparse"}] vmware_images.get_vmdk_size_and_properties( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(result) self.mox.ReplayAll() self.assertRaises(exception.InstanceUnacceptable, self._create_vm) def test_spawn_invalid_disk_format(self): self._create_instance() self.image['disk_format'] = 'invalid' self.assertRaises(exception.InvalidDiskFormat, 
self.conn.spawn, self.context, self.instance, self.image, injected_files=[], admin_password=None, network_info=self.network_info, block_device_info=None) def test_spawn_with_move_file_exists_exception(self): # The test will validate that the spawn completes # successfully. The "MoveDatastoreFile_Task" will # raise an file exists exception. The flag # self.exception will be checked to see that # the exception has indeed been raised. def fake_wait_for_task(task_ref): if task_ref == self.task_ref: self.task_ref = None self.exception = True raise error_util.FileAlreadyExistsException() return self.wait_task(task_ref) def fake_call_method(module, method, *args, **kwargs): task_ref = self.call_method(module, method, *args, **kwargs) if method == "MoveDatastoreFile_Task": self.task_ref = task_ref return task_ref with contextlib.nested( mock.patch.object(self.conn._session, '_wait_for_task', fake_wait_for_task), mock.patch.object(self.conn._session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.assertTrue(self.exception) def test_spawn_with_move_general_exception(self): # The test will validate that the spawn completes # successfully. The "MoveDatastoreFile_Task" will # raise a general exception. The flag self.exception # will be checked to see that the exception has # indeed been raised. def fake_wait_for_task(task_ref): if task_ref == self.task_ref: self.task_ref = None self.exception = True raise error_util.VMwareDriverException('Exception!') return self.wait_task(task_ref) def fake_call_method(module, method, *args, **kwargs): task_ref = self.call_method(module, method, *args, **kwargs) if method == "MoveDatastoreFile_Task": self.task_ref = task_ref return task_ref with contextlib.nested( mock.patch.object(self.conn._session, '_wait_for_task', fake_wait_for_task), mock.patch.object(self.conn._session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): self.assertRaises(error_util.VMwareDriverException, self._create_vm) self.assertTrue(self.exception) def test_spawn_with_move_poll_exception(self): self.call_method = self.conn._session._call_method def fake_call_method(module, method, *args, **kwargs): task_ref = self.call_method(module, method, *args, **kwargs) if method == "MoveDatastoreFile_Task": task_mdo = vmwareapi_fake.create_task(method, "error") return task_mdo.obj return task_ref with ( mock.patch.object(self.conn._session, '_call_method', fake_call_method) ): self.assertRaises(error_util.VMwareDriverException, self._create_vm) def test_spawn_with_move_file_exists_poll_exception(self): # The test will validate that the spawn completes # successfully. The "MoveDatastoreFile_Task" will # raise a file exists exception. The flag self.exception # will be checked to see that the exception has # indeed been raised. 
def fake_call_method(module, method, *args, **kwargs): task_ref = self.call_method(module, method, *args, **kwargs) if method == "MoveDatastoreFile_Task": self.exception = True task_mdo = vmwareapi_fake.create_task(method, "error", error_fault=vmwareapi_fake.FileAlreadyExists()) return task_mdo.obj return task_ref with ( mock.patch.object(self.conn._session, '_call_method', fake_call_method) ): self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.assertTrue(self.exception) def _spawn_attach_volume_vmdk(self, set_image_ref=True, vc_support=False): self._create_instance(set_image_ref=set_image_ref) self.mox.StubOutWithMock(block_device, 'volume_in_mapping') self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping') connection_info = self._test_vmdk_connection_info('vmdk') root_disk = [{'connection_info': connection_info}] v_driver.block_device_info_get_mapping( mox.IgnoreArg()).AndReturn(root_disk) if vc_support: self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_get_res_pool_of_vm') volumeops.VMwareVolumeOps._get_res_pool_of_vm( mox.IgnoreArg()).AndReturn('fake_res_pool') self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_relocate_vmdk_volume') volumeops.VMwareVolumeOps._relocate_vmdk_volume(mox.IgnoreArg(), 'fake_res_pool', mox.IgnoreArg()) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'attach_volume') volumeops.VMwareVolumeOps.attach_volume(connection_info, self.instance, mox.IgnoreArg()) self.mox.ReplayAll() block_device_info = {'mount_device': 'vda'} self.conn.spawn(self.context, self.instance, self.image, injected_files=[], admin_password=None, network_info=self.network_info, block_device_info=block_device_info) def test_spawn_attach_volume_vmdk(self): self._spawn_attach_volume_vmdk() def test_spawn_attach_volume_vmdk_no_image_ref(self): self._spawn_attach_volume_vmdk(set_image_ref=False) def test_spawn_attach_volume_iscsi(self): self._create_instance() self.mox.StubOutWithMock(block_device, 'volume_in_mapping') self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping') connection_info = self._test_vmdk_connection_info('iscsi') root_disk = [{'connection_info': connection_info}] v_driver.block_device_info_get_mapping( mox.IgnoreArg()).AndReturn(root_disk) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'attach_volume') volumeops.VMwareVolumeOps.attach_volume(connection_info, self.instance, mox.IgnoreArg()) self.mox.ReplayAll() block_device_info = {'mount_device': 'vda'} self.conn.spawn(self.context, self.instance, self.image, injected_files=[], admin_password=None, network_info=self.network_info, block_device_info=block_device_info) def mock_upload_image(self, context, image, instance, **kwargs): self.assertEqual(image, 'Test-Snapshot') self.assertEqual(instance, self.instance) self.assertEqual(kwargs['disk_type'], 'preallocated') def test_get_vm_ref_using_extra_config(self): self._create_vm() vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session, self.instance['uuid']) self.assertIsNotNone(vm_ref, 'VM Reference cannot be none') # Disrupt the fake Virtual Machine object so that extraConfig # cannot be matched. fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0] fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = "" # We should not get a Virtual Machine through extraConfig. 
vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session, self.instance['uuid']) self.assertIsNone(vm_ref, 'VM Reference should be none') # Check if we can find the Virtual Machine using the name. vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance) self.assertIsNotNone(vm_ref, 'VM Reference cannot be none') def test_search_vm_ref_by_identifier(self): self._create_vm() vm_ref = vm_util.search_vm_ref_by_identifier(self.conn._session, self.instance['uuid']) self.assertIsNotNone(vm_ref, 'VM Reference cannot be none') fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0] fake_vm.set("summary.config.instanceUuid", "foo") fake_vm.set("name", "foo") fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = "foo" self.assertIsNone(vm_util.search_vm_ref_by_identifier( self.conn._session, self.instance['uuid']), "VM Reference should be none") self.assertIsNotNone( vm_util.search_vm_ref_by_identifier(self.conn._session, "foo"), "VM Reference should not be none") def test_get_object_for_optionvalue(self): self._create_vm() vms = self.conn._session._call_method(vim_util, "get_objects", "VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]']) vm_ref = vm_util._get_object_for_optionvalue(vms, self.instance["uuid"]) self.assertIsNotNone(vm_ref, 'VM Reference cannot be none') def _test_snapshot(self): expected_calls = [ {'args': (), 'kwargs': {'task_state': task_states.IMAGE_PENDING_UPLOAD}}, {'args': (), 'kwargs': {'task_state': task_states.IMAGE_UPLOADING, 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}] func_call_matcher = matchers.FunctionCallMatcher(expected_calls) info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) with mock.patch.object(vmware_images, 'upload_image', self.mock_upload_image): self.conn.snapshot(self.context, self.instance, "Test-Snapshot", func_call_matcher.call) info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.assertIsNone(func_call_matcher.match()) def test_snapshot(self): self._create_vm() self._test_snapshot() def test_snapshot_non_existent(self): self._create_instance() self.assertRaises(exception.InstanceNotFound, self.conn.snapshot, self.context, self.instance, "Test-Snapshot", lambda *args, **kwargs: None) def test_snapshot_delete_vm_snapshot(self): self._create_vm() fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0].obj snapshot_ref = vmwareapi_fake.ManagedObjectReference( value="Snapshot-123", name="VirtualMachineSnapshot") self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_vm_snapshot') self.conn._vmops._create_vm_snapshot( self.instance, fake_vm).AndReturn(snapshot_ref) self.mox.StubOutWithMock(vmops.VMwareVMOps, '_delete_vm_snapshot') self.conn._vmops._delete_vm_snapshot( self.instance, fake_vm, snapshot_ref).AndReturn(None) self.mox.ReplayAll() self._test_snapshot() def test_reboot(self): self._create_vm() info = self.conn.get_info({'name': 1, 'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) reboot_type = "SOFT" self.conn.reboot(self.context, self.instance, self.network_info, reboot_type) info = self.conn.get_info({'name': 1, 'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_reboot_with_uuid(self): """Test fall back to use name when can't find by uuid.""" self._create_vm() info = self.conn.get_info({'name': 'fake-name', 'uuid': self.uuid, 'node': self.instance_node}) 
self._check_vm_info(info, power_state.RUNNING) reboot_type = "SOFT" self.conn.reboot(self.context, self.instance, self.network_info, reboot_type) info = self.conn.get_info({'name': 'fake-name', 'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_reboot_non_existent(self): self._create_instance() self.assertRaises(exception.InstanceNotFound, self.conn.reboot, self.context, self.instance, self.network_info, 'SOFT') def test_poll_rebooting_instances(self): self.mox.StubOutWithMock(compute_api.API, 'reboot') compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self._create_vm() instances = [self.instance] self.conn.poll_rebooting_instances(60, instances) def test_reboot_not_poweredon(self): self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.conn.suspend(self.instance) info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.SUSPENDED) self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot, self.context, self.instance, self.network_info, 'SOFT') def test_suspend(self): self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.conn.suspend(self.instance) info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.SUSPENDED) def test_suspend_non_existent(self): self._create_instance() self.assertRaises(exception.InstanceNotFound, self.conn.suspend, self.instance) def test_resume(self): self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.conn.suspend(self.instance) info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.SUSPENDED) self.conn.resume(self.context, self.instance, self.network_info) info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_resume_non_existent(self): self._create_instance() self.assertRaises(exception.InstanceNotFound, self.conn.resume, self.context, self.instance, self.network_info) def test_resume_not_suspended(self): self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.assertRaises(exception.InstanceResumeFailure, self.conn.resume, self.context, self.instance, self.network_info) def test_power_on(self): self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.conn.power_off(self.instance) info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.SHUTDOWN) self.conn.power_on(self.context, self.instance, self.network_info) info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_power_on_non_existent(self): self._create_instance() self.assertRaises(exception.InstanceNotFound, self.conn.power_on, self.context, self.instance, self.network_info) def test_power_off(self): self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.conn.power_off(self.instance) info = 
self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.SHUTDOWN) def test_power_off_non_existent(self): self._create_instance() self.assertRaises(exception.InstanceNotFound, self.conn.power_off, self.instance) def test_power_off_suspended(self): self._create_vm() self.conn.suspend(self.instance) info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.SUSPENDED) self.assertRaises(exception.InstancePowerOffFailure, self.conn.power_off, self.instance) def test_resume_state_on_host_boot(self): self._create_vm() self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name') self.mox.StubOutWithMock(self.conn, "reboot") vm_util.get_vm_state_from_name(mox.IgnoreArg(), self.instance['uuid']).AndReturn("poweredOff") self.conn.reboot(self.context, self.instance, 'network_info', 'hard', None) self.mox.ReplayAll() self.conn.resume_state_on_host_boot(self.context, self.instance, 'network_info') def test_resume_state_on_host_boot_no_reboot_1(self): """Don't call reboot on instance which is poweredon.""" self._create_vm() self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name') self.mox.StubOutWithMock(self.conn, 'reboot') vm_util.get_vm_state_from_name(mox.IgnoreArg(), self.instance['uuid']).AndReturn("poweredOn") self.mox.ReplayAll() self.conn.resume_state_on_host_boot(self.context, self.instance, 'network_info') def test_resume_state_on_host_boot_no_reboot_2(self): """Don't call reboot on instance which is suspended.""" self._create_vm() self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name') self.mox.StubOutWithMock(self.conn, 'reboot') vm_util.get_vm_state_from_name(mox.IgnoreArg(), self.instance['uuid']).AndReturn("suspended") self.mox.ReplayAll() self.conn.resume_state_on_host_boot(self.context, self.instance, 'network_info') def test_get_info(self): self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def destroy_rescued(self, fake_method): self._rescue() with ( mock.patch.object(self.conn._volumeops, "detach_disk_from_vm", fake_method) ): self.instance['vm_state'] = vm_states.RESCUED self.conn.destroy(self.context, self.instance, self.network_info) inst_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid) self.assertFalse(vmwareapi_fake.get_file(inst_path)) rescue_file_path = '[%s] %s-rescue/%s-rescue.vmdk' % (self.ds, self.uuid, self.uuid) self.assertFalse(vmwareapi_fake.get_file(rescue_file_path)) def test_destroy_rescued(self): def fake_detach_disk_from_vm(*args, **kwargs): pass self.destroy_rescued(fake_detach_disk_from_vm) def test_destroy_rescued_with_exception(self): def fake_detach_disk_from_vm(*args, **kwargs): raise exception.NovaException('Here is my fake exception') self.destroy_rescued(fake_detach_disk_from_vm) def test_destroy(self): self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) instances = self.conn.list_instances() self.assertEqual(len(instances), 1) self.conn.destroy(self.context, self.instance, self.network_info) instances = self.conn.list_instances() self.assertEqual(len(instances), 0) self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid)) def test_destroy_no_datastore(self): self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) instances = self.conn.list_instances() self.assertEqual(len(instances), 
1) # Overwrite the vmPathName vms = vmwareapi_fake._get_objects("VirtualMachine") vm = vms.objects[0] vm.set("config.files.vmPathName", None) self.conn.destroy(self.context, self.instance, self.network_info) instances = self.conn.list_instances() self.assertEqual(len(instances), 0) def test_destroy_non_existent(self): self._create_instance() self.assertIsNone(self.conn.destroy(self.context, self.instance, self.network_info)) def _rescue(self, config_drive=False): def fake_attach_disk_to_vm(vm_ref, instance, adapter_type, disk_type, vmdk_path=None, disk_size=None, linked_clone=False, controller_key=None, unit_number=None, device_name=None): info = self.conn.get_info(instance) self._check_vm_info(info, power_state.SHUTDOWN) if config_drive: def fake_create_config_drive(instance, injected_files, password, data_store_name, folder, instance_uuid, cookies): self.assertTrue(uuidutils.is_uuid_like(instance['uuid'])) self.stubs.Set(self.conn._vmops, '_create_config_drive', fake_create_config_drive) self._create_vm() info = self.conn.get_info({'name': 1, 'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.stubs.Set(self.conn._volumeops, "attach_disk_to_vm", fake_attach_disk_to_vm) self.conn.rescue(self.context, self.instance, self.network_info, self.image, 'fake-password') info = self.conn.get_info({'name': '1-rescue', 'uuid': '%s-rescue' % self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) info = self.conn.get_info({'name': 1, 'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.SHUTDOWN) self.assertIsNotNone(vm_util.vm_ref_cache_get('%s-rescue' % self.uuid)) def test_rescue(self): self._rescue() inst_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid) self.assertTrue(vmwareapi_fake.get_file(inst_file_path)) rescue_file_path = '[%s] %s-rescue/%s-rescue.vmdk' % (self.ds, self.uuid, self.uuid) self.assertTrue(vmwareapi_fake.get_file(rescue_file_path)) def test_rescue_with_config_drive(self): self.flags(force_config_drive=True) self._rescue(config_drive=True) def test_unrescue(self): self._rescue() self.test_vm_ref = None self.test_device_name = None def fake_power_off_vm_ref(vm_ref): self.test_vm_ref = vm_ref self.assertIsNotNone(vm_ref) def fake_detach_disk_from_vm(vm_ref, instance, device_name, destroy_disk=False): self.test_device_name = device_name info = self.conn.get_info(instance) self._check_vm_info(info, power_state.SHUTDOWN) with contextlib.nested( mock.patch.object(self.conn._vmops, "_power_off_vm_ref", side_effect=fake_power_off_vm_ref), mock.patch.object(self.conn._volumeops, "detach_disk_from_vm", side_effect=fake_detach_disk_from_vm), ) as (poweroff, detach): self.conn.unrescue(self.instance, None) poweroff.assert_called_once_with(self.test_vm_ref) detach.assert_called_once_with(self.test_vm_ref, mock.ANY, self.test_device_name) self.test_vm_ref = None self.test_device_name = None info = self.conn.get_info({'name': 1, 'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_pause(self): # Tests that the VMwareESXDriver does not implement the pause method. self.assertRaises(NotImplementedError, self.conn.pause, instance=None) def test_unpause(self): # Tests that the VMwareESXDriver does not implement the unpause method. 
self.assertRaises(NotImplementedError, self.conn.unpause, instance=None) def test_get_diagnostics(self): self._create_vm() expected = {'memoryReservation': 0, 'suspendInterval': 0, 'maxCpuUsage': 2000, 'toolsInstallerMounted': False, 'consumedOverheadMemory': 20, 'numEthernetCards': 1, 'numCpu': 1, 'featureRequirement': [{'key': 'cpuid.AES'}], 'memoryOverhead': 21417984, 'guestMemoryUsage': 0, 'connectionState': 'connected', 'memorySizeMB': 512, 'balloonedMemory': 0, 'vmPathName': 'fake_path', 'template': False, 'overallCpuUsage': 0, 'powerState': 'poweredOn', 'cpuReservation': 0, 'overallCpuDemand': 0, 'numVirtualDisks': 1, 'hostMemoryUsage': 141} expected = dict([('vmware:' + k, v) for k, v in expected.items()]) self.assertThat( self.conn.get_diagnostics({'name': 1, 'uuid': self.uuid, 'node': self.instance_node}), matchers.DictMatches(expected)) def test_get_console_output(self): self.assertRaises(NotImplementedError, self.conn.get_console_output, None, None) def _test_finish_migration(self, power_on, resize_instance=False): """Tests the finish_migration method on vmops.""" self.power_on_called = False self.wait_for_task = False self.wait_task = self.conn._session._wait_for_task def fake_power_on(instance): self.assertEqual(self.instance, instance) self.power_on_called = True def fake_vmops_update_instance_progress(context, instance, step, total_steps): self.assertEqual(self.context, context) self.assertEqual(self.instance, instance) self.assertEqual(4, step) self.assertEqual(vmops.RESIZE_TOTAL_STEPS, total_steps) if resize_instance: def fake_wait_for_task(task_ref): self.wait_for_task = True return self.wait_task(task_ref) self.stubs.Set(self.conn._session, "_wait_for_task", fake_wait_for_task) self.stubs.Set(self.conn._vmops, "_power_on", fake_power_on) self.stubs.Set(self.conn._vmops, "_update_instance_progress", fake_vmops_update_instance_progress) # setup the test instance in the database self._create_vm() # perform the migration on our stubbed methods self.conn.finish_migration(context=self.context, migration=None, instance=self.instance, disk_info=None, network_info=None, block_device_info=None, resize_instance=resize_instance, image_meta=None, power_on=power_on) if resize_instance: self.assertTrue(self.wait_for_task) else: self.assertFalse(self.wait_for_task) def test_finish_migration_power_on(self): self.assertRaises(NotImplementedError, self._test_finish_migration, power_on=True) def test_finish_migration_power_off(self): self.assertRaises(NotImplementedError, self._test_finish_migration, power_on=False) def test_confirm_migration(self): self._create_vm() self.assertRaises(NotImplementedError, self.conn.confirm_migration, self.context, self.instance, None) def _test_finish_revert_migration(self, power_on): """Tests the finish_revert_migration method on vmops.""" # setup the test instance in the database self._create_vm() self.power_on_called = False self.vm_name = str(self.instance['name']) + '-orig' def fake_power_on(instance): self.assertEqual(self.instance, instance) self.power_on_called = True def fake_get_orig_vm_name_label(instance): self.assertEqual(self.instance, instance) return self.vm_name def fake_get_vm_ref_from_name(session, vm_name): self.assertEqual(self.vm_name, vm_name) return vmwareapi_fake._get_objects("VirtualMachine").objects[0] def fake_get_vm_ref_from_uuid(session, vm_uuid): return vmwareapi_fake._get_objects("VirtualMachine").objects[0] def fake_call_method(*args, **kwargs): pass def fake_wait_for_task(*args, **kwargs): pass 
self.stubs.Set(self.conn._vmops, "_power_on", fake_power_on) self.stubs.Set(self.conn._vmops, "_get_orig_vm_name_label", fake_get_orig_vm_name_label) self.stubs.Set(vm_util, "_get_vm_ref_from_uuid", fake_get_vm_ref_from_uuid) self.stubs.Set(vm_util, "get_vm_ref_from_name", fake_get_vm_ref_from_name) self.stubs.Set(self.conn._session, "_call_method", fake_call_method) self.stubs.Set(self.conn._session, "_wait_for_task", fake_wait_for_task) # perform the revert on our stubbed methods self.conn.finish_revert_migration(self.context, instance=self.instance, network_info=None, power_on=power_on) def test_finish_revert_migration_power_on(self): self.assertRaises(NotImplementedError, self._test_finish_migration, power_on=True) def test_finish_revert_migration_power_off(self): self.assertRaises(NotImplementedError, self._test_finish_migration, power_on=False) def test_get_console_pool_info(self): info = self.conn.get_console_pool_info("console_type") self.assertEqual(info['address'], 'test_url') self.assertEqual(info['username'], 'test_username') self.assertEqual(info['password'], 'test_pass') def test_get_vnc_console_non_existent(self): self._create_instance() self.assertRaises(exception.InstanceNotFound, self.conn.get_vnc_console, self.context, self.instance) def _test_get_vnc_console(self): self._create_vm() fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0] OptionValue = collections.namedtuple('OptionValue', ['key', 'value']) opt_val = OptionValue(key='', value=5906) fake_vm.set(vm_util.VNC_CONFIG_KEY, opt_val) vnc_dict = self.conn.get_vnc_console(self.context, self.instance) self.assertEqual(vnc_dict['host'], self.vnc_host) self.assertEqual(vnc_dict['port'], 5906) def test_get_vnc_console(self): self._test_get_vnc_console() def test_get_vnc_console_noport(self): self._create_vm() fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0] self.assertRaises(exception.ConsoleTypeUnavailable, self.conn.get_vnc_console, self.context, self.instance) def test_host_ip_addr(self): self.assertEqual(self.conn.get_host_ip_addr(), "test_url") def test_get_volume_connector(self): self._create_vm() connector_dict = self.conn.get_volume_connector(self.instance) fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0] fake_vm_id = fake_vm.obj.value self.assertEqual(connector_dict['ip'], 'test_url') self.assertEqual(connector_dict['initiator'], 'iscsi-name') self.assertEqual(connector_dict['host'], 'test_url') self.assertEqual(connector_dict['instance'], fake_vm_id) def _test_vmdk_connection_info(self, type): return {'driver_volume_type': type, 'serial': 'volume-fake-id', 'data': {'volume': 'vm-10', 'volume_id': 'volume-fake-id'}} def test_volume_attach_vmdk(self): self._create_vm() connection_info = self._test_vmdk_connection_info('vmdk') mount_point = '/dev/vdc' self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_attach_volume_vmdk') volumeops.VMwareVolumeOps._attach_volume_vmdk(connection_info, self.instance, mount_point) self.mox.ReplayAll() self.conn.attach_volume(None, connection_info, self.instance, mount_point) def test_volume_detach_vmdk(self): self._create_vm() connection_info = self._test_vmdk_connection_info('vmdk') mount_point = '/dev/vdc' self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_detach_volume_vmdk') volumeops.VMwareVolumeOps._detach_volume_vmdk(connection_info, self.instance, mount_point) self.mox.ReplayAll() self.conn.detach_volume(connection_info, self.instance, mount_point, encryption=None) def test_attach_vmdk_disk_to_vm(self): self._create_vm() 
connection_info = self._test_vmdk_connection_info('vmdk') mount_point = '/dev/vdc' # create fake backing info volume_device = vmwareapi_fake.DataObject() volume_device.backing = vmwareapi_fake.DataObject() volume_device.backing.fileName = 'fake_path' self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_get_vmdk_base_volume_device') volumeops.VMwareVolumeOps._get_vmdk_base_volume_device( mox.IgnoreArg()).AndReturn(volume_device) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'attach_disk_to_vm') volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(), self.instance, mox.IgnoreArg(), mox.IgnoreArg(), vmdk_path='fake_path') self.mox.ReplayAll() self.conn.attach_volume(None, connection_info, self.instance, mount_point) def test_detach_vmdk_disk_from_vm(self): self._create_vm() connection_info = self._test_vmdk_connection_info('vmdk') mount_point = '/dev/vdc' self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_get_volume_uuid') volumeops.VMwareVolumeOps._get_volume_uuid(mox.IgnoreArg(), 'volume-fake-id').AndReturn('fake_disk_uuid') self.mox.StubOutWithMock(vm_util, 'get_vmdk_backed_disk_device') vm_util.get_vmdk_backed_disk_device(mox.IgnoreArg(), 'fake_disk_uuid').AndReturn('fake_device') self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_consolidate_vmdk_volume') volumeops.VMwareVolumeOps._consolidate_vmdk_volume(self.instance, mox.IgnoreArg(), 'fake_device', mox.IgnoreArg()) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'detach_disk_from_vm') volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(), self.instance, mox.IgnoreArg()) self.mox.ReplayAll() self.conn.detach_volume(connection_info, self.instance, mount_point, encryption=None) def test_volume_attach_iscsi(self): self._create_vm() connection_info = self._test_vmdk_connection_info('iscsi') mount_point = '/dev/vdc' self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_attach_volume_iscsi') volumeops.VMwareVolumeOps._attach_volume_iscsi(connection_info, self.instance, mount_point) self.mox.ReplayAll() self.conn.attach_volume(None, connection_info, self.instance, mount_point) def test_volume_detach_iscsi(self): self._create_vm() connection_info = self._test_vmdk_connection_info('iscsi') mount_point = '/dev/vdc' self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, '_detach_volume_iscsi') volumeops.VMwareVolumeOps._detach_volume_iscsi(connection_info, self.instance, mount_point) self.mox.ReplayAll() self.conn.detach_volume(connection_info, self.instance, mount_point, encryption=None) def test_attach_iscsi_disk_to_vm(self): self._create_vm() connection_info = self._test_vmdk_connection_info('iscsi') connection_info['data']['target_portal'] = 'fake_target_host:port' connection_info['data']['target_iqn'] = 'fake_target_iqn' mount_point = '/dev/vdc' discover = ('fake_name', 'fake_uuid') self.mox.StubOutWithMock(volume_util, 'find_st') # simulate target not found volume_util.find_st(mox.IgnoreArg(), connection_info['data'], mox.IgnoreArg()).AndReturn((None, None)) self.mox.StubOutWithMock(volume_util, '_add_iscsi_send_target_host') # rescan gets called with target portal volume_util.rescan_iscsi_hba( self.conn._session, target_portal=connection_info['data']['target_portal']) # simulate target found volume_util.find_st(mox.IgnoreArg(), connection_info['data'], mox.IgnoreArg()).AndReturn(discover) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'attach_disk_to_vm') volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(), self.instance, mox.IgnoreArg(), 'rdmp', device_name=mox.IgnoreArg()) self.mox.ReplayAll() 
self.conn.attach_volume(None, connection_info, self.instance, mount_point) def test_rescan_iscsi_hba(self): fake_target_portal = 'fake_target_host:port' host_storage_sys = vmwareapi_fake._get_objects( "HostStorageSystem").objects[0] iscsi_hba_array = host_storage_sys.get('storageDeviceInfo' '.hostBusAdapter') iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0] # Check the host system does not have the send target self.assertRaises(AttributeError, getattr, iscsi_hba, 'configuredSendTarget') # Rescan HBA with the target portal volume_util.rescan_iscsi_hba(self.conn._session, None, fake_target_portal) # Check if HBA has the target portal configured self.assertEqual('fake_target_host', iscsi_hba.configuredSendTarget[0].address) # Rescan HBA with same portal volume_util.rescan_iscsi_hba(self.conn._session, None, fake_target_portal) self.assertEqual(1, len(iscsi_hba.configuredSendTarget)) def test_find_st(self): data = {'target_portal': 'fake_target_host:port', 'target_iqn': 'fake_target_iqn'} host = vmwareapi_fake._get_objects('HostSystem').objects[0] host._add_iscsi_target(data) result = volume_util.find_st(self.conn._session, data) self.assertEqual(('fake-device', 'fake-uuid'), result) def test_detach_iscsi_disk_from_vm(self): self._create_vm() connection_info = self._test_vmdk_connection_info('iscsi') connection_info['data']['target_portal'] = 'fake_target_portal' connection_info['data']['target_iqn'] = 'fake_target_iqn' mount_point = '/dev/vdc' find = ('fake_name', 'fake_uuid') self.mox.StubOutWithMock(volume_util, 'find_st') volume_util.find_st(mox.IgnoreArg(), connection_info['data'], mox.IgnoreArg()).AndReturn(find) self.mox.StubOutWithMock(vm_util, 'get_rdm_disk') device = 'fake_device' vm_util.get_rdm_disk(mox.IgnoreArg(), 'fake_uuid').AndReturn(device) self.mox.StubOutWithMock(volumeops.VMwareVolumeOps, 'detach_disk_from_vm') volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(), self.instance, device, destroy_disk=True) self.mox.ReplayAll() self.conn.detach_volume(connection_info, self.instance, mount_point, encryption=None) def test_connection_info_get(self): self._create_vm() connector = self.conn.get_volume_connector(self.instance) self.assertEqual(connector['ip'], 'test_url') self.assertEqual(connector['host'], 'test_url') self.assertEqual(connector['initiator'], 'iscsi-name') self.assertIn('instance', connector) def test_connection_info_get_after_destroy(self): self._create_vm() self.conn.destroy(self.context, self.instance, self.network_info) connector = self.conn.get_volume_connector(self.instance) self.assertEqual(connector['ip'], 'test_url') self.assertEqual(connector['host'], 'test_url') self.assertEqual(connector['initiator'], 'iscsi-name') self.assertNotIn('instance', connector) def test_refresh_instance_security_rules(self): self.assertRaises(NotImplementedError, self.conn.refresh_instance_security_rules, instance=None) def test_image_aging_image_used(self): self._create_vm() all_instances = [self.instance] self.conn.manage_image_cache(self.context, all_instances) self._cached_files_exist() def _get_timestamp_filename(self): return '%s%s' % (imagecache.TIMESTAMP_PREFIX, timeutils.strtime(at=self.old_time, fmt=imagecache.TIMESTAMP_FORMAT)) def _override_time(self): self.old_time = datetime.datetime(2012, 11, 22, 12, 00, 00) def _fake_get_timestamp_filename(fake): return self._get_timestamp_filename() self.stubs.Set(imagecache.ImageCacheManager, '_get_timestamp_filename', _fake_get_timestamp_filename) def _timestamp_file_exists(self, exists=True): timestamp = ('[%s] 
vmware_base/fake_image_uuid/%s/' % (self.ds, self._get_timestamp_filename())) if exists: self.assertTrue(vmwareapi_fake.get_file(timestamp)) else: self.assertFalse(vmwareapi_fake.get_file(timestamp)) def _image_aging_image_marked_for_deletion(self): self._create_vm(uuid=uuidutils.generate_uuid()) self._cached_files_exist() all_instances = [] self.conn.manage_image_cache(self.context, all_instances) self._cached_files_exist() self._timestamp_file_exists() def test_image_aging_image_marked_for_deletion(self): self._override_time() self._image_aging_image_marked_for_deletion() def _timestamp_file_removed(self): self._override_time() self._image_aging_image_marked_for_deletion() self._create_vm(num_instances=2, uuid=uuidutils.generate_uuid()) self._timestamp_file_exists(exists=False) def test_timestamp_file_removed_spawn(self): self._timestamp_file_removed() def test_timestamp_file_removed_aging(self): self._timestamp_file_removed() ts = self._get_timestamp_filename() ts_path = ('[%s] vmware_base/fake_image_uuid/%s/' % (self.ds, ts)) vmwareapi_fake._add_file(ts_path) self._timestamp_file_exists() all_instances = [self.instance] self.conn.manage_image_cache(self.context, all_instances) self._timestamp_file_exists(exists=False) def test_image_aging_disabled(self): self._override_time() self.flags(remove_unused_base_images=False) self._create_vm() self._cached_files_exist() all_instances = [] self.conn.manage_image_cache(self.context, all_instances) self._cached_files_exist(exists=True) self._timestamp_file_exists(exists=False) def _image_aging_aged(self, aging_time=100): self._override_time() cur_time = datetime.datetime(2012, 11, 22, 12, 00, 10) self.flags(remove_unused_original_minimum_age_seconds=aging_time) self._image_aging_image_marked_for_deletion() all_instances = [] timeutils.set_time_override(cur_time) self.conn.manage_image_cache(self.context, all_instances) def test_image_aging_aged(self): self._image_aging_aged(aging_time=8) self._cached_files_exist(exists=False) def test_image_aging_not_aged(self): self._image_aging_aged() self._cached_files_exist() class VMwareAPIHostTestCase(test.NoDBTestCase): """Unit tests for Vmware API host calls.""" def setUp(self): super(VMwareAPIHostTestCase, self).setUp() self.flags(image_cache_subdirectory_name='vmware_base') vm_util.vm_refs_cache_reset() self.flags(host_ip='test_url', host_username='test_username', host_password='test_pass', group='vmware') vmwareapi_fake.reset() stubs.set_stubs(self.stubs) self.conn = driver.VMwareESXDriver(False) def tearDown(self): super(VMwareAPIHostTestCase, self).tearDown() vmwareapi_fake.cleanup() def test_host_state(self): stats = self.conn.get_host_stats() self.assertEqual(stats['vcpus'], 16) self.assertEqual(stats['disk_total'], 1024) self.assertEqual(stats['disk_available'], 500) self.assertEqual(stats['disk_used'], 1024 - 500) self.assertEqual(stats['host_memory_total'], 1024) self.assertEqual(stats['host_memory_free'], 1024 - 500) self.assertEqual(stats['hypervisor_version'], 5000000) supported_instances = [('i686', 'vmware', 'hvm'), ('x86_64', 'vmware', 'hvm')] self.assertEqual(stats['supported_instances'], supported_instances) def _test_host_action(self, method, action, expected=None): result = method('host', action) self.assertEqual(result, expected) def test_host_reboot(self): self._test_host_action(self.conn.host_power_action, 'reboot') def test_host_shutdown(self): self._test_host_action(self.conn.host_power_action, 'shutdown') def test_host_startup(self): 
self._test_host_action(self.conn.host_power_action, 'startup') def test_host_maintenance_on(self): self._test_host_action(self.conn.host_maintenance_mode, True) def test_host_maintenance_off(self): self._test_host_action(self.conn.host_maintenance_mode, False) def test_get_host_uptime(self): result = self.conn.get_host_uptime('host') self.assertEqual('Please refer to test_url for the uptime', result) class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase): def setUp(self): super(VMwareAPIVCDriverTestCase, self).setUp() cluster_name = 'test_cluster' cluster_name2 = 'test_cluster2' self.flags(cluster_name=[cluster_name, cluster_name2], api_retry_count=1, task_poll_interval=10, datastore_regex='.*', group='vmware') self.flags(vnc_enabled=False, image_cache_subdirectory_name='vmware_base') vmwareapi_fake.reset(vc=True) self.conn = driver.VMwareVCDriver(None, False) self.node_name = self.conn._resources.keys()[0] self.node_name2 = self.conn._resources.keys()[1] if cluster_name2 in self.node_name2: self.ds = 'ds1' else: self.ds = 'ds2' self.vnc_host = 'ha-host' def tearDown(self): super(VMwareAPIVCDriverTestCase, self).tearDown() vmwareapi_fake.cleanup() def test_list_instances(self): instances = self.conn.list_instances() self.assertEqual(0, len(instances)) def test_list_instances_from_nodes(self): # Create instance on node1 self._create_vm(self.node_name) # Create instances on the other node self._create_vm(self.node_name2, num_instances=2) self._create_vm(self.node_name2, num_instances=3) node1_vmops = self.conn._get_vmops_for_compute_node(self.node_name) node2_vmops = self.conn._get_vmops_for_compute_node(self.node_name2) self.assertEqual(1, len(node1_vmops.list_instances())) self.assertEqual(2, len(node2_vmops.list_instances())) self.assertEqual(3, len(self.conn.list_instances())) def _setup_mocks_for_session(self, mock_init): mock_init.return_value = None vcdriver = driver.VMwareVCDriver(None, False) vcdriver._session = mock.Mock() return vcdriver @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__') def test_init_host_and_cleanup_host(self, mock_init): vcdriver = self._setup_mocks_for_session(mock_init) vcdriver.init_host("foo") vcdriver._session._create_session.assert_called_once() vcdriver.cleanup_host("foo") vcdriver._session.vim.client.service.Logout.assert_called_once() @mock.patch('nova.virt.vmwareapi.driver.LOG') @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__') def test_cleanup_host_with_no_login(self, mock_init, mock_logger): vcdriver = self._setup_mocks_for_session(mock_init) vcdriver.init_host("foo") vcdriver._session._create_session.assert_called_once() # Not logged in... 
# observe that no exceptions were thrown mock_sc = mock.Mock() vcdriver._session.vim.retrieve_service_content.return_value = mock_sc web_fault = suds.WebFault(mock.Mock(), mock.Mock()) vcdriver._session.vim.client.service.Logout.side_effect = web_fault vcdriver.cleanup_host("foo") # assert that the mock Logout method was never called vcdriver._session.vim.client.service.Logout.assert_called_once() mock_logger.debug.assert_called_once() def test_host_power_action(self): self.assertRaises(NotImplementedError, self.conn.host_power_action, 'host', 'action') def test_host_maintenance_mode(self): self.assertRaises(NotImplementedError, self.conn.host_maintenance_mode, 'host', 'mode') def test_set_host_enabled(self): self.assertRaises(NotImplementedError, self.conn.set_host_enabled, 'host', 'state') def test_datastore_regex_configured(self): for node in self.conn._resources.keys(): self.assertEqual(self.conn._datastore_regex, self.conn._resources[node]['vmops']._datastore_regex) def test_get_available_resource(self): stats = self.conn.get_available_resource(self.node_name) cpu_info = {"model": ["Intel(R) Xeon(R)", "Intel(R) Xeon(R)"], "vendor": ["Intel", "Intel"], "topology": {"cores": 16, "threads": 32}} self.assertEqual(stats['vcpus'], 32) self.assertEqual(stats['local_gb'], 1024) self.assertEqual(stats['local_gb_used'], 1024 - 500) self.assertEqual(stats['memory_mb'], 1000) self.assertEqual(stats['memory_mb_used'], 500) self.assertEqual(stats['hypervisor_type'], 'VMware vCenter Server') self.assertEqual(stats['hypervisor_version'], 5001000) self.assertEqual(stats['hypervisor_hostname'], self.node_name) self.assertEqual(stats['cpu_info'], jsonutils.dumps(cpu_info)) self.assertEqual(stats['supported_instances'], '[["i686", "vmware", "hvm"], ["x86_64", "vmware", "hvm"]]') def test_invalid_datastore_regex(self): # Tests if we raise an exception for Invalid Regular Expression in # vmware_datastore_regex self.flags(cluster_name=['test_cluster'], datastore_regex='fake-ds(01', group='vmware') self.assertRaises(exception.InvalidInput, driver.VMwareVCDriver, None) def test_get_available_nodes(self): nodelist = self.conn.get_available_nodes() self.assertEqual(len(nodelist), 2) self.assertIn(self.node_name, nodelist) self.assertIn(self.node_name2, nodelist) def test_spawn_multiple_node(self): def fake_is_neutron(): return False self.stubs.Set(nova_utils, 'is_neutron', fake_is_neutron) uuid1 = uuidutils.generate_uuid() uuid2 = uuidutils.generate_uuid() self._create_vm(node=self.node_name, num_instances=1, uuid=uuid1) info = self.conn.get_info({'uuid': uuid1, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) self.conn.destroy(self.context, self.instance, self.network_info) self._create_vm(node=self.node_name2, num_instances=1, uuid=uuid2) info = self.conn.get_info({'uuid': uuid2, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_finish_migration_power_on(self): self._test_finish_migration(power_on=True) self.assertEqual(True, self.power_on_called) def test_finish_migration_power_off(self): self._test_finish_migration(power_on=False) self.assertEqual(False, self.power_on_called) def test_finish_migration_power_on_resize(self): self._test_finish_migration(power_on=True, resize_instance=True) self.assertEqual(True, self.power_on_called) def test_finish_revert_migration_power_on(self): self._test_finish_revert_migration(power_on=True) self.assertEqual(True, self.power_on_called) def test_finish_revert_migration_power_off(self): 
self._test_finish_revert_migration(power_on=False) self.assertEqual(False, self.power_on_called) def test_snapshot(self): # Ensure VMwareVCVMOps's get_copy_virtual_disk_spec is getting called # two times self.mox.StubOutWithMock(vmops.VMwareVCVMOps, 'get_copy_virtual_disk_spec') self.conn._vmops.get_copy_virtual_disk_spec( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None) self.conn._vmops.get_copy_virtual_disk_spec( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None) self.mox.ReplayAll() self._create_vm() self._test_snapshot() def test_snapshot_using_file_manager(self): self._create_vm() uuid_str = uuidutils.generate_uuid() self.mox.StubOutWithMock(uuidutils, 'generate_uuid') uuidutils.generate_uuid().AndReturn(uuid_str) self.mox.StubOutWithMock(ds_util, 'file_delete') # Check calls for delete vmdk and -flat.vmdk pair ds_util.file_delete(mox.IgnoreArg(), "[%s] vmware_temp/%s-flat.vmdk" % (self.ds, uuid_str), mox.IgnoreArg()).AndReturn(None) ds_util.file_delete(mox.IgnoreArg(), "[%s] vmware_temp/%s.vmdk" % (self.ds, uuid_str), mox.IgnoreArg()).AndReturn(None) self.mox.ReplayAll() self._test_snapshot() def test_spawn_invalid_node(self): self._create_instance(node='InvalidNodeName') self.assertRaises(exception.NotFound, self.conn.spawn, self.context, self.instance, self.image, injected_files=[], admin_password=None, network_info=self.network_info, block_device_info=None) def test_spawn_with_sparse_image(self): # Only a sparse disk image triggers the copy self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties') result = [1024, {"vmware_ostype": "otherGuest", "vmware_adaptertype": "lsiLogic", "vmware_disktype": "sparse"}] vmware_images.get_vmdk_size_and_properties( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(result) # Ensure VMwareVCVMOps's get_copy_virtual_disk_spec is getting called # two times self.mox.StubOutWithMock(vmops.VMwareVCVMOps, 'get_copy_virtual_disk_spec') self.conn._vmops.get_copy_virtual_disk_spec( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None) self.conn._vmops.get_copy_virtual_disk_spec( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None) self.mox.ReplayAll() self._create_vm() info = self.conn.get_info({'uuid': self.uuid, 'node': self.instance_node}) self._check_vm_info(info, power_state.RUNNING) def test_plug_vifs(self): # Check to make sure the method raises NotImplementedError. self._create_instance() self.assertRaises(NotImplementedError, self.conn.plug_vifs, instance=self.instance, network_info=None) def test_unplug_vifs(self): # Check to make sure the method raises NotImplementedError. 
self._create_instance() self.assertRaises(NotImplementedError, self.conn.unplug_vifs, instance=self.instance, network_info=None) def test_migrate_disk_and_power_off(self): def fake_update_instance_progress(context, instance, step, total_steps): pass def fake_get_host_ref_from_name(dest): return None self._create_vm() vm_ref_orig = vm_util.get_vm_ref(self.conn._session, self.instance) flavor = {'name': 'fake', 'flavorid': 'fake_id'} self.stubs.Set(self.conn._vmops, "_update_instance_progress", fake_update_instance_progress) self.stubs.Set(self.conn._vmops, "_get_host_ref_from_name", fake_get_host_ref_from_name) self.conn.migrate_disk_and_power_off(self.context, self.instance, 'fake_dest', flavor, None) vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance) self.assertNotEqual(vm_ref_orig.value, vm_ref.value, "These should be different") def test_disassociate_vmref_from_instance(self): self._create_vm() vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance) vm_util.disassociate_vmref_from_instance(self.conn._session, self.instance, vm_ref, "-backup") self.assertRaises(exception.InstanceNotFound, vm_util.get_vm_ref, self.conn._session, self.instance) def test_clone_vmref_for_instance(self): self._create_vm() vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance) vm_util.disassociate_vmref_from_instance(self.conn._session, self.instance, vm_ref, "-backup") host_ref = vmwareapi_fake._get_object_refs("HostSystem")[0] ds_ref = vmwareapi_fake._get_object_refs("Datastore")[0] dc_obj = vmwareapi_fake._get_objects("Datacenter").objects[0] vm_util.clone_vmref_for_instance(self.conn._session, self.instance, vm_ref, host_ref, ds_ref, dc_obj.get("vmFolder")) self.assertIsNotNone( vm_util.get_vm_ref(self.conn._session, self.instance), "No VM found") cloned_vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance) self.assertNotEqual(vm_ref.value, cloned_vm_ref.value, "Reference for the cloned VM should be different") vm_obj = vmwareapi_fake._get_vm_mdo(vm_ref) cloned_vm_obj = vmwareapi_fake._get_vm_mdo(cloned_vm_ref) self.assertEqual(vm_obj.name, self.instance['uuid'] + "-backup", "Original VM name should be with suffix -backup") self.assertEqual(cloned_vm_obj.name, self.instance['uuid'], "VM name does not match instance['uuid']") self.assertRaises(error_util.MissingParameter, vm_util.clone_vmref_for_instance, self.conn._session, self.instance, None, host_ref, ds_ref, dc_obj.get("vmFolder")) def test_associate_vmref_for_instance(self): self._create_vm() vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance) # First disassociate the VM from the instance so that we have a VM # to later associate using the associate_vmref_for_instance method vm_util.disassociate_vmref_from_instance(self.conn._session, self.instance, vm_ref, "-backup") # Ensure that the VM is indeed disassociated and that we cannot find # the VM using the get_vm_ref method self.assertRaises(exception.InstanceNotFound, vm_util.get_vm_ref, self.conn._session, self.instance) # Associate the VM back to the instance vm_util.associate_vmref_for_instance(self.conn._session, self.instance, suffix="-backup") # Verify if we can get the VM reference self.assertIsNotNone( vm_util.get_vm_ref(self.conn._session, self.instance), "No VM found") def test_confirm_migration(self): self._create_vm() self.conn.confirm_migration(self.context, self.instance, None) def test_spawn_attach_volume_vmdk(self): self._spawn_attach_volume_vmdk(vc_support=True) def test_spawn_attach_volume_vmdk_no_image_ref(self): 
        self._spawn_attach_volume_vmdk(set_image_ref=False, vc_support=True)

    def test_pause(self):
        # Tests that the VMwareVCDriver does not implement the pause method.
        self._create_instance()
        self.assertRaises(NotImplementedError, self.conn.pause,
                          self.instance)

    def test_unpause(self):
        # Tests that the VMwareVCDriver does not implement the unpause method.
        self._create_instance()
        self.assertRaises(NotImplementedError, self.conn.unpause,
                          self.instance)

    def test_datastore_dc_map(self):
        vmops = self.conn._resources[self.node_name]['vmops']
        self.assertEqual({}, vmops._datastore_dc_mapping)
        self._create_vm()
        # currently there are 2 data stores
        self.assertEqual(2, len(vmops._datastore_dc_mapping))

    def test_rollback_live_migration_at_destination(self):
        with mock.patch.object(self.conn, "destroy") as mock_destroy:
            self.conn.rollback_live_migration_at_destination(self.context,
                                                             "instance",
                                                             [], None)
            mock_destroy.assert_called_once_with(self.context,
                                                 "instance",
                                                 [], None)
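

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original suite: several tests above
# (the delete-exception and move-poll-exception spawn tests) stub the
# session's _call_method so that one specific vSphere task type returns an
# error task built with vmwareapi_fake.create_task().  A reusable factory for
# that pattern could look like the hypothetical helper below.  The helper
# name is an assumption; the calls it makes (vmwareapi_fake.create_task and
# the saved session call method) are the same ones the tests above already
# use, and it assumes the test case saved the real call method as
# `testcase.call_method` before patching, as those tests do.
# ---------------------------------------------------------------------------
def _make_failing_call_method(testcase, failing_method, fault=None):
    """Return a _call_method replacement that fails `failing_method` tasks."""
    def fake_call_method(module, method, *args, **kwargs):
        # Delegate to the real session call method saved on the test case.
        task_ref = testcase.call_method(module, method, *args, **kwargs)
        if method == failing_method:
            # Record that the failure path was exercised and hand back a
            # fake task object in the "error" state, optionally carrying a
            # specific fault such as vmwareapi_fake.FileAlreadyExists().
            testcase.exception = True
            task_mdo = vmwareapi_fake.create_task(method, "error",
                                                  error_fault=fault)
            return task_mdo.obj
        return task_ref
    return fake_call_method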
import itertools
import warnings

from mongoengine.base.common import _document_registry
from mongoengine.base.fields import (
    BaseField,
    ComplexBaseField,
    ObjectIdField,
)
from mongoengine.common import _import_class
from mongoengine.errors import InvalidDocumentError
from mongoengine.queryset import (
    DO_NOTHING,
    DoesNotExist,
    MultipleObjectsReturned,
    QuerySetManager,
)

__all__ = ("DocumentMetaclass", "TopLevelDocumentMetaclass")


class DocumentMetaclass(type):
    """Metaclass for all documents."""

    # TODO lower complexity of this method
    def __new__(mcs, name, bases, attrs):
        flattened_bases = mcs._get_bases(bases)
        super_new = super().__new__

        # If a base class just call super
        metaclass = attrs.get("my_metaclass")
        if metaclass and issubclass(metaclass, DocumentMetaclass):
            return super_new(mcs, name, bases, attrs)

        attrs["_is_document"] = attrs.get("_is_document", False)
        attrs["_cached_reference_fields"] = []

        # EmbeddedDocuments could have meta data for inheritance
        if "meta" in attrs:
            attrs["_meta"] = attrs.pop("meta")

        # EmbeddedDocuments should inherit meta data
        if "_meta" not in attrs:
            meta = MetaDict()
            for base in flattened_bases[::-1]:
                # Add any mixin metadata from plain objects
                if hasattr(base, "meta"):
                    meta.merge(base.meta)
                elif hasattr(base, "_meta"):
                    meta.merge(base._meta)
            attrs["_meta"] = meta
            attrs["_meta"][
                "abstract"
            ] = False  # 789: EmbeddedDocument shouldn't inherit abstract

        # If allow_inheritance is True, add a "_cls" string field to the attrs
        if attrs["_meta"].get("allow_inheritance"):
            StringField = _import_class("StringField")
            attrs["_cls"] = StringField()

        # Handle document Fields

        # Merge all fields from subclasses
        doc_fields = {}
        for base in flattened_bases[::-1]:
            if hasattr(base, "_fields"):
                doc_fields.update(base._fields)

            # Standard object mixin - merge in any Fields
            if not hasattr(base, "_meta"):
                base_fields = {}
                for attr_name, attr_value in base.__dict__.items():
                    if not isinstance(attr_value, BaseField):
                        continue
                    attr_value.name = attr_name
                    if not attr_value.db_field:
                        attr_value.db_field = attr_name
                    base_fields[attr_name] = attr_value

                doc_fields.update(base_fields)

        # Discover any document fields
        field_names = {}
        for attr_name, attr_value in attrs.items():
            if not isinstance(attr_value, BaseField):
                continue
            attr_value.name = attr_name
            if not attr_value.db_field:
                attr_value.db_field = attr_name
            doc_fields[attr_name] = attr_value

            # Count names to ensure no db_field redefinitions
            field_names[attr_value.db_field] = (
                field_names.get(attr_value.db_field, 0) + 1
            )

        # Ensure no duplicate db_fields
        duplicate_db_fields = [k for k, v in field_names.items() if v > 1]
        if duplicate_db_fields:
            msg = "Multiple db_fields defined for: %s " % ", ".join(duplicate_db_fields)
            raise InvalidDocumentError(msg)

        # Set _fields and db_field maps
        attrs["_fields"] = doc_fields
        attrs["_db_field_map"] = {
            k: getattr(v, "db_field", k) for k, v in doc_fields.items()
        }
        attrs["_reverse_db_field_map"] = {
            v: k for k, v in attrs["_db_field_map"].items()
        }
        attrs["_fields_ordered"] = tuple(
            i[1]
            for i in sorted((v.creation_counter, v.name) for v in doc_fields.values())
        )

        #
        # Set document hierarchy
        #
        superclasses = ()
        class_name = [name]
        for base in flattened_bases:
            if not getattr(base, "_is_base_cls", True) and not getattr(
                base, "_meta", {}
            ).get("abstract", True):
                # Collate hierarchy for _cls and _subclasses
                class_name.append(base.__name__)

            if hasattr(base, "_meta"):
                # Warn if allow_inheritance isn't set and prevent
                # inheritance of classes where inheritance is set to False
                allow_inheritance =
base._meta.get("allow_inheritance") if not allow_inheritance and not base._meta.get("abstract"): raise ValueError( "Document %s may not be subclassed. " 'To enable inheritance, use the "allow_inheritance" meta attribute.' % base.__name__ ) # Get superclasses from last base superclass document_bases = [b for b in flattened_bases if hasattr(b, "_class_name")] if document_bases: superclasses = document_bases[0]._superclasses superclasses += (document_bases[0]._class_name,) _cls = ".".join(reversed(class_name)) attrs["_class_name"] = _cls attrs["_superclasses"] = superclasses attrs["_subclasses"] = (_cls,) attrs["_types"] = attrs["_subclasses"] # TODO depreciate _types # Create the new_class new_class = super_new(mcs, name, bases, attrs) # Set _subclasses for base in document_bases: if _cls not in base._subclasses: base._subclasses += (_cls,) base._types = base._subclasses # TODO depreciate _types ( Document, EmbeddedDocument, DictField, CachedReferenceField, ) = mcs._import_classes() if issubclass(new_class, Document): new_class._collection = None # Add class to the _document_registry _document_registry[new_class._class_name] = new_class # Handle delete rules for field in new_class._fields.values(): f = field if f.owner_document is None: f.owner_document = new_class delete_rule = getattr(f, "reverse_delete_rule", DO_NOTHING) if isinstance(f, CachedReferenceField): if issubclass(new_class, EmbeddedDocument): raise InvalidDocumentError( "CachedReferenceFields is not allowed in EmbeddedDocuments" ) if f.auto_sync: f.start_listener() f.document_type._cached_reference_fields.append(f) if isinstance(f, ComplexBaseField) and hasattr(f, "field"): delete_rule = getattr(f.field, "reverse_delete_rule", DO_NOTHING) if isinstance(f, DictField) and delete_rule != DO_NOTHING: msg = ( "Reverse delete rules are not supported " "for %s (field: %s)" % (field.__class__.__name__, field.name) ) raise InvalidDocumentError(msg) f = field.field if delete_rule != DO_NOTHING: if issubclass(new_class, EmbeddedDocument): msg = ( "Reverse delete rules are not supported for " "EmbeddedDocuments (field: %s)" % field.name ) raise InvalidDocumentError(msg) f.document_type.register_delete_rule(new_class, field.name, delete_rule) if ( field.name and hasattr(Document, field.name) and EmbeddedDocument not in new_class.mro() ): msg = "%s is a document method and not a valid field name" % field.name raise InvalidDocumentError(msg) return new_class @classmethod def _get_bases(mcs, bases): if isinstance(bases, BasesTuple): return bases seen = [] bases = mcs.__get_bases(bases) unique_bases = (b for b in bases if not (b in seen or seen.append(b))) return BasesTuple(unique_bases) @classmethod def __get_bases(mcs, bases): for base in bases: if base is object: continue yield base yield from mcs.__get_bases(base.__bases__) @classmethod def _import_classes(mcs): Document = _import_class("Document") EmbeddedDocument = _import_class("EmbeddedDocument") DictField = _import_class("DictField") CachedReferenceField = _import_class("CachedReferenceField") return Document, EmbeddedDocument, DictField, CachedReferenceField class TopLevelDocumentMetaclass(DocumentMetaclass): """Metaclass for top-level documents (i.e. documents that have their own collection in the database. 
""" def __new__(mcs, name, bases, attrs): flattened_bases = mcs._get_bases(bases) super_new = super().__new__ # Set default _meta data if base class, otherwise get user defined meta if attrs.get("my_metaclass") == TopLevelDocumentMetaclass: # defaults attrs["_meta"] = { "abstract": True, "max_documents": None, "max_size": None, "ordering": [], # default ordering applied at runtime "indexes": [], # indexes to be ensured at runtime "id_field": None, "index_background": False, "index_opts": None, "delete_rules": None, # allow_inheritance can be True, False, and None. True means # "allow inheritance", False means "don't allow inheritance", # None means "do whatever your parent does, or don't allow # inheritance if you're a top-level class". "allow_inheritance": None, } attrs["_is_base_cls"] = True attrs["_meta"].update(attrs.get("meta", {})) else: attrs["_meta"] = attrs.get("meta", {}) # Explicitly set abstract to false unless set attrs["_meta"]["abstract"] = attrs["_meta"].get("abstract", False) attrs["_is_base_cls"] = False # Set flag marking as document class - as opposed to an object mixin attrs["_is_document"] = True # Ensure queryset_class is inherited if "objects" in attrs: manager = attrs["objects"] if hasattr(manager, "queryset_class"): attrs["_meta"]["queryset_class"] = manager.queryset_class # Clean up top level meta if "meta" in attrs: del attrs["meta"] # Find the parent document class parent_doc_cls = [ b for b in flattened_bases if b.__class__ == TopLevelDocumentMetaclass ] parent_doc_cls = None if not parent_doc_cls else parent_doc_cls[0] # Prevent classes setting collection different to their parents # If parent wasn't an abstract class if ( parent_doc_cls and "collection" in attrs.get("_meta", {}) and not parent_doc_cls._meta.get("abstract", True) ): msg = "Trying to set a collection on a subclass (%s)" % name warnings.warn(msg, SyntaxWarning) del attrs["_meta"]["collection"] # Ensure abstract documents have abstract bases if attrs.get("_is_base_cls") or attrs["_meta"].get("abstract"): if parent_doc_cls and not parent_doc_cls._meta.get("abstract", False): msg = "Abstract document cannot have non-abstract base" raise ValueError(msg) return super_new(mcs, name, bases, attrs) # Merge base class metas. # Uses a special MetaDict that handles various merging rules meta = MetaDict() for base in flattened_bases[::-1]: # Add any mixin metadata from plain objects if hasattr(base, "meta"): meta.merge(base.meta) elif hasattr(base, "_meta"): meta.merge(base._meta) # Set collection in the meta if its callable if getattr(base, "_is_document", False) and not base._meta.get("abstract"): collection = meta.get("collection", None) if callable(collection): meta["collection"] = collection(base) meta.merge(attrs.get("_meta", {})) # Top level meta # Only simple classes (i.e. direct subclasses of Document) may set # allow_inheritance to False. If the base Document allows inheritance, # none of its subclasses can override allow_inheritance to False. 
simple_class = all( b._meta.get("abstract") for b in flattened_bases if hasattr(b, "_meta") ) if ( not simple_class and meta["allow_inheritance"] is False and not meta["abstract"] ): raise ValueError( "Only direct subclasses of Document may set " '"allow_inheritance" to False' ) # Set default collection name if "collection" not in meta: meta["collection"] = ( "".join("_%s" % c if c.isupper() else c for c in name) .strip("_") .lower() ) attrs["_meta"] = meta # Call super and get the new class new_class = super_new(mcs, name, bases, attrs) meta = new_class._meta # Set index specifications meta["index_specs"] = new_class._build_index_specs(meta["indexes"]) # If collection is a callable - call it and set the value collection = meta.get("collection") if callable(collection): new_class._meta["collection"] = collection(new_class) # Provide a default queryset unless exists or one has been set if "objects" not in dir(new_class): new_class.objects = QuerySetManager() # Validate the fields and set primary key if needed for field_name, field in new_class._fields.items(): if field.primary_key: # Ensure only one primary key is set current_pk = new_class._meta.get("id_field") if current_pk and current_pk != field_name: raise ValueError("Cannot override primary key field") # Set primary key if not current_pk: new_class._meta["id_field"] = field_name new_class.id = field # If the document doesn't explicitly define a primary key field, create # one. Make it an ObjectIdField and give it a non-clashing name ("id" # by default, but can be different if that one's taken). if not new_class._meta.get("id_field"): id_name, id_db_name = mcs.get_auto_id_names(new_class) new_class._meta["id_field"] = id_name new_class._fields[id_name] = ObjectIdField(db_field=id_db_name) new_class._fields[id_name].name = id_name new_class.id = new_class._fields[id_name] new_class._db_field_map[id_name] = id_db_name new_class._reverse_db_field_map[id_db_name] = id_name # Prepend the ID field to _fields_ordered (so that it's *always* # the first field). new_class._fields_ordered = (id_name,) + new_class._fields_ordered # Merge in exceptions with parent hierarchy. exceptions_to_merge = (DoesNotExist, MultipleObjectsReturned) module = attrs.get("__module__") for exc in exceptions_to_merge: name = exc.__name__ parents = tuple( getattr(base, name) for base in flattened_bases if hasattr(base, name) ) or (exc,) # Create a new exception and set it as an attribute on the new # class. exception = type(name, parents, {"__module__": module}) setattr(new_class, name, exception) return new_class @classmethod def get_auto_id_names(mcs, new_class): """Find a name for the automatic ID field for the given new class. Return a two-element tuple where the first item is the field name (i.e. the attribute name on the object) and the second element is the DB field name (i.e. the name of the key stored in MongoDB). Defaults to ('id', '_id'), or generates a non-clashing name in the form of ('auto_id_X', '_auto_id_X') if the default name is already taken. 
""" id_name, id_db_name = ("id", "_id") existing_fields = {field_name for field_name in new_class._fields} existing_db_fields = {v.db_field for v in new_class._fields.values()} if id_name not in existing_fields and id_db_name not in existing_db_fields: return id_name, id_db_name id_basename, id_db_basename, i = ("auto_id", "_auto_id", 0) for i in itertools.count(): id_name = f"{id_basename}_{i}" id_db_name = f"{id_db_basename}_{i}" if id_name not in existing_fields and id_db_name not in existing_db_fields: return id_name, id_db_name class MetaDict(dict): """Custom dictionary for meta classes. Handles the merging of set indexes """ _merge_options = ("indexes",) def merge(self, new_options): for k, v in new_options.items(): if k in self._merge_options: self[k] = self.get(k, []) + v else: self[k] = v class BasesTuple(tuple): """Special class to handle introspection of bases tuple in __new__""" pass
# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from keystone import exception class FederationDriverBase(object, metaclass=abc.ABCMeta): @abc.abstractmethod def create_idp(self, idp_id, idp): """Create an identity provider. :param idp_id: ID of IdP object :type idp_id: string :param idp: idp object :type idp: dict :returns: idp ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_idp(self, idp_id): """Delete an identity provider. :param idp_id: ID of IdP object :type idp_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_idp(self, idp_id): """Get an identity provider by ID. :param idp_id: ID of IdP object :type idp_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :returns: idp ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_idp_from_remote_id(self, remote_id): """Get an identity provider by remote ID. :param remote_id: ID of remote IdP :type idp_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :returns: idp ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_idp(self, idp_id, idp): """Update an identity provider by ID. :param idp_id: ID of IdP object :type idp_id: string :param idp: idp object :type idp: dict :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :returns: idp ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_protocol(self, idp_id, protocol_id, protocol): """Add an IdP-Protocol configuration. :param idp_id: ID of IdP object :type idp_id: string :param protocol_id: ID of protocol object :type protocol_id: string :param protocol: protocol object :type protocol: dict :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :returns: protocol ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_protocol(self, idp_id, protocol_id, protocol): """Change an IdP-Protocol configuration. :param idp_id: ID of IdP object :type idp_id: string :param protocol_id: ID of protocol object :type protocol_id: string :param protocol: protocol object :type protocol: dict :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :raises keystone.exception.FederatedProtocolNotFound: If the federated protocol cannot be found. :returns: protocol ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_protocol(self, idp_id, protocol_id): """Get an IdP-Protocol configuration. :param idp_id: ID of IdP object :type idp_id: string :param protocol_id: ID of protocol object :type protocol_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. 
:raises keystone.exception.FederatedProtocolNotFound: If the federated protocol cannot be found. :returns: protocol ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_protocols(self, idp_id): """List an IdP's supported protocols. :param idp_id: ID of IdP object :type idp_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :returns: list of protocol ref :rtype: list of dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_protocol(self, idp_id, protocol_id): """Delete an IdP-Protocol configuration. :param idp_id: ID of IdP object :type idp_id: string :param protocol_id: ID of protocol object :type protocol_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :raises keystone.exception.FederatedProtocolNotFound: If the federated protocol cannot be found. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_mapping(self, mapping_id, mapping): """Create a mapping. :param mapping_id: ID of mapping object :type mapping_id: string :param mapping: mapping ref with mapping name :type mapping: dict :returns: mapping ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_mapping(self, mapping_id): """Delete a mapping. :param mapping_id: id of mapping to delete :type mapping_ref: string :returns: None """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_mapping(self, mapping_id, mapping_ref): """Update a mapping. :param mapping_id: id of mapping to update :type mapping_id: string :param mapping_ref: new mapping ref :type mapping_ref: dict :returns: mapping ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_mappings(self): """List all mappings. :returns: list of mapping refs :rtype: list of dicts """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_mapping(self, mapping_id): """Get a mapping, returns the mapping based on mapping_id. :param mapping_id: id of mapping to get :type mapping_ref: string :raises keystone.exception.MappingNotFound: If the mapping cannot be found. :returns: mapping ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_mapping_from_idp_and_protocol(self, idp_id, protocol_id): """Get mapping based on idp_id and protocol_id. :param idp_id: id of the identity provider :type idp_id: string :param protocol_id: id of the protocol :type protocol_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :raises keystone.exception.FederatedProtocolNotFound: If the federated protocol cannot be found. :returns: mapping ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_sp(self, sp_id, sp): """Create a service provider. :param sp_id: id of the service provider :type sp_id: string :param sp: service provider object :type sp: dict :returns: service provider ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_sp(self, sp_id): """Delete a service provider. :param sp_id: id of the service provider :type sp_id: string :raises keystone.exception.ServiceProviderNotFound: If the service provider doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_sp(self, sp_id): """Get a service provider. 
:param sp_id: id of the service provider :type sp_id: string :returns: service provider ref :rtype: dict :raises keystone.exception.ServiceProviderNotFound: If the service provider doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_sp(self, sp_id, sp): """Update a service provider. :param sp_id: id of the service provider :type sp_id: string :param sp: service provider object :type sp: dict :returns: service provider ref :rtype: dict :raises keystone.exception.ServiceProviderNotFound: If the service provider doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_enabled_service_providers(self): """List enabled service providers for Service Catalog. Service Provider in a catalog contains three attributes: ``id``, ``auth_url``, ``sp_url``, where: - id is a unique, user-defined identifier for service provider object - auth_url is an authentication URL of remote Keystone - sp_url is a URL accessible at the remote service provider where SAML assertion is transmitted. :returns: list of dictionaries with enabled service providers :rtype: list of dicts """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_idps(self, hints): """List all identity providers. :param hints: filter hints which the driver should implement if at all possible. :returns: list of idp refs :rtype: list of dicts :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_sps(self, hints): """List all service providers. :param hints: filter hints which the driver should implement if at all possible. :returns: List of service provider ref objects :rtype: list of dicts :raises keystone.exception.ServiceProviderNotFound: If the SP doesn't exist. """ raise exception.NotImplemented() # pragma: no cover
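# --- Illustration (not part of keystone) -------------------------------------
# A hedged sketch of a concrete backend for FederationDriverBase above, using
# an in-memory dict. Only the IdP methods are shown; every other abstract
# method must be implemented before the class can be instantiated. The
# IdentityProviderNotFound constructor kwarg is assumed from its message
# format and may differ between keystone releases.
class InMemoryFederationDriver(FederationDriverBase):

    def __init__(self):
        self._idps = {}

    def create_idp(self, idp_id, idp):
        ref = dict(idp, id=idp_id)
        self._idps[idp_id] = ref
        return ref

    def get_idp(self, idp_id):
        try:
            return self._idps[idp_id]
        except KeyError:
            raise exception.IdentityProviderNotFound(idp_id=idp_id)

    def update_idp(self, idp_id, idp):
        ref = self.get_idp(idp_id)
        ref.update(idp)
        return ref

    def delete_idp(self, idp_id):
        self.get_idp(idp_id)  # raises IdentityProviderNotFound if missing
        del self._idps[idp_id]

    # create_protocol, get_mapping, create_sp, list_sps, ... omitted here.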
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # Exercise the listtransactions API from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.mininode import CTransaction, COIN from io import BytesIO def txFromHex(hexstring): tx = CTransaction() f = BytesIO(hex_str_to_bytes(hexstring)) tx.deserialize(f) return tx class ListTransactionsTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 4 self.setup_clean_chain = False def setup_nodes(self): #This test requires mocktime enable_mocktime() return start_nodes(self.num_nodes, self.options.tmpdir) def run_test(self): # Simple send, 0 to 1: txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) self.sync_all() assert_array_result(self.nodes[0].listtransactions(), {"txid":txid}, {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0}) assert_array_result(self.nodes[1].listtransactions(), {"txid":txid}, {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0}) # mine a block, confirmations should change: self.nodes[0].generate(1) self.sync_all() assert_array_result(self.nodes[0].listtransactions(), {"txid":txid}, {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1}) assert_array_result(self.nodes[1].listtransactions(), {"txid":txid}, {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1}) # send-to-self: txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2) assert_array_result(self.nodes[0].listtransactions(), {"txid":txid, "category":"send"}, {"amount":Decimal("-0.2")}) assert_array_result(self.nodes[0].listtransactions(), {"txid":txid, "category":"receive"}, {"amount":Decimal("0.2")}) # sendmany from node1: twice to self, twice to node2: send_to = { self.nodes[0].getnewaddress() : 0.11, self.nodes[1].getnewaddress() : 0.22, self.nodes[0].getaccountaddress("from1") : 0.33, self.nodes[1].getaccountaddress("toself") : 0.44 } txid = self.nodes[1].sendmany("", send_to) self.sync_all() assert_array_result(self.nodes[1].listtransactions(), {"category":"send","amount":Decimal("-0.11")}, {"txid":txid} ) assert_array_result(self.nodes[0].listtransactions(), {"category":"receive","amount":Decimal("0.11")}, {"txid":txid} ) assert_array_result(self.nodes[1].listtransactions(), {"category":"send","amount":Decimal("-0.22")}, {"txid":txid} ) assert_array_result(self.nodes[1].listtransactions(), {"category":"receive","amount":Decimal("0.22")}, {"txid":txid} ) assert_array_result(self.nodes[1].listtransactions(), {"category":"send","amount":Decimal("-0.33")}, {"txid":txid} ) assert_array_result(self.nodes[0].listtransactions(), {"category":"receive","amount":Decimal("0.33")}, {"txid":txid, "account" : "from1"} ) assert_array_result(self.nodes[1].listtransactions(), {"category":"send","amount":Decimal("-0.44")}, {"txid":txid, "account" : ""} ) assert_array_result(self.nodes[1].listtransactions(), {"category":"receive","amount":Decimal("0.44")}, {"txid":txid, "account" : "toself"} ) multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()]) self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True) txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1) self.nodes[1].generate(1) self.sync_all() assert(len(self.nodes[0].listtransactions("watchonly", 100, 
0, False)) == 0) assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True), {"category":"receive","amount":Decimal("0.1")}, {"txid":txid, "account" : "watchonly"} ) self.run_rbf_opt_in_test() # Check that the opt-in-rbf flag works properly, for sent and received # transactions. def run_rbf_opt_in_test(self): # Check whether a transaction signals opt-in RBF itself def is_opt_in(node, txid): rawtx = node.getrawtransaction(txid, 1) for x in rawtx["vin"]: if x["sequence"] < 0xfffffffe: return True return False # Find an unconfirmed output matching a certain txid def get_unconfirmed_utxo_entry(node, txid_to_match): utxo = node.listunspent(0, 0) for i in utxo: if i["txid"] == txid_to_match: return i return None # 1. Chain a few transactions that don't opt-in. txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1) assert(not is_opt_in(self.nodes[0], txid_1)) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"}) sync_mempools(self.nodes) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"}) # Tx2 will build off txid_1, still not opting in to RBF. utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1) # Create tx2 using createrawtransaction inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}] outputs = {self.nodes[0].getnewaddress(): 0.999} tx2 = self.nodes[1].createrawtransaction(inputs, outputs) tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"] txid_2 = self.nodes[1].sendrawtransaction(tx2_signed) # ...and check the result assert(not is_opt_in(self.nodes[1], txid_2)) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"}) sync_mempools(self.nodes) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"}) # Tx3 will opt-in to RBF utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2) inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}] outputs = {self.nodes[1].getnewaddress(): 0.998} tx3 = self.nodes[0].createrawtransaction(inputs, outputs) tx3_modified = txFromHex(tx3) tx3_modified.vin[0].nSequence = 0 tx3 = bytes_to_hex_str(tx3_modified.serialize()) tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex'] txid_3 = self.nodes[0].sendrawtransaction(tx3_signed) assert(is_opt_in(self.nodes[0], txid_3)) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"}) sync_mempools(self.nodes) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"}) # Tx4 will chain off tx3. Doesn't signal itself, but depends on one # that does. 
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3) inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}] outputs = {self.nodes[0].getnewaddress(): 0.997} tx4 = self.nodes[1].createrawtransaction(inputs, outputs) tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"] txid_4 = self.nodes[1].sendrawtransaction(tx4_signed) assert(not is_opt_in(self.nodes[1], txid_4)) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"}) sync_mempools(self.nodes) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"}) # Replace tx3, and check that tx4 becomes unknown tx3_b = tx3_modified tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee tx3_b = bytes_to_hex_str(tx3_b.serialize()) tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex'] txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True) assert(is_opt_in(self.nodes[0], txid_3b)) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"}) sync_mempools(self.nodes) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"}) # Check gettransaction as well: for n in self.nodes[0:2]: assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no") assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no") assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes") assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes") assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown") # After mining a transaction, it's no longer BIP125-replaceable self.nodes[0].generate(1) assert(txid_3b not in self.nodes[0].getrawmempool()) assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no") assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown") if __name__ == '__main__': ListTransactionsTest().main()
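# --- Illustration (not part of the test) --------------------------------------
# A self-contained toy model of the BIP125 classification exercised above: a
# transaction is replaceable if any of its inputs has nSequence < 0xfffffffe,
# or if any unconfirmed ancestor signals. The mempool structure below is a
# hypothetical simplification, not Bitcoin Core's.

def signals_rbf(sequences):
    return any(seq < 0xfffffffe for seq in sequences)

def bip125_replaceable(txid, mempool):
    """mempool maps txid -> (input_sequences, parent_txids)."""
    if txid not in mempool:
        return "unknown"  # e.g. the parent chain was replaced or confirmed
    sequences, parents = mempool[txid]
    if signals_rbf(sequences):
        return "yes"
    if any(bip125_replaceable(p, mempool) == "yes"
           for p in parents if p in mempool):
        return "yes"
    return "no"

# Mirrors txid_3 (signals explicitly) and txid_4 (inherits from txid_3):
toy_mempool = {
    "tx3": ([0], []),
    "tx4": ([0xffffffff], ["tx3"]),
}
assert bip125_replaceable("tx3", toy_mempool) == "yes"
assert bip125_replaceable("tx4", toy_mempool) == "yes"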
""" Support for IP Webcam, an Android app that acts as a full-featured webcam. For more details about this component, please refer to the documentation at https://home-assistant.io/components/android_ip_webcam/ """ import asyncio import logging from datetime import timedelta import voluptuous as vol from homeassistant.core import callback from homeassistant.const import ( CONF_NAME, CONF_HOST, CONF_PORT, CONF_USERNAME, CONF_PASSWORD, CONF_SENSORS, CONF_SWITCHES, CONF_TIMEOUT, CONF_SCAN_INTERVAL, CONF_PLATFORM) from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers import discovery import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import ( async_dispatcher_send, async_dispatcher_connect) from homeassistant.helpers.entity import Entity from homeassistant.helpers.event import async_track_point_in_utc_time from homeassistant.util.dt import utcnow from homeassistant.components.camera.mjpeg import ( CONF_MJPEG_URL, CONF_STILL_IMAGE_URL) REQUIREMENTS = ['pydroid-ipcam==0.8'] _LOGGER = logging.getLogger(__name__) ATTR_AUD_CONNS = 'Audio Connections' ATTR_HOST = 'host' ATTR_VID_CONNS = 'Video Connections' CONF_MOTION_SENSOR = 'motion_sensor' DATA_IP_WEBCAM = 'android_ip_webcam' DEFAULT_NAME = 'IP Webcam' DEFAULT_PORT = 8080 DEFAULT_TIMEOUT = 10 DOMAIN = 'android_ip_webcam' SCAN_INTERVAL = timedelta(seconds=10) SIGNAL_UPDATE_DATA = 'android_ip_webcam_update' KEY_MAP = { 'audio_connections': 'Audio Connections', 'adet_limit': 'Audio Trigger Limit', 'antibanding': 'Anti-banding', 'audio_only': 'Audio Only', 'battery_level': 'Battery Level', 'battery_temp': 'Battery Temperature', 'battery_voltage': 'Battery Voltage', 'coloreffect': 'Color Effect', 'exposure': 'Exposure Level', 'exposure_lock': 'Exposure Lock', 'ffc': 'Front-facing Camera', 'flashmode': 'Flash Mode', 'focus': 'Focus', 'focus_homing': 'Focus Homing', 'focus_region': 'Focus Region', 'focusmode': 'Focus Mode', 'gps_active': 'GPS Active', 'idle': 'Idle', 'ip_address': 'IPv4 Address', 'ipv6_address': 'IPv6 Address', 'ivideon_streaming': 'Ivideon Streaming', 'light': 'Light Level', 'mirror_flip': 'Mirror Flip', 'motion': 'Motion', 'motion_active': 'Motion Active', 'motion_detect': 'Motion Detection', 'motion_event': 'Motion Event', 'motion_limit': 'Motion Limit', 'night_vision': 'Night Vision', 'night_vision_average': 'Night Vision Average', 'night_vision_gain': 'Night Vision Gain', 'orientation': 'Orientation', 'overlay': 'Overlay', 'photo_size': 'Photo Size', 'pressure': 'Pressure', 'proximity': 'Proximity', 'quality': 'Quality', 'scenemode': 'Scene Mode', 'sound': 'Sound', 'sound_event': 'Sound Event', 'sound_timeout': 'Sound Timeout', 'torch': 'Torch', 'video_connections': 'Video Connections', 'video_chunk_len': 'Video Chunk Length', 'video_recording': 'Video Recording', 'video_size': 'Video Size', 'whitebalance': 'White Balance', 'whitebalance_lock': 'White Balance Lock', 'zoom': 'Zoom' } ICON_MAP = { 'audio_connections': 'mdi:speaker', 'battery_level': 'mdi:battery', 'battery_temp': 'mdi:thermometer', 'battery_voltage': 'mdi:battery-charging-100', 'exposure_lock': 'mdi:camera', 'ffc': 'mdi:camera-front-variant', 'focus': 'mdi:image-filter-center-focus', 'gps_active': 'mdi:crosshairs-gps', 'light': 'mdi:flashlight', 'motion': 'mdi:run', 'night_vision': 'mdi:weather-night', 'overlay': 'mdi:monitor', 'pressure': 'mdi:gauge', 'proximity': 'mdi:map-marker-radius', 'quality': 'mdi:quality-high', 'sound': 'mdi:speaker', 'sound_event': 'mdi:speaker', 
'sound_timeout': 'mdi:speaker', 'torch': 'mdi:white-balance-sunny', 'video_chunk_len': 'mdi:video', 'video_connections': 'mdi:eye', 'video_recording': 'mdi:record-rec', 'whitebalance_lock': 'mdi:white-balance-auto' } SWITCHES = ['exposure_lock', 'ffc', 'focus', 'gps_active', 'night_vision', 'overlay', 'torch', 'whitebalance_lock', 'video_recording'] SENSORS = ['audio_connections', 'battery_level', 'battery_temp', 'battery_voltage', 'light', 'motion', 'pressure', 'proximity', 'sound', 'video_connections'] CONFIG_SCHEMA = vol.Schema({ DOMAIN: vol.All(cv.ensure_list, [vol.Schema({ vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int, vol.Optional(CONF_SCAN_INTERVAL, default=SCAN_INTERVAL): cv.time_period, vol.Inclusive(CONF_USERNAME, 'authentication'): cv.string, vol.Inclusive(CONF_PASSWORD, 'authentication'): cv.string, vol.Optional(CONF_SWITCHES, default=None): vol.All(cv.ensure_list, [vol.In(SWITCHES)]), vol.Optional(CONF_SENSORS, default=None): vol.All(cv.ensure_list, [vol.In(SENSORS)]), vol.Optional(CONF_MOTION_SENSOR, default=None): cv.boolean, })]) }, extra=vol.ALLOW_EXTRA) @asyncio.coroutine def async_setup(hass, config): """Set up the IP Webcam component.""" from pydroid_ipcam import PyDroidIPCam webcams = hass.data[DATA_IP_WEBCAM] = {} websession = async_get_clientsession(hass) @asyncio.coroutine def async_setup_ipcamera(cam_config): """Set up an IP camera.""" host = cam_config[CONF_HOST] username = cam_config.get(CONF_USERNAME) password = cam_config.get(CONF_PASSWORD) name = cam_config[CONF_NAME] interval = cam_config[CONF_SCAN_INTERVAL] switches = cam_config[CONF_SWITCHES] sensors = cam_config[CONF_SENSORS] motion = cam_config[CONF_MOTION_SENSOR] # Init ip webcam cam = PyDroidIPCam( hass.loop, websession, host, cam_config[CONF_PORT], username=username, password=password, timeout=cam_config[CONF_TIMEOUT] ) if switches is None: switches = [setting for setting in cam.enabled_settings if setting in SWITCHES] if sensors is None: sensors = [sensor for sensor in cam.enabled_sensors if sensor in SENSORS] sensors.extend(['audio_connections', 'video_connections']) if motion is None: motion = 'motion_active' in cam.enabled_sensors @asyncio.coroutine def async_update_data(now): """Update data from IP camera in SCAN_INTERVAL.""" yield from cam.update() async_dispatcher_send(hass, SIGNAL_UPDATE_DATA, host) async_track_point_in_utc_time( hass, async_update_data, utcnow() + interval) yield from async_update_data(None) # Load platforms webcams[host] = cam mjpeg_camera = { CONF_PLATFORM: 'mjpeg', CONF_MJPEG_URL: cam.mjpeg_url, CONF_STILL_IMAGE_URL: cam.image_url, CONF_NAME: name, } if username and password: mjpeg_camera.update({ CONF_USERNAME: username, CONF_PASSWORD: password }) hass.async_add_job(discovery.async_load_platform( hass, 'camera', 'mjpeg', mjpeg_camera, config)) if sensors: hass.async_add_job(discovery.async_load_platform( hass, 'sensor', DOMAIN, { CONF_NAME: name, CONF_HOST: host, CONF_SENSORS: sensors, }, config)) if switches: hass.async_add_job(discovery.async_load_platform( hass, 'switch', DOMAIN, { CONF_NAME: name, CONF_HOST: host, CONF_SWITCHES: switches, }, config)) if motion: hass.async_add_job(discovery.async_load_platform( hass, 'binary_sensor', DOMAIN, { CONF_HOST: host, CONF_NAME: name, }, config)) tasks = [async_setup_ipcamera(conf) for conf in config[DOMAIN]] if tasks: yield from asyncio.wait(tasks, 
loop=hass.loop) return True class AndroidIPCamEntity(Entity): """The Android device running IP Webcam.""" def __init__(self, host, ipcam): """Initialize the data object.""" self._host = host self._ipcam = ipcam @asyncio.coroutine def async_added_to_hass(self): """Register update dispatcher.""" @callback def async_ipcam_update(host): """Update callback.""" if self._host != host: return self.hass.async_add_job(self.async_update_ha_state(True)) async_dispatcher_connect( self.hass, SIGNAL_UPDATE_DATA, async_ipcam_update) @property def should_poll(self): """Return True if entity has to be polled for state.""" return False @property def available(self): """Return True if entity is available.""" return self._ipcam.available @property def device_state_attributes(self): """Return the state attributes.""" state_attr = {ATTR_HOST: self._host} if self._ipcam.status_data is None: return state_attr state_attr[ATTR_VID_CONNS] = \ self._ipcam.status_data.get('video_connections') state_attr[ATTR_AUD_CONNS] = \ self._ipcam.status_data.get('audio_connections') return state_attr
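# --- Illustration (not part of the component) ---------------------------------
# A minimal sketch of the configuration shape this component accepts, run
# through CONFIG_SCHEMA above. The host and entity names are placeholders.
EXAMPLE_CONFIG = {
    DOMAIN: [{
        CONF_HOST: '192.168.1.20',
        CONF_PORT: 8080,
        CONF_NAME: 'Hallway Webcam',
        CONF_SENSORS: ['battery_level', 'light'],
        CONF_SWITCHES: ['torch', 'night_vision'],
        CONF_MOTION_SENSOR: True,
    }]
}

# CONFIG_SCHEMA fills in the remaining defaults (timeout, scan interval, ...)
# and rejects unknown sensor or switch names.
VALIDATED_EXAMPLE = CONFIG_SCHEMA(EXAMPLE_CONFIG)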
"""Project constants Default values and other various configuration for projects, including available theme names and repository types. """ import re from django.utils.translation import ugettext_lazy as _ THEME_DEFAULT = 'default' THEME_SPHINX = 'sphinxdoc' THEME_SCROLLS = 'scrolls' THEME_AGOGO = 'agogo' THEME_TRADITIONAL = 'traditional' THEME_NATURE = 'nature' THEME_HAIKU = 'haiku' DOCUMENTATION_CHOICES = ( ('auto', _('Automatically Choose')), ('sphinx', _('Sphinx Html')), ('mkdocs', _('Mkdocs (Markdown)')), ('sphinx_htmldir', _('Sphinx HtmlDir')), ('sphinx_singlehtml', _('Sphinx Single Page HTML')), ) DEFAULT_THEME_CHOICES = ( # Translators: This is a name of a Sphinx theme. (THEME_DEFAULT, _('Default')), # Translators: This is a name of a Sphinx theme. (THEME_SPHINX, _('Sphinx Docs')), # (THEME_SCROLLS, 'Scrolls'), # (THEME_AGOGO, 'Agogo'), # Translators: This is a name of a Sphinx theme. (THEME_TRADITIONAL, _('Traditional')), # Translators: This is a name of a Sphinx theme. (THEME_NATURE, _('Nature')), # Translators: This is a name of a Sphinx theme. (THEME_HAIKU, _('Haiku')), ) SAMPLE_FILES = ( ('Installation', 'projects/samples/installation.rst.html'), ('Getting started', 'projects/samples/getting_started.rst.html'), ) SCRAPE_CONF_SETTINGS = [ 'copyright', 'project', 'version', 'release', 'source_suffix', 'html_theme', 'extensions', ] HEADING_MARKUP = ( (1, '='), (2, '-'), (3, '^'), (4, '"'), ) LIVE_STATUS = 1 DELETED_STATUS = 99 STATUS_CHOICES = ( (LIVE_STATUS, _('Live')), (DELETED_STATUS, _('Deleted')), ) REPO_TYPE_GIT = 'git' REPO_TYPE_SVN = 'svn' REPO_TYPE_HG = 'hg' REPO_TYPE_BZR = 'bzr' REPO_TYPE_P4 = 'p4' REPO_CHOICES = ( (REPO_TYPE_GIT, _('Git')), (REPO_TYPE_SVN, _('Subversion')), (REPO_TYPE_HG, _('Mercurial')), (REPO_TYPE_BZR, _('Bazaar')), (REPO_TYPE_P4, _('Perforce')), ) PUBLIC = 'public' PROTECTED = 'protected' PRIVATE = 'private' PRIVACY_CHOICES = ( (PUBLIC, _('Public')), (PROTECTED, _('Protected')), (PRIVATE, _('Private')), ) IMPORTANT_VERSION_FILTERS = { 'slug': 'important' } # in the future this constant can be replaced with a implementation that # detect all available Python interpreters in the fly (Maybe using # update-alternatives linux tool family?). 
PYTHON_CHOICES = ( ('python', _('CPython 2.x')), ('python3', _('CPython 3.x')), ) # Via http://sphinx-doc.org/latest/config.html#confval-language # Languages supported for the lang_slug in the URL # Translations for builtin Sphinx messages only available for a subset of these LANGUAGES = ( ("aa", "Afar"), ("ab", "Abkhaz"), ("af", "Afrikaans"), ("am", "Amharic"), ("ar", "Arabic"), ("as", "Assamese"), ("ay", "Aymara"), ("az", "Azerbaijani"), ("ba", "Bashkir"), ("be", "Belarusian"), ("bg", "Bulgarian"), ("bh", "Bihari"), ("bi", "Bislama"), ("bn", "Bengali"), ("bo", "Tibetan"), ("br", "Breton"), ("ca", "Catalan"), ("co", "Corsican"), ("cs", "Czech"), ("cy", "Welsh"), ("da", "Danish"), ("de", "German"), ("dz", "Dzongkha"), ("el", "Greek"), ("en", "English"), ("eo", "Esperanto"), ("es", "Spanish"), ("et", "Estonian"), ("eu", "Basque"), ("fa", "Iranian"), ("fi", "Finnish"), ("fj", "Fijian"), ("fo", "Faroese"), ("fr", "French"), ("fy", "Western Frisian"), ("ga", "Irish"), ("gd", "Scottish Gaelic"), ("gl", "Galician"), ("gn", "Guarani"), ("gu", "Gujarati"), ("ha", "Hausa"), ("hi", "Hindi"), ("he", "Hebrew"), ("hr", "Croatian"), ("hu", "Hungarian"), ("hy", "Armenian"), ("ia", "Interlingua"), ("id", "Indonesian"), ("ie", "Interlingue"), ("ik", "Inupiaq"), ("is", "Icelandic"), ("it", "Italian"), ("iu", "Inuktitut"), ("ja", "Japanese"), ("jv", "Javanese"), ("ka", "Georgian"), ("kk", "Kazakh"), ("kl", "Kalaallisut"), ("km", "Khmer"), ("kn", "Kannada"), ("ko", "Korean"), ("ks", "Kashmiri"), ("ku", "Kurdish"), ("ky", "Kyrgyz"), ("la", "Latin"), ("ln", "Lingala"), ("lo", "Lao"), ("lt", "Lithuanian"), ("lv", "Latvian"), ("mg", "Malagasy"), ("mi", "Maori"), ("mk", "Macedonian"), ("ml", "Malayalam"), ("mn", "Mongolian"), ("mr", "Marathi"), ("ms", "Malay"), ("mt", "Maltese"), ("my", "Burmese"), ("na", "Nauru"), ("ne", "Nepali"), ("nl", "Dutch"), ("no", "Norwegian"), ("oc", "Occitan"), ("om", "Oromo"), ("or", "Oriya"), ("pa", "Panjabi"), ("pl", "Polish"), ("ps", "Pashto"), ("pt", "Portuguese"), ("qu", "Quechua"), ("rm", "Romansh"), ("rn", "Kirundi"), ("ro", "Romanian"), ("ru", "Russian"), ("rw", "Kinyarwanda"), ("sa", "Sanskrit"), ("sd", "Sindhi"), ("sg", "Sango"), ("si", "Sinhala"), ("sk", "Slovak"), ("sl", "Slovenian"), ("sm", "Samoan"), ("sn", "Shona"), ("so", "Somali"), ("sq", "Albanian"), ("sr", "Serbian"), ("ss", "Swati"), ("st", "Southern Sotho"), ("su", "Sudanese"), ("sv", "Swedish"), ("sw", "Swahili"), ("ta", "Tamil"), ("te", "Telugu"), ("tg", "Tajik"), ("th", "Thai"), ("ti", "Tigrinya"), ("tk", "Turkmen"), ("tl", "Tagalog"), ("tn", "Tswana"), ("to", "Tonga"), ("tr", "Turkish"), ("ts", "Tsonga"), ("tt", "Tatar"), ("tw", "Twi"), ("ug", "Uyghur"), ("uk", "Ukrainian"), ("ur", "Urdu"), ("uz", "Uzbek"), ("vi", "Vietnamese"), ("vo", "Volapuk"), ("wo", "Wolof"), ("xh", "Xhosa"), ("yi", "Yiddish"), ("yo", "Yoruba"), ("za", "Zhuang"), ("zh", "Chinese"), ("zu", "Zulu"), # Try these to test our non-2 letter language support ("nb_NO", "Norwegian Bokmal"), ("pt_BR", "Brazilian Portuguese"), ("uk_UA", "Ukrainian"), ("zh_CN", "Simplified Chinese"), ("zh_TW", "Traditional Chinese"), ) LANGUAGES_REGEX = "|".join( [re.escape(code[0]) for code in LANGUAGES] ) PROGRAMMING_LANGUAGES = ( ("words", "Only Words"), ("py", "Python"), ("js", "Javascript"), ("php", "PHP"), ("ruby", "Ruby"), ("perl", "Perl"), ("java", "Java"), ("go", "Go"), ("julia", "Julia"), ("c", "C"), ("csharp", "C#"), ("cpp", "C++"), ("objc", "Objective-C"), ("other", "Other"), ) LOG_TEMPLATE = u"(Build) [{project}:{version}] {msg}" PROJECT_PK_REGEX = 
r'(?:[-\w]+)' PROJECT_SLUG_REGEX = r'(?:[-\w]+)' GITHUB_REGEXS = [ re.compile(r'github.com/(.+)/(.+)(?:\.git){1}'), re.compile(r'github.com/(.+)/(.+)'), re.compile(r'github.com:(.+)/(.+).git'), ] BITBUCKET_REGEXS = [ re.compile(r'bitbucket.org/(.+)/(.+).git'), re.compile(r'bitbucket.org/(.+)/(.+)/'), re.compile(r'bitbucket.org/(.+)/(.+)'), ] GITHUB_URL = ('https://github.com/{user}/{repo}/' '{action}/{version}{docroot}{path}{source_suffix}') BITBUCKET_URL = ('https://bitbucket.org/{user}/{repo}/' 'src/{version}{docroot}{path}{source_suffix}')
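# --- Illustration (not part of the constants module) --------------------------
# A small sketch of how GITHUB_REGEXS and GITHUB_URL above can be combined to
# build a "view source on GitHub" link. The repository URL and document path
# used here are hypothetical examples.
def github_view_url(repo_url, version, docroot, path, source_suffix):
    for regex in GITHUB_REGEXS:
        match = regex.search(repo_url)
        if match:
            user, repo = match.groups()
            return GITHUB_URL.format(
                user=user, repo=repo, action='blob', version=version,
                docroot=docroot, path=path, source_suffix=source_suffix)
    return None


# github_view_url('https://github.com/rtfd/readthedocs.org.git',
#                 version='master', docroot='/docs/', path='index',
#                 source_suffix='.rst')
# -> 'https://github.com/rtfd/readthedocs.org/blob/master/docs/index.rst'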
# -*- coding: utf-8 -*- import numpy as np import phdata import logging def Read3ddata(fname): """ Reads the file provided by fname as a 3ddose file with the following format: |nx ny nz |xBoundary |yBoundary |zBoundary |data where nx, ny, nz are integers providing the size in each dimension. xBoundary is a vector of nx+1 floats. yBoundary is a vector of ny+1 floats. zBoundary is a vector of nz+1 floats. data is a vector of nx*ny*nz floats. Please note that the 3ddose file dimensions are in cm. We are using mm, so a transformation will be made while reading the file. Parameters -------- fname: string A string pointing to a file on the Hdd. Returns ------- phdata: phantom data object, which contains boundaries and dose data Raises ------ IOError: In case the file doesn't exist, and IOError('Invalid file name') will be raised """ if fname is None: return None try: fileHandle = open(fname, 'r') except IOError: raise IOError("Invalid file name") with fileHandle: #read in the dimensions line = fileHandle.readline() logging.debug(line) (nx, ny, nz) = GetDimensions(line) logging.info("NNs: {0} {1} {2}".format(nx, ny, nz)) line = fileHandle.readline() logging.debug(line) bx = GetBoundaries(nx, line) logging.info(str(bx)) line = fileHandle.readline() logging.debug(line) by = GetBoundaries(ny, line) logging.info(str(by)) line = fileHandle.readline() logging.debug(line) bz = GetBoundaries(nz, line) logging.info(str(bz)) phd = phdata.phdata(bx, by, bz) #create dose matrix line = fileHandle.readline() logging.debug(line) split = line.split(" ") split = [x for x in split if x] # remove empty lines if len(split) != nz*ny*nx: raise RuntimeError("Data and ph dimensions are not compatible") data = phd.data() k = 0 for iz in range(0, nz): for iy in range(0, ny): for ix in range(0, nx): data[ix,iy,iz] = float(split[k]) k += 1 logging.info("Done with constructing") return phd def Read3ddose(fname): """ Reads the file provided by fname as a 3ddose file with the following format: |nx ny nz |xBoundary |yBoundary |zBoundary |data where nx, ny, nz are integers providing the size in each dimension. xBoundary is a vector of nx+1 floats. yBoundary is a vector of ny+1 floats. zBoundary is a vector of nz+1 floats. data is a vector of nx*ny*nz floats. Please note that the 3ddose file dimensions are in cm. We are using mm, so a transformation will be made while reading the file. Parameters -------- fname: string A string pointing to a file on the Hdd. Returns ------- nx: int An integer representing the size on X axis. ny: int An integer representing the size on Y axis. nz: int An integer representing the size on Z axis.
xBoundary: float[nx+1] A vector of nx+1 floats representing the x boundary in mm yBoundary: float[ny+1] A vector of ny+1 floats representing the y boundary in mm zBoundary: float[nz+1] A vector of nz+1 floats representing the z boundary in mm dose: float[nx*ny*nz] A vector of nx*ny*nz floats representing the dose values Raises ------ IOerror: In case the file doesn't exist, and IOError('Invalid file name') will be raised """ if fname == None: return None try: fileHandle = open(fname, 'r') except IOError: raise IOError("Invalid file name") with fileHandle: #read in the dimensions line = fileHandle.readline() (nx, ny, nz) = GetDimensions(line) print(nx, ny, nz) line = fileHandle.readline() bx = GetBoundaries(nx, line) print(bx) line = fileHandle.readline() by = GetBoundaries(ny, line) print(by) line = fileHandle.readline(); bz = GetBoundaries(nz, line) print(bz) #create dose matrix line = fileHandle.readline() dose3ddose = Get3ddata(nx, ny, nz, line) return (nx,ny,nz,bx,by,bz,dose3ddose) def GetDimensions(line): """ Parse and extract X, Y and Z dimensions from string Parameters ---------- line: string Line containing x, y, z dimensions Returns ------- (nx,ny,nz): (int,int,int) The dimensions on x, y, and z coordinate respectively Raises ------ ValueError: In case we try to parse a string to an int/float but the file is not in the expected format, this error will be raised """ try: split = line.split(" ") split = [x for x in split if x] # remove empty lines nx = int(split[0]) ny = int(split[1]) nz = int(split[2]) return (nx, ny, nz) except ValueError: raise ValueError('Invalid file format') def GetBoundaries(n, line): """ Parse and extract a boundary of n+1 elements from a line of text Parameters ---------- n: int Number of elements line: string line containing boundary data Returns ------- Array of n+1 floats representing the boundary """ split = line.split(" ") split = [x for x in split if x] # remove empty lines boundaries = [] # we assume boundaries and n is equal if len(split) != n+1: raise RuntimeError("GetBoundaries: Wrong number of boundaries") for i in range(0,n+1): d = float(split[i]) boundaries.append(d) if boundaries.count == 0: return None return boundaries def Get3ddata(nx, ny, nz, line): """ Parses a line and converts it to 3D dose representation Parameters ---------- nx: int Nof X points ny: int Nof Y points nz: int Nof Z points line: string String which contains all 3D dose data points Returns ------- 3D dose data as NumPy object """ split = line.split(" ") split = [x for x in split if x] # remove empty lines data = np.empty((nx,ny,nz), dtype=np.float32) k = 0 for iz in range(0, nz): for iy in range(0, ny): for ix in range(0, nx): data[ix,iy,iz] = float(split[k]) k += 1 return data
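# --- Illustration (not part of the module) ------------------------------------
# A minimal sketch of the parsing helpers above on a tiny in-memory 2x1x1
# dataset (the numbers are made up).
if __name__ == "__main__":
    nx, ny, nz = GetDimensions("2 1 1\n")
    bx = GetBoundaries(nx, "0.0 0.5 1.0\n")
    by = GetBoundaries(ny, "0.0 1.0\n")
    bz = GetBoundaries(nz, "0.0 1.0\n")
    dose = Get3ddata(nx, ny, nz, "1.5 2.5\n")
    print(nx, ny, nz)
    print(bx, by, bz)
    print(dose[0, 0, 0], dose[1, 0, 0])  # -> 1.5 2.5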
"""This module contains a LevelDB client.""" import binascii import rlp from mythril.ethereum.interface.leveldb.accountindexing import CountableList from mythril.ethereum.interface.leveldb.accountindexing import ( ReceiptForStorage, AccountIndexer, ) import logging from ethereum import utils from ethereum.block import BlockHeader, Block from mythril.ethereum.interface.leveldb.state import State from mythril.ethereum.interface.leveldb.eth_db import ETH_DB from mythril.ethereum.evmcontract import EVMContract from mythril.exceptions import AddressNotFoundError log = logging.getLogger(__name__) # Per https://github.com/ethereum/go-ethereum/blob/master/core/rawdb/schema.go # prefixes and suffixes for keys in geth header_prefix = b"h" # header_prefix + num (uint64 big endian) + hash -> header body_prefix = b"b" # body_prefix + num (uint64 big endian) + hash -> block body num_suffix = b"n" # header_prefix + num (uint64 big endian) + num_suffix -> hash block_hash_prefix = b"H" # block_hash_prefix + hash -> num (uint64 big endian) block_receipts_prefix = ( b"r" ) # block_receipts_prefix + num (uint64 big endian) + hash -> block receipts # known geth keys head_header_key = b"LastBlock" # head (latest) header hash # custom prefixes address_prefix = b"AM" # address_prefix + hash -> address # custom keys address_mapping_head_key = b"accountMapping" # head (latest) number of indexed block def _format_block_number(number): """Format block number to uint64 big endian.""" return utils.zpad(utils.int_to_big_endian(number), 8) def _encode_hex(v): """Encode a hash string as hex.""" return "0x" + utils.encode_hex(v) class LevelDBReader(object): """LevelDB reading interface, can be used with snapshot.""" def __init__(self, db): """ :param db: """ self.db = db self.head_block_header = None self.head_state = None def _get_head_state(self): """Get head state. :return: """ if not self.head_state: root = self._get_head_block().state_root self.head_state = State(self.db, root) return self.head_state def _get_account(self, address): """Get account by address. :param address: :return: """ state = self._get_head_state() account_address = binascii.a2b_hex(utils.remove_0x_head(address)) return state.get_and_cache_account(account_address) def _get_block_hash(self, number): """Get block hash by block number. :param number: :return: """ num = _format_block_number(number) hash_key = header_prefix + num + num_suffix return self.db.get(hash_key) def _get_head_block(self): """Get head block header. :return: """ if not self.head_block_header: block_hash = self.db.get(head_header_key) num = self._get_block_number(block_hash) self.head_block_header = self._get_block_header(block_hash, num) # find header with valid state while ( not self.db.get(self.head_block_header.state_root) and self.head_block_header.prevhash is not None ): block_hash = self.head_block_header.prevhash num = self._get_block_number(block_hash) self.head_block_header = self._get_block_header(block_hash, num) return self.head_block_header def _get_block_number(self, block_hash): """Get block number by its hash. :param block_hash: :return: """ number_key = block_hash_prefix + block_hash return self.db.get(number_key) def _get_block_header(self, block_hash, num): """Get block header by block header hash & number. 
:param block_hash: :param num: :return: """ header_key = header_prefix + num + block_hash block_header_data = self.db.get(header_key) header = rlp.decode(block_header_data, sedes=BlockHeader) return header def _get_address_by_hash(self, block_hash): """Get mapped address by its hash. :param block_hash: :return: """ address_key = address_prefix + block_hash return self.db.get(address_key) def _get_last_indexed_number(self): """Get latest indexed block number. :return: """ return self.db.get(address_mapping_head_key) def _get_block_receipts(self, block_hash, num): """Get block transaction receipts by block header hash & number. :param block_hash: :param num: :return: """ number = _format_block_number(num) receipts_key = block_receipts_prefix + number + block_hash receipts_data = self.db.get(receipts_key) receipts = rlp.decode(receipts_data, sedes=CountableList(ReceiptForStorage)) return receipts class LevelDBWriter(object): """level db writing interface.""" def __init__(self, db): """ :param db: """ self.db = db self.wb = None def _set_last_indexed_number(self, number): """Set latest indexed block number. :param number: :return: """ return self.db.put(address_mapping_head_key, _format_block_number(number)) def _start_writing(self): """Start writing a batch.""" self.wb = self.db.write_batch() def _commit_batch(self): """Commit a batch.""" self.wb.write() def _store_account_address(self, address): """Get block transaction receipts by block header hash & number. :param address: """ address_key = address_prefix + utils.sha3(address) self.wb.put(address_key, address) class EthLevelDB(object): """Go-Ethereum LevelDB client class.""" def __init__(self, path): """ :param path: """ self.path = path self.db = ETH_DB(path) self.reader = LevelDBReader(self.db) self.writer = LevelDBWriter(self.db) def get_contracts(self): """Iterate through all contracts.""" for account in self.reader._get_head_state().get_all_accounts(): if account.code is not None: code = _encode_hex(account.code) contract = EVMContract(code, enable_online_lookup=False) yield contract, account.address, account.balance def search(self, expression, callback_func): """Search through all contract accounts. :param expression: :param callback_func: """ cnt = 0 indexer = AccountIndexer(self) for contract, address_hash, balance in self.get_contracts(): if contract.matches_expression(expression): try: address = _encode_hex(indexer.get_contract_by_hash(address_hash)) except AddressNotFoundError: """The hash->address mapping does not exist in our index. If the index is up-to-date, this likely means that the contract was created by an internal transaction. Skip this contract as right now we don't have a good solution for this. """ continue callback_func(contract, address, balance) cnt += 1 if not cnt % 1000: log.info("Searched %d contracts" % cnt) def contract_hash_to_address(self, contract_hash): """Try to find corresponding account address. :param contract_hash: :return: """ address_hash = binascii.a2b_hex(utils.remove_0x_head(contract_hash)) indexer = AccountIndexer(self) return _encode_hex(indexer.get_contract_by_hash(address_hash)) def eth_getBlockHeaderByNumber(self, number): """Get block header by block number. :param number: :return: """ block_hash = self.reader._get_block_hash(number) block_number = _format_block_number(number) return self.reader._get_block_header(block_hash, block_number) def eth_getBlockByNumber(self, number): """Get block body by block number. 
:param number: :return: """ block_hash = self.reader._get_block_hash(number) block_number = _format_block_number(number) body_key = body_prefix + block_number + block_hash block_data = self.db.get(body_key) body = rlp.decode(block_data, sedes=Block) return body def eth_getCode(self, address): """Get account code. :param address: :return: """ account = self.reader._get_account(address) return _encode_hex(account.code) def eth_getBalance(self, address): """Get account balance. :param address: :return: """ account = self.reader._get_account(address) return account.balance def eth_getStorageAt(self, address, position): """Get account storage data at position. :param address: :param position: :return: """ account = self.reader._get_account(address) return _encode_hex( utils.zpad(utils.encode_int(account.get_storage_data(position)), 32) )
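# --- Illustration (not part of the module) ------------------------------------
# A hedged sketch of using EthLevelDB against a local go-ethereum chaindata
# directory. The path is a placeholder, the search expression is only an
# example of the syntax accepted by EVMContract.matches_expression, and
# hash->address resolution requires the account index to have been built
# (see accountindexing).
if __name__ == "__main__":
    eth_db = EthLevelDB("/path/to/geth/chaindata")

    def report_match(contract, address, balance):
        log.info("match at %s (balance=%d)", address, balance)

    # Iterate all contract accounts and report those matching the expression.
    eth_db.search("code#PUSH#", report_match)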
########################################################################## # # Copyright (c) 2012, John Haddon. All rights reserved. # Copyright (c) 2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import ctypes import functools import collections import arnold import IECore import IECoreArnold import imath import Gaffer import GafferUI import GafferArnold ########################################################################## # Utilities to make it easier to work with the Arnold API, which has a # fairly bare wrapping using ctypes. 
########################################################################## def __aiMetadataGetStr( nodeEntry, paramName, name, defaultValue = None ) : value = arnold.AtStringReturn() if arnold.AiMetaDataGetStr( nodeEntry, paramName, name, value ) : return arnold.AtStringToStr( value ) return defaultValue def __aiMetadataGetBool( nodeEntry, paramName, name, defaultValue = None ) : value = ctypes.c_bool() if arnold.AiMetaDataGetBool( nodeEntry, paramName, name, value ) : return bool( value ) return defaultValue def __aiMetadataGetInt( nodeEntry, paramName, name, defaultValue = None ) : value = ctypes.c_int() if arnold.AiMetaDataGetInt( nodeEntry, paramName, name, value ) : return int( value.value ) return defaultValue def __aiMetadataGetFlt( nodeEntry, paramName, name, defaultValue = None ) : value = ctypes.c_float() if arnold.AiMetaDataGetFlt( nodeEntry, paramName, name, value ) : return float( value.value ) return defaultValue def __aiMetadataGetRGB( nodeEntry, paramName, name, defaultValue = None ) : value = arnold.AtRGB() if arnold.AiMetaDataGetRGB( nodeEntry, paramName, name, value ) : return imath.Color3f( value.r, value.g, value.b ) return defaultValue # SolidAngle does not appear to have wrapped AiMetaDataGetRGBA in Python, so we don't # support the RGBA case """ def __aiMetadataGetRGBA( nodeEntry, paramName, name, defaultValue = None ) : value = arnold.AtRGBA() if arnold.AiMetaDataGetRGBA( nodeEntry, paramName, name, value ) : return imath.Color4f( value.r, value.g, value.b, value.a ) return defaultValue """ def __aiMetadataGetVec2( nodeEntry, paramName, name, defaultValue = None ) : value = arnold.AtVector2() if arnold.AiMetaDataGetVec2( nodeEntry, paramName, name, value ) : return imath.V2f( value.x, value.y ) return defaultValue def __aiMetadataGetVec( nodeEntry, paramName, name, defaultValue = None ) : value = arnold.AtVector() if arnold.AiMetaDataGetVec( nodeEntry, paramName, name, value ) : return imath.V3f( value.x, value.y, value.z ) return defaultValue def __enumPresetValues( param ): presets = IECore.StringVectorData() enum = arnold.AiParamGetEnum( param ) while True : preset = arnold.AiEnumGetString( enum, len( presets ) ) if not preset : break presets.append( preset ) return presets def __plugPresetNames( nodeEntry, paramName ) : # options STRING "name:value|..." options = __aiMetadataGetStr( nodeEntry, paramName, "options" ) if options : return IECore.StringVectorData( [ o.partition( ":" )[0] for o in options.split( "|" ) if o ] ) def __plugPresetValues( nodeEntry, paramName, paramType ) : # options STRING "name:value|..." 
options = __aiMetadataGetStr( nodeEntry, paramName, "options" ) if not options : return None values = [ o.rpartition( ":" )[2] for o in options.split( "|" ) if o ] if paramType == arnold.AI_TYPE_STRING : return IECore.StringVectorData( values ) elif paramType in ( arnold.AI_TYPE_INT, arnold.AI_TYPE_BYTE ) : return IECore.IntVectorData( [ int( v ) for v in values ] ) elif paramType == arnold.AI_TYPE_UINT : return IECore.UIntVectorData( [ int( v ) for v in values ] ) elif paramType == arnold.AI_TYPE_FLOAT : return IECore.FloatVectorData( [ float( v ) for v in values ] ) elif paramType == arnold.AI_TYPE_BOOLEAN : falseVals = ( "false", "no", "0" ) return IECore.BoolVectorData( [ False if v.lower() in falseVals else True for v in values ] ) elif paramType == arnold.AI_TYPE_RGB : return IECore.Color3fVectorData( [ imath.Color3f( *[ float( x ) for x in v.split( "," ) ]) for v in values ] ) elif paramType == arnold.AI_TYPE_RGBA : return IECore.Color4fVectorData( [ imath.Color4f( *[ float( x ) for x in v.split( "," ) ]) for v in values ] ) elif paramType in ( arnold.AI_TYPE_VECTOR, arnold.AI_TYPE_POINT ): return IECore.V3fVectorData( [ imath.V3f( *[ float( x ) for x in v.split( "," ) ]) for v in values ] ) elif paramType == arnold.AI_TYPE_POINT2 : return IECore.V2fVectorData( [ imath.V2f( *[ float( x ) for x in v.split( "," ) ]) for v in values ] ) return None ########################################################################## # Build a registry of information retrieved from Arnold metadata. We fill this # once at startup, as we can only get it from within an AiUniverse block, # and we don't want to have to keep making those temporarily later. # # We take a pragmatic approach to what metadata we support, since there # are multiple conflicting "standards" in use in practice. In order of # precedence (most important first), we aim to support the following : # # - Arnold's metadata convention. This doesn't define much, but gives # us min/max/desc/linkable. # - The OSL metadata convention. This gives us a bit more, and is also # the convention we support already for RSL and OSL shaders. # # The alternative to this would be to add one more "standard" by defining # a Gaffer-specific convention, and then contribute to the AlShaders # project to add all the necessary metadata. This would be more work # for no real gain. ########################################################################## __metadata = collections.defaultdict( dict ) def __translateNodeMetadata( nodeEntry ) : nodeName = arnold.AiNodeEntryGetName( nodeEntry ) # Shader description. We support Arnold-style "desc" and # OSL style "help". description = __aiMetadataGetStr( nodeEntry, None, "desc", defaultValue = __aiMetadataGetStr( nodeEntry, None, "help" ) ) if description is not None : __metadata[nodeName]["description"] = description # Documentation URL. We support OSL-style "URL" url = __aiMetadataGetStr( nodeEntry, None, "URL" ) if url is not None : __metadata[nodeName]["documentation:url"] = url # Icon. There doesn't appear to be a standard for this, so # we support "gaffer.icon" and "gaffer.iconScale". 
icon = __aiMetadataGetStr( nodeEntry, None, "gaffer.icon" ) if icon is not None : __metadata[nodeName]["icon"] = icon iconScale = __aiMetadataGetFlt( nodeEntry, None, "gaffer.iconScale" ) if iconScale is not None : __metadata[nodeName]["iconScale"] = iconScale paramIt = arnold.AiNodeEntryGetParamIterator( nodeEntry ) while not arnold.AiParamIteratorFinished( paramIt ) : ## \todo We could allow custom ui types to be specified using # arnold metadata entries. param = arnold.AiParamIteratorGetNext( paramIt ) paramName = arnold.AiParamGetName( param ) paramPath = nodeName + ".parameters." + paramName paramType = arnold.AiParamGetType( param ) # Parameter description description = __aiMetadataGetStr( nodeEntry, paramName, "desc" ) if description is not None : __metadata[paramPath]["description"] = description # Presets if paramType == arnold.AI_TYPE_ENUM : # Parameter presets from enum values presetValues = __enumPresetValues( param ) presetNames = presetValues else : # Manually specified presets for other types presetValues = __plugPresetValues( nodeEntry, paramName, paramType ) presetNames = __plugPresetNames( nodeEntry, paramName ) if presetValues : __metadata[paramPath]["plugValueWidget:type"] = "GafferUI.PresetsPlugValueWidget" __metadata[paramPath]["presetValues"] = presetValues __metadata[paramPath]["presetNames"] = presetNames # Nodule type from linkable metadata and parameter type linkable = __aiMetadataGetBool( nodeEntry, paramName, "linkable", defaultValue = paramType not in ( arnold.AI_TYPE_BYTE, arnold.AI_TYPE_INT, arnold.AI_TYPE_UINT, arnold.AI_TYPE_BOOLEAN, arnold.AI_TYPE_ENUM, arnold.AI_TYPE_STRING ) ) __metadata[paramPath]["nodule:type"] = None if linkable else "" # PlugValueWidget type from OSL "widget" widget = None widget = __aiMetadataGetStr( nodeEntry, paramName, "widget", widget ) if widget is not None : __metadata[paramPath]["plugValueWidget:type"] = { "number" : "GafferUI.NumericPlugValueWidget", "string" : "GafferUI.StringPlugValueWidget", "boolean" : "GafferUI.BoolPlugValueWidget", "checkBox" : "GafferUI.BoolPlugValueWidget", "popup" : "GafferUI.PresetsPlugValueWidget", "mapper" : "GafferUI.PresetsPlugValueWidget", "filename" : "GafferUI.PathPlugValueWidget", "null" : "", }[widget] # Layout section from OSL "page". 
page = __aiMetadataGetStr( nodeEntry, paramName, "page" ) if page is not None : __metadata[paramPath]["layout:section"] = page # Uncollapse sections if desired collapsed = __aiMetadataGetBool( nodeEntry, None, "gaffer.layout.section.%s.collapsed" % page ) if collapsed == False : parent = paramPath.rsplit( '.', 1 )[0] __metadata[parent]["layout:section:%s:collapsed" % page] = collapsed # Label from OSL "label" label = __aiMetadataGetStr( nodeEntry, paramName, "label" ) if label is None : # Label from Arnold naming convention # Arnold uses snake_case rather than camelCase for naming, so translate this into # nice looking names label = " ".join( [ i.capitalize() for i in paramName.split( "_" ) ] ) __metadata[paramPath]["label"] = label __metadata[paramPath]["noduleLayout:label"] = label childComponents = { arnold.AI_TYPE_VECTOR2 : "xy", arnold.AI_TYPE_VECTOR : "xyz", arnold.AI_TYPE_RGB : "rgb", arnold.AI_TYPE_RGBA : "rgba", }.get( paramType ) if childComponents is not None : for c in childComponents : __metadata["{}.{}".format( paramPath, c )]["noduleLayout:label"] = "{}.{}".format( label, c ) # NodeEditor layout from other Gaffer-specific metadata divider = __aiMetadataGetBool( nodeEntry, paramName, "gaffer.layout.divider" ) if divider : __metadata[paramPath]["layout:divider"] = True index = __aiMetadataGetInt( nodeEntry, paramName, "gaffer.layout.index" ) if index is not None : __metadata[paramPath]["layout:index"] = index # GraphEditor visibility from Gaffer-specific metadata visible = __aiMetadataGetBool( nodeEntry, None, "gaffer.graphEditorLayout.defaultVisibility" ) visible = __aiMetadataGetBool( nodeEntry, paramName, "gaffer.graphEditorLayout.visible", visible ) if visible is not None : __metadata[paramPath]["noduleLayout:visible"] = visible userDefault = None if paramType in [ arnold.AI_TYPE_BYTE, arnold.AI_TYPE_INT, arnold.AI_TYPE_UINT ]: userDefault = __aiMetadataGetInt( nodeEntry, paramName, "gaffer.userDefault" ) elif paramType == arnold.AI_TYPE_BOOLEAN: userDefault = __aiMetadataGetBool( nodeEntry, paramName, "gaffer.userDefault" ) elif paramType == arnold.AI_TYPE_FLOAT: userDefault = __aiMetadataGetFlt( nodeEntry, paramName, "gaffer.userDefault" ) elif paramType == arnold.AI_TYPE_RGB: userDefault = __aiMetadataGetRGB( nodeEntry, paramName, "gaffer.userDefault" ) #elif paramType == arnold.AI_TYPE_RGBA: # userDefault = __aiMetadataGetRGBA( nodeEntry, paramName, "gaffer.userDefault" ) elif paramType == arnold.AI_TYPE_VECTOR: userDefault = __aiMetadataGetVec( nodeEntry, paramName, "gaffer.userDefault" ) elif paramType == arnold.AI_TYPE_VECTOR2: userDefault = __aiMetadataGetVec2( nodeEntry, paramName, "gaffer.userDefault" ) elif paramType == arnold.AI_TYPE_STRING: userDefault = __aiMetadataGetStr( nodeEntry, paramName, "gaffer.userDefault" ) elif paramType == arnold.AI_TYPE_ENUM: userDefault = __aiMetadataGetStr( nodeEntry, paramName, "gaffer.userDefault" ) if userDefault: nodeName, _, plugName = paramPath.split( "." ) Gaffer.Metadata.registerValue( "ai:surface:%s:%s" % ( nodeName, plugName ), "userDefault", userDefault ) with IECoreArnold.UniverseBlock( writable = False ) : nodeIt = arnold.AiUniverseGetNodeEntryIterator( arnold.AI_NODE_SHADER | arnold.AI_NODE_LIGHT | arnold.AI_NODE_COLOR_MANAGER ) while not arnold.AiNodeEntryIteratorFinished( nodeIt ) : __translateNodeMetadata( arnold.AiNodeEntryIteratorGetNext( nodeIt ) ) ########################################################################## # Gaffer Metadata queries. 
These are implemented using the preconstructed # registry above. ########################################################################## def __nodeDescription( node ) : if isinstance( node, GafferArnold.ArnoldShader ) : return __metadata[node["name"].getValue()].get( "description", """Loads shaders for use in Arnold renders. Use the ShaderAssignment node to assign shaders to objects in the scene.""", ) else : return __metadata[node["__shader"]["name"].getValue()].get( "description", """Loads an Arnold light shader and uses it to output a scene with a single light.""" ) def __nodeMetadata( node, name ) : if isinstance( node, GafferArnold.ArnoldShader ) : key = node["name"].getValue() else : # Other nodes hold an internal shader key = node["__shader"]["name"].getValue() return __metadata[key].get( name ) def __plugMetadata( plug, name ) : if name == "noduleLayout:visible" and plug.getInput() is not None and not plug.node().getName().startswith( "__" ) : # Before the introduction of nodule visibility controls, # users may have made connections to plugs which are now # hidden by default. Make sure we continue to show them # by default - they can still be hidden explicitly by # adding an instance metadata value. # For private nodes this behaviour is skipped as their # inputs might be driven by the parent. return True node = plug.node() if isinstance( node, GafferArnold.ArnoldShader ) : key = plug.node()["name"].getValue() + "." + plug.relativeName( node ) else : # Other nodes hold an internal shader key = plug.node()["__shader"]["name"].getValue() + "." + plug.relativeName( node ) return __metadata[key].get( name ) for nodeType in ( GafferArnold.ArnoldShader, GafferArnold.ArnoldLight, GafferArnold.ArnoldMeshLight, GafferArnold.ArnoldColorManager ) : nodeKeys = set() parametersPlugKeys = set() parameterPlugKeys = set() parameterPlugComponentKeys = set() for name, metadata in __metadata.items() : keys = ( nodeKeys, parametersPlugKeys, parameterPlugKeys, parameterPlugComponentKeys )[name.count( "." )] keys.update( metadata.keys() ) for key in nodeKeys : Gaffer.Metadata.registerValue( nodeType, key, functools.partial( __nodeMetadata, name = key ) ) for key in parametersPlugKeys : Gaffer.Metadata.registerValue( nodeType, "parameters", key, functools.partial( __plugMetadata, name = key ) ) for key in parameterPlugKeys : Gaffer.Metadata.registerValue( nodeType, "parameters.*", key, functools.partial( __plugMetadata, name = key ) ) for key in parameterPlugComponentKeys : Gaffer.Metadata.registerValue( nodeType, "parameters.*.[xyzrgb]", key, functools.partial( __plugMetadata, name = key ) ) Gaffer.Metadata.registerValue( nodeType, "description", __nodeDescription ) Gaffer.Metadata.registerValue( GafferArnold.ArnoldShader, "attributeSuffix", "plugValueWidget:type", "GafferUI.StringPlugValueWidget" ) Gaffer.Metadata.registerValue( GafferArnold.ArnoldShader, "layout:activator:suffixActivator", lambda parent : parent["type"].getValue() == "ai:lightFilter" ) Gaffer.Metadata.registerValue( GafferArnold.ArnoldShader, "attributeSuffix", "layout:visibilityActivator", "suffixActivator" )
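# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): the registration code
# relies on a single pattern -- build a plain dict-of-dicts registry once,
# then register one lookup callable per metadata name with functools.partial
# so every later query is just a dict access. The standalone example below
# reproduces that pattern with hypothetical node/parameter names; it does not
# touch the arnold or Gaffer APIs.
import collections
import functools

__example_registry = collections.defaultdict( dict )
__example_registry["noise"]["description"] = "A fractal noise shader."
__example_registry["noise.parameters.octaves"]["label"] = "Octaves"

def __exampleMetadata( key, name ) :

	# Equivalent of __nodeMetadata/__plugMetadata : a plain dict lookup.
	return __example_registry[key].get( name )

# Equivalent of the Gaffer.Metadata.registerValue() loop : bind the metadata
# name into a partial so the registry is consulted lazily at query time.
__exampleQueries = {
	name : functools.partial( __exampleMetadata, name = name )
	for key in __example_registry
	for name in __example_registry[key]
}

assert __exampleQueries["description"]( "noise" ) == "A fractal noise shader."
assert __exampleQueries["label"]( "noise.parameters.octaves" ) == "Octaves"
# ---------------------------------------------------------------------------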
"""A scheme for assigning categories to morphs. To change the number or meaning of categories, only this file should need to be modified. """ from __future__ import unicode_literals import collections import locale import logging import math import sys from . import utils PY3 = sys.version_info.major == 3 # _str is used to convert command line arguments to the right type # (str for PY3, unicode for PY2) if PY3: _str = str else: _str = lambda x: unicode(x, encoding=locale.getpreferredencoding()) _logger = logging.getLogger(__name__) class WordBoundary(object): """A special symbol for marking word boundaries. Using an object of this type allows arbitrary characters in the corpus, while using a string e.g. '#' instead causes that char to be reserved. """ def __repr__(self): return '#' def __len__(self): return 0 def __eq__(self, other): # Word boundaries from different runs should be equal if isinstance(other, WordBoundary): return True return False def __hash__(self): # This is called a lot. Using constant for minor optimization. #return hash(self.__class__.__name__) return 8364886854198508766 # Using a string is slightly faster. # Change to WordBoundary if you want to e.g. support '#':s in the corpus WORD_BOUNDARY = '#' # WordBoundary() ################################## ### Categorization-dependent code: ### to change the categories, only code in this module ### should need to be changed. # A data structure with one value for each category. # This also defines the set of possible categories ByCategory = collections.namedtuple('ByCategory', ['PRE', 'STM', 'SUF', 'ZZZ']) DEFAULT_CATEGORY = 'STM' # The morph usage/context features used to calculate the probability of a # morph belonging to a category. MorphContext = collections.namedtuple('MorphContext', ['count', 'left_perplexity', 'right_perplexity']) AnalysisAlternative = collections.namedtuple('AnalysisAlternative', ['analysis', 'penalty']) # Context type flags, from which the context type is formed. # (Binary flags in integer format) CONTEXT_FLAG_INITIAL = 1 CONTEXT_FLAG_FINAL = 2 # The context type values CONTEXT_TYPE_INTERNAL = 0 CONTEXT_TYPE_INITIAL = CONTEXT_TYPE_INTERNAL + CONTEXT_FLAG_INITIAL CONTEXT_TYPE_FINAL = CONTEXT_TYPE_INTERNAL + CONTEXT_FLAG_FINAL CONTEXT_TYPE_BOTH = (CONTEXT_TYPE_INTERNAL + CONTEXT_FLAG_INITIAL + CONTEXT_FLAG_FINAL) # Penalty for each non-morpheme, in heuristic postprocessing # Must be smaller than LOGPROB_ZERO, to prevent impossible taggings from # being generated. NON_MORPHEME_PENALTY = 50 class Postprocessor(object): def __init__(self): self.temporaries = set() """abstract base class for heuristic output postprocessors""" def _join_at(self, analysis, i): """Helper function for joins""" tag = analysis[i].category if analysis[i + 1].category != 'ZZZ': tag = analysis[i + 1].category if tag == 'ZZZ': tag = 'STM' morph = analysis[i].morph + analysis[i + 1].morph cmorph = CategorizedMorph(morph, tag) self.temporaries.add(cmorph) out = list(analysis[:i]) + [cmorph] if len(analysis) > (i + 2): out.extend(analysis[(i + 2):]) return out def __eq__(self, other): return type(self) == type(other) # FIXME: badly named, should be NonmorphemeRemovalPostprocessor class HeuristicPostprocessor(Postprocessor): """Heuristic post-processing to remove non-morphemes from the final segmentation. Unlike in Morfessor Cat-ML, this is not necessary during training for controlling model complexity, but only as a post-processing step to ensure meaningful categories. 
""" def __init__(self, max_join_stem_len=4): super(HeuristicPostprocessor, self).__init__() self.max_join_stem_len = max_join_stem_len def apply_to(self, analysis, model): """Remove nonmorphemes from the analysis by joining or retagging morphs, using heuristics.""" # Nothing to do if there are no nonmorphemes if all([m.category != 'ZZZ' for m in analysis]): return analysis if len(analysis) == 1: return (CategorizedMorph(analysis[0].morph, 'STM'),) # Sequencs of ZZZs should be joined analysis = self._join_sequences(analysis, model.forcesplit) # Resulting long ZZZs are retagged as stems self._long_to_stem(analysis, 4) # Might be done at this point if all(m.category != 'ZZZ' for m in analysis): return analysis # Retag parts of a multiple-suffix tail as SUF self._tail_suffixes(analysis) # If not: stronger measures are needed # Force join remaining analysis = self._force_join(analysis, model.forcesplit) # Retag with non-morphemes forbidden analysis = model.viterbi_tag(analysis, forbid_zzz=True) return analysis def _join_sequences(self, analysis, forcesplit): """Joins consecutive non-morphemes""" prev = None out = [] for m in analysis: if (prev is None or (m.category != 'ZZZ' or m.morph in forcesplit) or (prev.morph in forcesplit) or (prev.category != 'ZZZ')): if prev is not None: out.append(prev) prev = m continue # prev is also a non-morpheme, and eligible for joining prev = CategorizedMorph(prev.morph + m.morph, 'ZZZ') if prev is not None: out.append(prev) return out def _long_to_stem(self, analysis, min_len): """Converts long non-morphemes into stems. In-place operation.""" for m in analysis: if m.category == 'ZZZ' and len(m.morph) >= min_len: m.category = 'STM' def _tail_suffixes(self, analysis): """Converts trailing non-morphemes into suffixes. In-place operation. 
""" for (i, m) in enumerate(analysis): if i == 0: continue if m.category == 'ZZZ' and analysis[i - 1].category == 'SUF': if all(tail.category in ('SUF', 'ZZZ') for tail in analysis[(i + 1):]): m.category = 'SUF' def _force_join(self, analysis, forcesplit): """Joins non-morphemes with previous or next morph""" prev = None out = [] if len(analysis) < 2: return analysis if (analysis[0].category == 'ZZZ' and analysis[0].morph not in forcesplit and analysis[1].morph not in forcesplit): analysis = self._join_at(analysis, 0) for m in analysis: if prev is None: prev = m continue if ((m.category != 'ZZZ' or m.morph in forcesplit) or (prev.morph in forcesplit)): if prev is not None: out.append(prev) prev = m continue # prev is eligible for joining prev = CategorizedMorph(prev.morph + m.morph, 'ZZZ') if prev is not None: out.append(prev) return out class CompoundSegmentationPostprocessor(Postprocessor): """Postprocessor that makes FlatCat perform compound segmentation""" def __init__(self, long_to_stems=True): self._long_to_stems = long_to_stems def apply_to(self, analysis, model=None): if self._long_to_stems: analysis = list(self.long_to_stems(analysis)) parts = self.split_compound(analysis) out = [] for part in parts: part = [morph.morph for morph in part] part = ''.join(part) out.append(CategorizedMorph(part, 'STM')) return out def long_to_stems(self, analysis): for morph in analysis: if morph.category == 'STM': # avoids unnecessary NOOP re-wrapping yield morph elif len(morph) >= 5: yield CategorizedMorph(morph.morph, 'STM') else: yield morph def split_compound(self, analysis): out = [] current = [] prev = None for morph in analysis: if prev is not None and prev != 'PRE': if morph.category in ('PRE', 'STM'): out.append(current) current = [] current.append(morph) prev = morph.category out.append(current) return out class MorphContextBuilder(object): """Temporary structure used when calculating the MorphContexts.""" def __init__(self): self.count = 0 self.left = collections.Counter() self.right = collections.Counter() @property def left_perplexity(self): return MorphContextBuilder._perplexity(self.left) @property def right_perplexity(self): return MorphContextBuilder._perplexity(self.right) @staticmethod def _perplexity(contexts): entropy = 0 if isinstance(contexts, int): total_tokens = float(contexts) contexts = {i: 1. for i in range(contexts)} else: total_tokens = float(sum(contexts.values())) for c in contexts: p = float(contexts[c]) / total_tokens entropy -= p * math.log(p) return math.exp(entropy) class MorphUsageProperties(object): """This class describes how the prior probabilities are calculated from the usage of morphs. """ # These transitions are impossible zero_transitions = ((WORD_BOUNDARY, WORD_BOUNDARY), ('PRE', WORD_BOUNDARY), ('PRE', 'SUF'), (WORD_BOUNDARY, 'SUF')) # Adding these transitions removes the use of non-morphemes forbid_zzz = ((WORD_BOUNDARY, 'ZZZ'), ('PRE', 'ZZZ'), ('STM', 'ZZZ'), ('SUF', 'ZZZ')) # Cache for memoized valid transitions _valid_transitions = None def __init__(self, ppl_threshold=100, ppl_slope=None, length_threshold=3, length_slope=2, type_perplexity=False, min_perplexity_length=4, pre_ppl_threshold=None, uncapped_ppl=False): """Initialize the model parameters describing morph usage. Arguments: ppl_threshold : threshold value for sigmoid used to calculate probabilities from left and right perplexities. ppl_slope : Slope value for sigmoid used to calculate probabilities from left and right perplexities. 
length_threshold : threshold value for sigmoid used to calculate probabilities from length of morph. length_slope : Slope value for sigmoid used to calculate probabilities from length of morph. type_perplexity : If true, perplexity is based on word types, If false, perplexity is based on word tokens. min_perplexity_length : Morphs shorter than this length are ignored when calculating perplexity. pre_ppl_threshold: Separte ppl thresh for prefixes. """ if ppl_threshold is None: self._ppl_threshold = None else: self._ppl_threshold = float(ppl_threshold) if pre_ppl_threshold is None: self._pre_ppl_threshold = self._ppl_threshold else: self._pre_ppl_threshold = float(pre_ppl_threshold) self._length_threshold = float(length_threshold) self._length_slope = float(length_slope) self.type_perplexity = bool(type_perplexity) self._min_perplexity_length = int(min_perplexity_length) if ppl_slope is not None: self._ppl_slope = float(ppl_slope) self._pre_ppl_slope = self._ppl_slope elif self._ppl_threshold is None: self._ppl_slope = None self._pre_ppl_slope = self._ppl_slope else: self._ppl_slope = 10.0 / self._ppl_threshold self._pre_ppl_slope = 10.0 / self._pre_ppl_threshold self._uncapped_ppl = uncapped_ppl # Counts of different contexts in which a morph occurs self._contexts = utils.Sparse(default=MorphContext(0, 1.0, 1.0)) self._context_builders = collections.defaultdict(MorphContextBuilder) self._contexts_per_iter = 50000 # FIXME customizable # Cache for memoized feature-based conditional class probabilities self._condprob_cache = collections.defaultdict(float) self._marginalizer = None self._zlctc = None def get_params(self): """Returns a dict of hyperparameters.""" params = { 'perplexity-threshold': self._ppl_threshold, 'pre-perplexity-threshold': self._pre_ppl_threshold, 'perplexity-slope': self._ppl_slope, 'pre-perplexity-slope': self._pre_ppl_slope, 'length-threshold': self._length_threshold, 'length-slope': self._length_slope, 'type-perplexity': self.type_perplexity, 'min-perplexity-length': self._min_perplexity_length} return params def set_params(self, params): """Sets hyperparameters to loaded values.""" params = {key: val for (key, val) in params.items() if val is not None} if 'perplexity-threshold' in params: _logger.info('Setting perplexity-threshold to {}'.format( params['perplexity-threshold'])) self._ppl_threshold = (float(params['perplexity-threshold'])) if 'pre-perplexity-threshold' in params: _logger.info('Setting pre-perplexity-threshold to {}'.format( params['pre-perplexity-threshold'])) self._pre_ppl_threshold = (float( params['pre-perplexity-threshold'])) if 'perplexity-slope' in params: _logger.info('Setting perplexity-slope to {}'.format( params['perplexity-slope'])) self._ppl_slope = (float(params['perplexity-slope'])) if 'pre-perplexity-slope' in params: _logger.info('Setting pre-perplexity-slope to {}'.format( params['perplexity-slope'])) self._pre_ppl_slope = (float(params['pre-perplexity-slope'])) if 'length-threshold' in params: _logger.info('Setting length-threshold to {}'.format( params['length-threshold'])) self._length_threshold = (float(params['length-threshold'])) if 'length-slope' in params: _logger.info('Setting length-slope to {}'.format( params['length-slope'])) self._length_slope = (float(params['length-slope'])) if 'type-perplexity' in params: _logger.info('Setting type-perplexity to {}'.format( params['type-perplexity'])) self.type_perplexity = bool(params['type-perplexity']) if 'min-perplexity-length' in params: _logger.info('Setting min-perplexity-length 
to {}'.format( params['min-perplexity-length'])) self._min_perplexity_length = (float( params['min-perplexity-length'])) def calculate_usage_features(self, seg_func): """Calculate the usage features of morphs in the corpus.""" self.clear() msg = 'Must set perplexity threshold' assert self._ppl_threshold is not None, msg if self._pre_ppl_threshold is None: self._pre_ppl_threshold = self._ppl_threshold while True: # If risk of running out of memory, perform calculations in # multiple loops over the data conserving_memory = False for rcount, segments in seg_func(): if not self.type_perplexity: pcount = rcount else: # pcount used for perplexity, rcount is real count pcount = 1 for (i, morph) in enumerate(segments): # Collect information about the contexts in which # the morphs occur. if self._add_to_context(morph, pcount, rcount, i, segments): conserving_memory = True self._compress_contexts() if not conserving_memory: break def clear(self): """Resets the context variables. Use before fully reprocessing a segmented corpus.""" self._contexts.clear() self._context_builders.clear() self._condprob_cache.clear() self._marginalizer = None self._zlctc = None def _add_to_context(self, morph, pcount, rcount, i, segments): """Collect information about the contexts in which the morph occurs""" if morph in self._contexts: return False if (len(self._context_builders) > self._contexts_per_iter and morph not in self._context_builders): return True # Previous morph. if i == 0: # Word boundaries are counted as separate contexts neighbour = WORD_BOUNDARY else: neighbour = segments[i - 1] # Contexts shorter than threshold don't affect perplexity if len(neighbour) < self._min_perplexity_length: neighbour = None if neighbour is not None: self._context_builders[morph].left[neighbour] += pcount # Next morph. if i == len(segments) - 1: neighbour = WORD_BOUNDARY else: neighbour = segments[i + 1] if len(neighbour) < self._min_perplexity_length: neighbour = None if neighbour is not None: self._context_builders[morph].right[neighbour] += pcount self._context_builders[morph].count += rcount return False def _compress_contexts(self): """Calculate compact features from the context data collected into _context_builders. This is done to save memory.""" for morph in self._context_builders: tmp = self._context_builders[morph] self._contexts[morph] = MorphContext(tmp.count, tmp.left_perplexity, tmp.right_perplexity) self._context_builders.clear() def condprobs(self, morph): """Calculate feature-based conditional probabilities P(Category|Morph) from the contexts in which the morphs occur. Arguments: morph : A string representation of the morph type. """ if morph not in self._condprob_cache: context = self._contexts[morph] prelike = sigmoid(context.right_perplexity, self._pre_ppl_threshold, self._pre_ppl_slope) suflike = sigmoid(context.left_perplexity, self._ppl_threshold, self._ppl_slope) stmlike = sigmoid(len(morph), self._length_threshold, self._length_slope) p_nonmorpheme = (1. - prelike) * (1. - suflike) * (1. 
- stmlike) # assert 0 <= p_nonmorpheme <= 1 if p_nonmorpheme == 1: p_pre = 0.0 p_suf = 0.0 p_stm = 0.0 else: if p_nonmorpheme < 0.001: p_nonmorpheme = 0.001 normcoeff = ((1.0 - p_nonmorpheme) / ((prelike ** 2) + (suflike ** 2) + (stmlike ** 2))) p_pre = (prelike ** 2) * normcoeff p_suf = (suflike ** 2) * normcoeff p_stm = 1.0 - p_pre - p_suf - p_nonmorpheme self._condprob_cache[morph] = ByCategory(p_pre, p_stm, p_suf, p_nonmorpheme) return self._condprob_cache[morph] @property def marginal_class_probs(self): """True distribution of class probabilities, calculated by marginalizing over the feature based conditional probabilities over all observed morphs. This will not give the same result as the observed count based calculation. """ return self._get_marginalizer().normalized() @property def category_token_count(self): """Un-normalized distribution of class probabilities, the sum of which is the number of observed morphs. See marginal_class_probs for the normalized version. """ return self._get_marginalizer().category_token_count def zlog_category_token_count(self): if self._zlctc is None: self._zlctc = ByCategory( *[utils.zlog(x) for x in self.category_token_count]) return self._zlctc def _get_marginalizer(self): if self._marginalizer is None: self._marginalizer = Marginalizer() for morph in self.seen_morphs(): self._marginalizer.add(self.count(morph), self.condprobs(morph)) self._zlctc = None return self._marginalizer def feature_cost(self, morph): """The cost of encoding the necessary features along with a morph. The length in characters of the morph is also a feature, but it does not need to be encoded as it is available from the surface form. """ context = self._contexts[morph] return (universalprior(context.right_perplexity) + universalprior(context.left_perplexity)) def estimate_contexts(self, old_morphs, new_morphs, max_contexts=None): """Estimates context features for new unseen morphs. Arguments: old_morphs : A sequence of morphs being replaced. The existing context of these morphs can be used in the estimation. new_morphs : A sequence of morphs that replaces the old ones. Any previously unseen morphs in this sequence will get context features estimated from their surface form and/or from the contexts of the old morphs they replace. Returns: A list of temporary morph contexts that have been estimated. These should be removed by the caller if no longer necessary. The removal is done using MorphContext.remove_temporaries. 
""" try: uncapped_ppl = self._uncapped_ppl except AttributeError: uncapped_ppl = True if not uncapped_ppl and max_contexts is not None: max_ppl = MorphContextBuilder._perplexity(max_contexts) temporaries = [] for (i, morph) in enumerate(new_morphs): if morph in self: # The morph already has real context: no need to estimate continue if i == 0: # Prefix inherits left perplexity of leftmost parent l_ppl = self._contexts[old_morphs[0]].left_perplexity if not uncapped_ppl: l_ppl = min(l_ppl, max_ppl) else: # Otherwise assume that the morph doesn't appear in any # other contexts, which gives perplexity 1.0 l_ppl = 1.0 if i == len(new_morphs) - 1: r_ppl = self._contexts[old_morphs[-1]].right_perplexity if not uncapped_ppl: r_ppl = min(r_ppl, max_ppl) else: r_ppl = 1.0 count = 0 # estimating does not add instances of the morph self._contexts[morph] = MorphContext(count, l_ppl, r_ppl) temporaries.append(morph) return temporaries @staticmethod def context_type(prev_morph, next_morph, prev_cat, next_cat): """Cluster certain types of context, to allow making context-dependant joining decisions.""" # This categorization scheme ignores prev_morph, next_morph, # and only uses the categories ctype = CONTEXT_TYPE_INTERNAL if prev_cat == WORD_BOUNDARY or prev_cat == 'PRE': ctype += CONTEXT_FLAG_INITIAL if next_cat == WORD_BOUNDARY or next_cat == 'SUF': ctype += CONTEXT_FLAG_FINAL return ctype ### End of categorization-dependent code ######################################## # But not the end of the class: # The methods in this class below this line are helpers that will # probably not need to be modified if the categorization scheme changes # def remove_temporaries(self, temporaries): """Remove estimated temporary morph contexts when no longer needed.""" for morph in temporaries: if morph not in self: continue msg = '{}: {}'.format(morph, self._contexts[morph].count) assert self._contexts[morph].count == 0, msg del self._contexts[morph] if morph in self._condprob_cache: del self._condprob_cache[morph] def remove_zeros(self): """Remove context information for all morphs contexts with zero count. This can save a bit more memory than just removing estimated temporary contexts. Estimated context will be used for the removed morphs for the rest of the iteration.""" remove_list = [] for morph in self._contexts.keys(): if self._contexts[morph].count == 0: remove_list.append(morph) for morph in remove_list: del self._contexts[morph] if morph in self._condprob_cache: del self._condprob_cache[morph] def seen_morphs(self): """All morphs that have defined contexts.""" return [morph for morph in self._contexts.keys() if self._contexts[morph].count > 0] def __contains__(self, morph): return morph in self._contexts and self._contexts[morph].count > 0 def get_context_features(self, morph): """Returns the context features of a seen morph.""" return self._contexts[morph] def count(self, morph): """The counts in the corpus of morphs with contexts.""" if morph not in self._contexts: return 0 return self._contexts[morph].count def set_count(self, morph, new_count): """Set the number of observed occurences of a morph. Also updates the true category distribution. 
""" if self._marginalizer is not None and self.count(morph) > 0: self._marginalizer.add(-self.count(morph), self.condprobs(morph)) self._contexts[morph] = self._contexts[morph]._replace(count=new_count) assert self.count(morph) >= 0, '{} subzero count'.format(morph) if self._marginalizer is not None and self.count(morph) > 0: self._marginalizer.add(self.count(morph), self.condprobs(morph)) self._zlctc = None @classmethod def valid_transitions(cls): """Returns (and caches) all valid transitions as pairs (from_category, to_category). Any transitions not included in the list are forbidden, and must have count 0 and probability 0. """ if cls._valid_transitions is None: cls._valid_transitions = [] categories = get_categories(wb=True) for cat1 in categories: for cat2 in categories: if (cat1, cat2) in cls.zero_transitions: continue cls._valid_transitions.append((cat1, cat2)) cls._valid_transitions = tuple(cls._valid_transitions) return cls._valid_transitions class MaximumLikelihoodMorphUsage(object): """This is a replacement for MorphUsageProperties, that uses ML-estimation to replace the property-based conditional category probabilities. """ zero_transitions = MorphUsageProperties.zero_transitions forbid_zzz = MorphUsageProperties.forbid_zzz _valid_transitions = MorphUsageProperties._valid_transitions def __init__(self, corpus_coding, param_dict): self._corpus_coding = corpus_coding self._param_dict = param_dict self._seen = collections.defaultdict(int) def get_params(self): """Returns a dict of hyperparameters.""" return self._param_dict def set_params(self, params): """Sets hyperparameters to loaded values.""" self._param_dict = params def clear(self): self._seen.clear() def calculate_usage_features(self, seg_func): """Recalculate morph counts""" self._seen.clear() for rcount, segments in seg_func(): for morph in segments: self._seen[morph] += rcount def feature_cost(self, morph): """The cost of encoding the necessary features along with a morph. Always zero in the ML-estimation stage. Exists for drop-in compatibility with MorphUsageProperties""" return 0 def estimate_contexts(self, old_morphs, new_morphs): """Exists for drop-in compatibility with MorphUsageProperties""" return [] def remove_temporaries(self, temporaries): """Exists for drop-in compatibility with MorphUsageProperties""" pass def remove_zeros(self): """Exists for drop-in compatibility with MorphUsageProperties""" pass def condprobs(self, morph): """Calculate feature-based conditional probabilities P(Category|Morph) from the contexts in which the morphs occur. Arguments: morph : A string representation of the morph type. """ counts = self._corpus_coding.get_emission_counts(morph) return self._normalize(counts) @property def marginal_class_probs(self): """True distribution of class probabilities, calculated by marginalizing over the feature based conditional probabilities over all observed morphs. This will not give the same result as the observed count based calculation. """ return self._normalize(self.category_token_count) @property def category_token_count(self): """Un-normalized distribution of class probabilities, the sum of which is the number of observed morphs. See marginal_class_probs for the normalized version. 
""" return ByCategory( self._corpus_coding._cat_tagcount[category] for category in get_categories()) @staticmethod def _normalize(counts): total = sum(counts) assert total != 0 return ByCategory(*(float(x) / total for x in counts)) @staticmethod def context_type(prev_morph, next_morph, prev_cat, next_cat): """Cluster certain types of context, to allow making context-dependant joining decisions.""" return MorphUsageProperties.context_type(prev_morph, next_morph, prev_cat, next_cat) def seen_morphs(self): """All morphs that have defined emissions.""" return [morph for (morph, count) in self._seen.items() if count > 0] def __contains__(self, morph): return morph in self._seen def get_context_features(self, morph): """Returns dummy context features.""" return MorphContext(self.count(morph), 1., 1.) def count(self, morph): """The counts in the corpus of morphs with contexts.""" if morph not in self._seen: return 0 return self._seen[morph] def set_count(self, morph, new_count): """Set the number of observed occurences of a morph. Also updates the true category distribution. """ self._seen[morph] = new_count @classmethod def valid_transitions(cls): """Returns (and caches) all valid transitions as pairs (from_category, to_category). Any transitions not included in the list are forbidden, and must have count 0 and probability 0. """ return cls._valid_transitions class CategorizedMorph(object): """Represents a morph with attached category information. These objects should be treated as immutable, even though it is not enforced by the code. """ __slots__ = ['morph', 'category'] def __init__(self, morph, category=None): self.morph = morph self.category = category def __repr__(self): if self.category is None: return _str(self.morph) return self.morph + '/' + self.category def __eq__(self, other): if not isinstance(other, CategorizedMorph): return False return (self.morph == other.morph and self.category == other.category) def __hash__(self): return hash((self.morph, self.category)) def __len__(self): return len(self.morph) def __getitem__(self, i): return self.morph[i] def get_categories(wb=False): """The category tags supported by this model. Argumments: wb : If True, the word boundary will be included. Default: False. """ categories = list(ByCategory._fields) if wb: categories.append(WORD_BOUNDARY) return categories def sigmoid(value, threshold, slope): return 1.0 / (1.0 + math.exp(-slope * (value - threshold))) _LOG_C = math.log(2.865) def universalprior(positive_number): """Compute the number of nats that are necessary for coding a positive integer according to Rissanen's universal prior. """ return _LOG_C + math.log(positive_number) class Marginalizer(object): """An accumulator for marginalizing the class probabilities P(Category) from all the individual conditional probabilities P(Category|Morph) and observed morph probabilities P(Morph). First the unnormalized distribution is obtained by summing over #(Morph) * P(Category|Morph) over each morph, separately for each category. P(Category) is then obtained by normalizing the distribution. 
""" def __init__(self): self._counts = [0.0] * len(ByCategory._fields) def add(self, rcount, condprobs): """Add the products #(Morph) * P(Category|Morph) for one observed morph.""" for i, x in enumerate(condprobs): self._counts[i] += float(rcount) * float(x) def normalized(self): """Returns the marginal probabilities for all categories.""" total = self.total_token_count return ByCategory(*[x / total for x in self._counts]) @property def total_token_count(self): """Total number of tokens seen.""" return sum(self._counts) @property def category_token_count(self): """Tokens seen per category.""" return ByCategory(*self._counts) def map_category(analysis, from_cat, to_cat): """Replaces all occurrences of the category from_cat with to_cat, in the given analysis. """ out = [] for cmorph in analysis: if cmorph.category == from_cat: out.append(CategorizedMorph(cmorph.morph, to_cat)) else: out.append(cmorph) return tuple(out)
""" Code for deep Q-learning as described in: Playing Atari with Deep Reinforcement Learning NIPS Deep Learning Workshop 2013 and Human-level control through deep reinforcement learning. Nature, 518(7540):529-533, February 2015 Author of Lasagne port: Nissan Pow Modifications: Nathan Sprague """ import lasagne import numpy as np import theano import theano.tensor as T from updates import deepmind_rmsprop class DeepQLearner: """ Deep Q-learning network using Lasagne. """ def __init__(self, input_width, input_height, num_actions, num_frames, discount, learning_rate, rho, rms_epsilon, momentum, freeze_interval, batch_size, network_type, update_rule, batch_accumulator, input_scale=255.0): self.input_width = input_width self.input_height = input_height self.num_actions = num_actions self.num_frames = num_frames self.batch_size = batch_size self.gamma = discount self.rho = rho self.lr = learning_rate self.rms_epsilon = rms_epsilon self.momentum = momentum self.freeze_interval = freeze_interval self.update_counter = 0 self.l_out = self.build_network(network_type, input_width, input_height, num_actions, num_frames, batch_size) if self.freeze_interval > 0: self.next_l_out = self.build_network(network_type, input_width, input_height, num_actions, num_frames, batch_size) self.reset_q_hat() states = T.tensor4('states') next_states = T.tensor4('next_states') rewards = T.col('rewards') actions = T.icol('actions') #terminals = T.icol('terminals') self.states_shared = theano.shared( np.zeros((batch_size, num_frames, input_height, input_width), dtype=theano.config.floatX)) self.next_states_shared = theano.shared( np.zeros((batch_size, num_frames, input_height, input_width), dtype=theano.config.floatX)) self.rewards_shared = theano.shared( np.zeros((batch_size, 1), dtype=theano.config.floatX), broadcastable=(False, True)) self.actions_shared = theano.shared( np.zeros((batch_size, 1), dtype='int32'), broadcastable=(False, True)) # self.terminals_shared = theano.shared( # np.zeros((batch_size, 1), dtype='int32'), # broadcastable=(False,True)) q_vals = lasagne.layers.get_output(self.l_out, states / input_scale) if self.freeze_interval > 0: next_q_vals = lasagne.layers.get_output(self.next_l_out, next_states / input_scale) else: next_q_vals = lasagne.layers.get_output(self.l_out, next_states / input_scale) next_q_vals = theano.gradient.disconnected_grad(next_q_vals) target = rewards + self.gamma * T.max(next_q_vals, axis=1, keepdims=True) diff = target - q_vals[T.arange(batch_size), actions.reshape((-1,))].reshape((-1, 1)) if batch_accumulator == 'sum': loss = T.sum(diff ** 2) elif batch_accumulator == 'mean': loss = T.mean(diff ** 2) else: raise ValueError("Bad accumulator: {}".format(batch_accumulator)) params = lasagne.layers.helper.get_all_params(self.l_out) givens = { states: self.states_shared, next_states: self.next_states_shared, rewards: self.rewards_shared, actions: self.actions_shared, #terminals: self.terminals_shared } if update_rule == 'deepmind_rmsprop': updates = deepmind_rmsprop(loss, params, self.lr, self.rho, self.rms_epsilon) elif update_rule == 'rmsprop': updates = lasagne.updates.rmsprop(loss, params, self.lr, self.rho, self.rms_epsilon) elif update_rule == 'sgd': updates = lasagne.updates.sgd(loss, params, self.lr) else: raise ValueError("Unrecognized update: {}".format(update_rule)) if self.momentum > 0: updates = lasagne.updates.apply_momentum(updates, None, self.momentum) self._train = theano.function([], [loss, q_vals], updates=updates, givens=givens) self._q_vals = 
theano.function([], q_vals, givens={states: self.states_shared}) def build_network(self, network_type, input_width, input_height, output_dim, num_frames, batch_size): if network_type == "nature_cuda": return self.build_nature_network(input_width, input_height, output_dim, num_frames, batch_size) if network_type == "nature_dnn": return self.build_nature_network_dnn(input_width, input_height, output_dim, num_frames, batch_size) elif network_type == "nips_cuda": return self.build_nips_network(input_width, input_height, output_dim, num_frames, batch_size) elif network_type == "nips_dnn": return self.build_nips_network_dnn(input_width, input_height, output_dim, num_frames, batch_size) elif network_type == "linear": return self.build_linear_network(input_width, input_height, output_dim, num_frames, batch_size) else: raise ValueError("Unrecognized network: {}".format(network_type)) def train(self, states, actions, rewards, next_states, terminals): """ Train one batch. Arguments: states - b x f x h x w numpy array, where b is batch size, f is num frames, h is height and w is width. actions - b x 1 numpy array of integers rewards - b x 1 numpy array next_states - b x f x h x w numpy array terminals - b x 1 numpy boolean array (currently ignored) Returns: average loss """ self.states_shared.set_value(states) self.next_states_shared.set_value(next_states) self.actions_shared.set_value(actions) self.rewards_shared.set_value(rewards) #self.terminals_shared.set_value(np.logical_not(terminals)) if (self.freeze_interval > 0 and self.update_counter % self.freeze_interval == 0): self.reset_q_hat() loss, _ = self._train() self.update_counter += 1 return np.sqrt(loss) def q_vals(self, state): states = np.zeros((self.batch_size, self.num_frames, self.input_height, self.input_width), dtype=theano.config.floatX) states[0, ...] = state self.states_shared.set_value(states) return self._q_vals()[0] def choose_action(self, state, epsilon): if np.random.rand() < epsilon: return np.random.randint(0, self.num_actions) q_vals = self.q_vals(state) return np.argmax(q_vals) def reset_q_hat(self): all_params = lasagne.layers.helper.get_all_param_values(self.l_out) lasagne.layers.helper.set_all_param_values(self.next_l_out, all_params) def build_nature_network(self, input_width, input_height, output_dim, num_frames, batch_size): """ Build a large network consistent with the DeepMind Nature paper. 
""" from lasagne.layers import cuda_convnet l_in = lasagne.layers.InputLayer( shape=(batch_size, num_frames, input_width, input_height) ) l_conv1 = cuda_convnet.Conv2DCCLayer( l_in, num_filters=32, filter_size=(8, 8), stride=(4, 4), nonlinearity=lasagne.nonlinearities.rectify, W=lasagne.init.HeUniform(c01b=True), # Defaults to Glorot b=lasagne.init.Constant(.1), dimshuffle=True ) l_conv2 = cuda_convnet.Conv2DCCLayer( l_conv1, num_filters=64, filter_size=(4, 4), stride=(2, 2), nonlinearity=lasagne.nonlinearities.rectify, W=lasagne.init.HeUniform(c01b=True), b=lasagne.init.Constant(.1), dimshuffle=True ) l_conv3 = cuda_convnet.Conv2DCCLayer( l_conv2, num_filters=64, filter_size=(3, 3), stride=(1, 1), nonlinearity=lasagne.nonlinearities.rectify, W=lasagne.init.HeUniform(c01b=True), b=lasagne.init.Constant(.1), dimshuffle=True ) l_hidden1 = lasagne.layers.DenseLayer( l_conv3, num_units=512, nonlinearity=lasagne.nonlinearities.rectify, W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1) ) l_out = lasagne.layers.DenseLayer( l_hidden1, num_units=output_dim, nonlinearity=None, W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1) ) return l_out def build_nature_network_dnn(self, input_width, input_height, output_dim, num_frames, batch_size): """ Build a large network consistent with the DeepMind Nature paper. """ from lasagne.layers import dnn l_in = lasagne.layers.InputLayer( shape=(batch_size, num_frames, input_width, input_height) ) l_conv1 = dnn.Conv2DDNNLayer( l_in, num_filters=32, filter_size=(8, 8), stride=(4, 4), nonlinearity=lasagne.nonlinearities.rectify, W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1) ) l_conv2 = dnn.Conv2DDNNLayer( l_conv1, num_filters=64, filter_size=(4, 4), stride=(2, 2), nonlinearity=lasagne.nonlinearities.rectify, W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1) ) l_conv3 = dnn.Conv2DDNNLayer( l_conv2, num_filters=64, filter_size=(3, 3), stride=(1, 1), nonlinearity=lasagne.nonlinearities.rectify, W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1) ) l_hidden1 = lasagne.layers.DenseLayer( l_conv3, num_units=512, nonlinearity=lasagne.nonlinearities.rectify, W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1) ) l_out = lasagne.layers.DenseLayer( l_hidden1, num_units=output_dim, nonlinearity=None, W=lasagne.init.HeUniform(), b=lasagne.init.Constant(.1) ) return l_out def build_nips_network(self, input_width, input_height, output_dim, num_frames, batch_size): """ Build a network consistent with the 2013 NIPS paper. 
""" from lasagne.layers import cuda_convnet l_in = lasagne.layers.InputLayer( shape=(batch_size, num_frames, input_width, input_height) ) l_conv1 = cuda_convnet.Conv2DCCLayer( l_in, num_filters=16, filter_size=(8, 8), stride=(4, 4), nonlinearity=lasagne.nonlinearities.rectify, #W=lasagne.init.HeUniform(c01b=True), W=lasagne.init.Normal(.01), b=lasagne.init.Constant(.1), dimshuffle=True ) l_conv2 = cuda_convnet.Conv2DCCLayer( l_conv1, num_filters=32, filter_size=(4, 4), stride=(2, 2), nonlinearity=lasagne.nonlinearities.rectify, #W=lasagne.init.HeUniform(c01b=True), W=lasagne.init.Normal(.01), b=lasagne.init.Constant(.1), dimshuffle=True ) l_hidden1 = lasagne.layers.DenseLayer( l_conv2, num_units=256, nonlinearity=lasagne.nonlinearities.rectify, #W=lasagne.init.HeUniform(), W=lasagne.init.Normal(.01), b=lasagne.init.Constant(.1) ) l_out = lasagne.layers.DenseLayer( l_hidden1, num_units=output_dim, nonlinearity=None, #W=lasagne.init.HeUniform(), W=lasagne.init.Normal(.01), b=lasagne.init.Constant(.1) ) return l_out def build_nips_network_dnn(self, input_width, input_height, output_dim, num_frames, batch_size): """ Build a network consistent with the 2013 NIPS paper. """ # Import it here, in case it isn't installed. from lasagne.layers import dnn l_in = lasagne.layers.InputLayer( shape=(batch_size, num_frames, input_width, input_height) ) l_conv1 = dnn.Conv2DDNNLayer( l_in, num_filters=16, filter_size=(8, 8), stride=(4, 4), nonlinearity=lasagne.nonlinearities.rectify, #W=lasagne.init.HeUniform(), W=lasagne.init.Normal(.01), b=lasagne.init.Constant(.1) ) l_conv2 = dnn.Conv2DDNNLayer( l_conv1, num_filters=32, filter_size=(4, 4), stride=(2, 2), nonlinearity=lasagne.nonlinearities.rectify, #W=lasagne.init.HeUniform(), W=lasagne.init.Normal(.01), b=lasagne.init.Constant(.1) ) l_hidden1 = lasagne.layers.DenseLayer( l_conv2, num_units=256, nonlinearity=lasagne.nonlinearities.rectify, #W=lasagne.init.HeUniform(), W=lasagne.init.Normal(.01), b=lasagne.init.Constant(.1) ) l_out = lasagne.layers.DenseLayer( l_hidden1, num_units=output_dim, nonlinearity=None, #W=lasagne.init.HeUniform(), W=lasagne.init.Normal(.01), b=lasagne.init.Constant(.1) ) return l_out def build_linear_network(self, input_width, input_height, output_dim, num_frames, batch_size): """ Build a simple linear learner. Useful for creating tests that sanity-check the weight update code. """ l_in = lasagne.layers.InputLayer( shape=(batch_size, num_frames, input_width, input_height) ) l_out = lasagne.layers.DenseLayer( l_in, num_units=output_dim, nonlinearity=None, W=lasagne.init.Constant(0.0), b=None ) return l_out def main(): net = DeepQLearner(84, 84, 16, 4, .99, .00025, .95, .95, 10000, 32, 'nature_cuda') if __name__ == '__main__': main()
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SparseConcat.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.platform import test class SparseConcatTest(test.TestCase): def _SparseTensor_UnknownShape(self, ind_shape=None, val_shape=None, shape_shape=None): return sparse_tensor.SparseTensor( array_ops.placeholder( dtypes.int64, shape=ind_shape), array_ops.placeholder( dtypes.float32, shape=val_shape), array_ops.placeholder( dtypes.int64, shape=shape_shape)) def _SparseTensorValue_3x3(self): # [ 1] # [2 ] # [3 4] ind = np.array([[0, 2], [1, 0], [2, 0], [2, 2]]) val = np.array([1, 2, 3, 4]) shape = np.array([3, 3]) return sparse_tensor.SparseTensorValue( np.array(ind, np.int64), np.array(val, np.float32), np.array(shape, np.int64)) def _SparseTensor_3x3(self): return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_3x3()) def _SparseTensorValue_3x5(self): # [ ] # [ 1 ] # [2 1 0] ind = np.array([[1, 1], [2, 0], [2, 3], [2, 4]]) val = np.array([1, 2, 1, 0]) shape = np.array([3, 5]) return sparse_tensor.SparseTensorValue( np.array(ind, np.int64), np.array(val, np.float32), np.array(shape, np.int64)) def _SparseTensor_3x5(self): return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_3x5()) def _SparseTensor_3x2(self): # [ ] # [1 ] # [2 ] ind = np.array([[1, 0], [2, 0]]) val = np.array([1, 2]) shape = np.array([3, 2]) return sparse_tensor.SparseTensor( constant_op.constant(ind, dtypes.int64), constant_op.constant(val, dtypes.float32), constant_op.constant(shape, dtypes.int64)) def _SparseTensor_2x3(self): # [ 1 ] # [1 2] ind = np.array([[0, 1], [1, 0], [1, 2]]) val = np.array([1, 1, 2]) shape = np.array([2, 3]) return sparse_tensor.SparseTensor( constant_op.constant(ind, dtypes.int64), constant_op.constant(val, dtypes.float32), constant_op.constant(shape, dtypes.int64)) def _SparseTensor_2x3x4(self): ind = np.array([ [0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 1], [1, 1, 3], [1, 2, 2]]) val = np.array([1, 10, 12, 103, 111, 113, 122]) shape = np.array([2, 3, 4]) return sparse_tensor.SparseTensor( constant_op.constant(ind, dtypes.int64), constant_op.constant(val, dtypes.float32), constant_op.constant(shape, dtypes.int64)) def _SparseTensor_String3x3(self): # [ a] # [b ] # [c d] ind = np.array([[0, 2], [1, 0], [2, 0], [2, 2]]) val = np.array(["a", "b", "c", "d"]) shape = np.array([3, 3]) return sparse_tensor.SparseTensor( constant_op.constant(ind, dtypes.int64), constant_op.constant(val, dtypes.string), constant_op.constant(shape, dtypes.int64)) def _SparseTensor_String3x5(self): # [ ] # 
[ e ] # [f g h] ind = np.array([[1, 1], [2, 0], [2, 3], [2, 4]]) val = np.array(["e", "f", "g", "h"]) shape = np.array([3, 5]) return sparse_tensor.SparseTensor( constant_op.constant(ind, dtypes.int64), constant_op.constant(val, dtypes.string), constant_op.constant(shape, dtypes.int64)) def testConcat1(self): with self.session(use_gpu=False) as sess: # concat(A): # [ 1] # [2 ] # [3 4] for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()): # Note that we ignore concat_dim in this case since we short-circuit the # single-input case in python. for concat_dim in (-2000, 1, 2000): sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a]) self.assertEqual(sp_concat.indices.get_shape(), [4, 2]) self.assertEqual(sp_concat.values.get_shape(), [4]) self.assertEqual(sp_concat.dense_shape.get_shape(), [2]) concat_out = sess.run(sp_concat) self.assertAllEqual(concat_out.indices, [[0, 2], [1, 0], [2, 0], [2, 2]]) self.assertAllEqual(concat_out.values, [1, 2, 3, 4]) self.assertAllEqual(concat_out.dense_shape, [3, 3]) def testConcat2(self): with self.session(use_gpu=False) as sess: # concat(A, B): # [ 1 ] # [2 1 ] # [3 4 2 1 0] for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()): for sp_b in (self._SparseTensorValue_3x5(), self._SparseTensor_3x5()): for concat_dim in (-1, 1): sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b]) self.assertEqual(sp_concat.indices.get_shape(), [8, 2]) self.assertEqual(sp_concat.values.get_shape(), [8]) self.assertEqual(sp_concat.dense_shape.get_shape(), [2]) concat_out = sess.run(sp_concat) self.assertAllEqual(concat_out.indices, [[0, 2], [1, 0], [1, 4], [2, 0], [2, 2], [2, 3], [2, 6], [2, 7]]) self.assertAllEqual(concat_out.values, [1, 2, 1, 3, 4, 2, 1, 0]) self.assertAllEqual(concat_out.dense_shape, [3, 8]) def testConcatDim0(self): with self.session(use_gpu=False) as sess: # concat(A, D): # [ 1] # [2 ] # [3 4] # [ 1 ] # [1 2] sp_a = self._SparseTensor_3x3() sp_d = self._SparseTensor_2x3() for concat_dim in (-2, 0): sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_d]) self.assertEqual(sp_concat.indices.get_shape(), [7, 2]) self.assertEqual(sp_concat.values.get_shape(), [7]) self.assertEqual(sp_concat.dense_shape.get_shape(), [2]) concat_out = sess.run(sp_concat) self.assertAllEqual( concat_out.indices, [[0, 2], [1, 0], [2, 0], [2, 2], [3, 1], [4, 0], [4, 2]]) self.assertAllEqual(concat_out.values, np.array([1, 2, 3, 4, 1, 1, 2])) self.assertAllEqual(concat_out.dense_shape, np.array([5, 3])) def testConcat3(self): with self.session(use_gpu=False) as sess: # concat(A, B, C): # [ 1 ] # [2 1 1 ] # [3 4 2 1 0 2 ] sp_a = self._SparseTensor_3x3() sp_b = self._SparseTensor_3x5() sp_c = self._SparseTensor_3x2() for concat_dim in (-1, 1): sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b, sp_c]) self.assertEqual(sp_concat.indices.get_shape(), [10, 2]) self.assertEqual(sp_concat.values.get_shape(), [10]) self.assertEqual(sp_concat.dense_shape.get_shape(), [2]) concat_out = sess.run(sp_concat) self.assertAllEqual(concat_out.indices, [[0, 2], [1, 0], [1, 4], [1, 8], [2, 0], [2, 2], [2, 3], [2, 6], [2, 7], [2, 8]]) self.assertAllEqual(concat_out.values, [1, 2, 1, 1, 3, 4, 2, 1, 0, 2]) self.assertAllEqual(concat_out.dense_shape, [3, 10]) def testConcatNonNumeric(self): with self.session(use_gpu=False) as sess: # concat(A, B): # [ a ] # [b e ] # [c d f g h] sp_a = self._SparseTensor_String3x3() sp_b = self._SparseTensor_String3x5() for concat_dim in (-1, 1): sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b]) 
self.assertEqual(sp_concat.indices.get_shape(), [8, 2]) self.assertEqual(sp_concat.values.get_shape(), [8]) self.assertEqual(sp_concat.dense_shape.get_shape(), [2]) concat_out = sess.run(sp_concat) self.assertAllEqual( concat_out.indices, [[0, 2], [1, 0], [1, 4], [2, 0], [2, 2], [2, 3], [2, 6], [2, 7]]) self.assertAllEqual(concat_out.values, [b"a", b"b", b"e", b"c", b"d", b"f", b"g", b"h"]) self.assertAllEqual(concat_out.dense_shape, [3, 8]) def testMismatchedRank(self): with self.session(use_gpu=False): sp_a = self._SparseTensor_3x3() sp_e = self._SparseTensor_2x3x4() # Rank mismatches can be caught at shape-inference time for concat_dim in (-1, 1): with self.assertRaises(ValueError): sparse_ops.sparse_concat(concat_dim, [sp_a, sp_e]) def testMismatchedRankExpandNonconcatDim(self): with self.session(use_gpu=False): sp_a = self._SparseTensor_3x3() sp_e = self._SparseTensor_2x3x4() # Rank mismatches should be caught at shape-inference time, even for # expand_nonconcat_dim=True. for concat_dim in (-1, 1): with self.assertRaises(ValueError): sparse_ops.sparse_concat( concat_dim, [sp_a, sp_e], expand_nonconcat_dim=True) def testMismatchedShapes(self): with self.session(use_gpu=False) as sess: sp_a = self._SparseTensor_3x3() sp_b = self._SparseTensor_3x5() sp_c = self._SparseTensor_3x2() sp_d = self._SparseTensor_2x3() for concat_dim in (-1, 1): sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b, sp_c, sp_d]) # Shape mismatches can only be caught when the op is run with self.assertRaisesOpError("Input shapes must match"): sess.run(sp_concat) def testMismatchedShapesExpandNonconcatDim(self): with self.session(use_gpu=False) as sess: sp_a = self._SparseTensor_3x3() sp_b = self._SparseTensor_3x5() sp_c = self._SparseTensor_3x2() sp_d = self._SparseTensor_2x3() for concat_dim0 in (-2, 0): for concat_dim1 in (-1, 1): sp_concat_dim0 = sparse_ops.sparse_concat( concat_dim0, [sp_a, sp_b, sp_c, sp_d], expand_nonconcat_dim=True) sp_concat_dim1 = sparse_ops.sparse_concat( concat_dim1, [sp_a, sp_b, sp_c, sp_d], expand_nonconcat_dim=True) sp_concat_dim0_out = sess.run(sp_concat_dim0) sp_concat_dim1_out = sess.run(sp_concat_dim1) self.assertAllEqual(sp_concat_dim0_out.indices, [[0, 2], [1, 0], [2, 0], [2, 2], [4, 1], [5, 0], [5, 3], [5, 4], [7, 0], [8, 0], [9, 1], [10, 0], [10, 2]]) self.assertAllEqual(sp_concat_dim0_out.values, [1, 2, 3, 4, 1, 2, 1, 0, 1, 2, 1, 1, 2]) self.assertAllEqual(sp_concat_dim0_out.dense_shape, [11, 5]) self.assertAllEqual(sp_concat_dim1_out.indices, [[0, 2], [0, 11], [1, 0], [1, 4], [1, 8], [1, 10], [1, 12], [2, 0], [2, 2], [2, 3], [2, 6], [2, 7], [2, 8]]) self.assertAllEqual(sp_concat_dim1_out.values, [1, 1, 2, 1, 1, 1, 2, 3, 4, 2, 1, 0, 2]) self.assertAllEqual(sp_concat_dim1_out.dense_shape, [3, 13]) def testShapeInferenceUnknownShapes(self): with self.session(use_gpu=False): sp_inputs = [ self._SparseTensor_UnknownShape(), self._SparseTensor_UnknownShape(val_shape=[3]), self._SparseTensor_UnknownShape(ind_shape=[1, 3]), self._SparseTensor_UnknownShape(shape_shape=[3]) ] for concat_dim in (-2, 0): sp_concat = sparse_ops.sparse_concat(concat_dim, sp_inputs) self.assertEqual(sp_concat.indices.get_shape().as_list(), [None, 3]) self.assertEqual(sp_concat.values.get_shape().as_list(), [None]) self.assertEqual(sp_concat.dense_shape.get_shape(), [3]) if __name__ == "__main__": test.main()
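# ---------------------------------------------------------------------------
# Illustrative sketch (separate from the tests above): concatenating sparse
# matrices along dim 1 amounts to shifting each input's column indices by the
# summed widths of the inputs before it, which is what the expected index
# lists in testConcat2/testConcat3 encode. A plain-NumPy version of that
# bookkeeping on (indices, values, dense_shape) triples, with no TensorFlow
# involved:
import numpy as np


def example_sparse_concat_dim1(sparse_inputs):
  """sparse_inputs: list of (indices (n, 2), values (n,), shape (2,)) triples."""
  all_indices, all_values = [], []
  col_offset = 0
  rows = sparse_inputs[0][2][0]
  for indices, values, shape in sparse_inputs:
    shifted = indices.copy()
    shifted[:, 1] += col_offset  # shift columns past the previous inputs
    all_indices.append(shifted)
    all_values.append(values)
    col_offset += shape[1]
  indices = np.concatenate(all_indices)
  values = np.concatenate(all_values)
  order = np.lexsort((indices[:, 1], indices[:, 0]))  # row-major order
  return indices[order], values[order], np.array([rows, col_offset])


# The 3x3 and 3x5 inputs used in the tests above give a 3x8 result whose
# indices and values match the expectation in testConcat2.
a = (np.array([[0, 2], [1, 0], [2, 0], [2, 2]]), np.array([1, 2, 3, 4]),
     np.array([3, 3]))
b = (np.array([[1, 1], [2, 0], [2, 3], [2, 4]]), np.array([1, 2, 1, 0]),
     np.array([3, 5]))
print(example_sparse_concat_dim1([a, b]))
# ---------------------------------------------------------------------------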
"""Test Z-Wave locks.""" from unittest.mock import MagicMock, patch import pytest from homeassistant import config_entries from homeassistant.components.zwave import const, lock from tests.mock.zwave import MockEntityValues, MockNode, MockValue, value_changed # Integration is disabled pytest.skip("Integration has been disabled in the manifest", allow_module_level=True) def test_get_device_detects_lock(mock_openzwave): """Test get_device returns a Z-Wave lock.""" node = MockNode() values = MockEntityValues( primary=MockValue(data=None, node=node), access_control=None, alarm_type=None, alarm_level=None, ) device = lock.get_device(node=node, values=values, node_config={}) assert isinstance(device, lock.ZwaveLock) def test_lock_turn_on_and_off(mock_openzwave): """Test turning on a Z-Wave lock.""" node = MockNode() values = MockEntityValues( primary=MockValue(data=None, node=node), access_control=None, alarm_type=None, alarm_level=None, ) device = lock.get_device(node=node, values=values, node_config={}) assert not values.primary.data device.lock() assert values.primary.data device.unlock() assert not values.primary.data def test_lock_value_changed(mock_openzwave): """Test value changed for Z-Wave lock.""" node = MockNode() values = MockEntityValues( primary=MockValue(data=None, node=node), access_control=None, alarm_type=None, alarm_level=None, ) device = lock.get_device(node=node, values=values, node_config={}) assert not device.is_locked values.primary.data = True value_changed(values.primary) assert device.is_locked def test_lock_state_workaround(mock_openzwave): """Test value changed for Z-Wave lock using notification state.""" node = MockNode(manufacturer_id="0090", product_id="0440") values = MockEntityValues( primary=MockValue(data=True, node=node), access_control=MockValue(data=1, node=node), alarm_type=None, alarm_level=None, ) device = lock.get_device(node=node, values=values) assert device.is_locked values.access_control.data = 2 value_changed(values.access_control) assert not device.is_locked def test_track_message_workaround(mock_openzwave): """Test value changed for Z-Wave lock by alarm-clearing workaround.""" node = MockNode( manufacturer_id="003B", product_id="5044", stats={"lastReceivedMessage": [0] * 6}, ) values = MockEntityValues( primary=MockValue(data=True, node=node), access_control=None, alarm_type=None, alarm_level=None, ) # Here we simulate an RF lock. The first lock.get_device will call # update properties, simulating the first DoorLock report. We then trigger # a change, simulating the openzwave automatic refreshing behavior (which # is enabled for at least the lock that needs this workaround) node.stats["lastReceivedMessage"][5] = const.COMMAND_CLASS_DOOR_LOCK device = lock.get_device(node=node, values=values) value_changed(values.primary) assert device.is_locked assert device.extra_state_attributes[lock.ATTR_NOTIFICATION] == "RF Lock" # Simulate a keypad unlock. We trigger a value_changed() which simulates # the Alarm notification received from the lock. Then, we trigger # value_changed() to simulate the automatic refreshing behavior. 
values.access_control = MockValue(data=6, node=node) values.alarm_type = MockValue(data=19, node=node) values.alarm_level = MockValue(data=3, node=node) node.stats["lastReceivedMessage"][5] = const.COMMAND_CLASS_ALARM value_changed(values.access_control) node.stats["lastReceivedMessage"][5] = const.COMMAND_CLASS_DOOR_LOCK values.primary.data = False value_changed(values.primary) assert not device.is_locked assert ( device.extra_state_attributes[lock.ATTR_LOCK_STATUS] == "Unlocked with Keypad by user 3" ) # Again, simulate an RF lock. device.lock() node.stats["lastReceivedMessage"][5] = const.COMMAND_CLASS_DOOR_LOCK value_changed(values.primary) assert device.is_locked assert device.extra_state_attributes[lock.ATTR_NOTIFICATION] == "RF Lock" def test_v2btze_value_changed(mock_openzwave): """Test value changed for v2btze Z-Wave lock.""" node = MockNode(manufacturer_id="010e", product_id="0002") values = MockEntityValues( primary=MockValue(data=None, node=node), v2btze_advanced=MockValue(data="Advanced", node=node), access_control=MockValue(data=19, node=node), alarm_type=None, alarm_level=None, ) device = lock.get_device(node=node, values=values, node_config={}) assert device._v2btze assert not device.is_locked values.access_control.data = 24 value_changed(values.primary) assert device.is_locked def test_alarm_type_workaround(mock_openzwave): """Test value changed for Z-Wave lock using alarm type.""" node = MockNode(manufacturer_id="0109", product_id="0000") values = MockEntityValues( primary=MockValue(data=True, node=node), access_control=None, alarm_type=MockValue(data=16, node=node), alarm_level=None, ) device = lock.get_device(node=node, values=values) assert not device.is_locked values.alarm_type.data = 18 value_changed(values.alarm_type) assert device.is_locked values.alarm_type.data = 19 value_changed(values.alarm_type) assert not device.is_locked values.alarm_type.data = 21 value_changed(values.alarm_type) assert device.is_locked values.alarm_type.data = 22 value_changed(values.alarm_type) assert not device.is_locked values.alarm_type.data = 24 value_changed(values.alarm_type) assert device.is_locked values.alarm_type.data = 25 value_changed(values.alarm_type) assert not device.is_locked values.alarm_type.data = 27 value_changed(values.alarm_type) assert device.is_locked def test_lock_access_control(mock_openzwave): """Test access control for Z-Wave lock.""" node = MockNode() values = MockEntityValues( primary=MockValue(data=None, node=node), access_control=MockValue(data=11, node=node), alarm_type=None, alarm_level=None, ) device = lock.get_device(node=node, values=values, node_config={}) assert device.extra_state_attributes[lock.ATTR_NOTIFICATION] == "Lock Jammed" def test_lock_alarm_type(mock_openzwave): """Test alarm type for Z-Wave lock.""" node = MockNode() values = MockEntityValues( primary=MockValue(data=None, node=node), access_control=None, alarm_type=MockValue(data=None, node=node), alarm_level=None, ) device = lock.get_device(node=node, values=values, node_config={}) assert lock.ATTR_LOCK_STATUS not in device.extra_state_attributes values.alarm_type.data = 21 value_changed(values.alarm_type) assert ( device.extra_state_attributes[lock.ATTR_LOCK_STATUS] == "Manually Locked None" ) values.alarm_type.data = 18 value_changed(values.alarm_type) assert ( device.extra_state_attributes[lock.ATTR_LOCK_STATUS] == "Locked with Keypad by user None" ) values.alarm_type.data = 161 value_changed(values.alarm_type) assert device.extra_state_attributes[lock.ATTR_LOCK_STATUS] == "Tamper 
Alarm: None" values.alarm_type.data = 9 value_changed(values.alarm_type) assert device.extra_state_attributes[lock.ATTR_LOCK_STATUS] == "Deadbolt Jammed" def test_lock_alarm_level(mock_openzwave): """Test alarm level for Z-Wave lock.""" node = MockNode() values = MockEntityValues( primary=MockValue(data=None, node=node), access_control=None, alarm_type=MockValue(data=None, node=node), alarm_level=MockValue(data=None, node=node), ) device = lock.get_device(node=node, values=values, node_config={}) assert lock.ATTR_LOCK_STATUS not in device.extra_state_attributes values.alarm_type.data = 21 values.alarm_level.data = 1 value_changed(values.alarm_type) value_changed(values.alarm_level) assert ( device.extra_state_attributes[lock.ATTR_LOCK_STATUS] == "Manually Locked by Key Cylinder or Inside thumb turn" ) values.alarm_type.data = 18 values.alarm_level.data = "alice" value_changed(values.alarm_type) value_changed(values.alarm_level) assert ( device.extra_state_attributes[lock.ATTR_LOCK_STATUS] == "Locked with Keypad by user alice" ) values.alarm_type.data = 161 values.alarm_level.data = 1 value_changed(values.alarm_type) value_changed(values.alarm_level) assert ( device.extra_state_attributes[lock.ATTR_LOCK_STATUS] == "Tamper Alarm: Too many keypresses" ) async def setup_ozw(hass, mock_openzwave): """Set up the mock ZWave config entry.""" hass.config.components.add("zwave") config_entry = config_entries.ConfigEntry( 1, "zwave", "Mock Title", {"usb_path": "mock-path", "network_key": "mock-key"}, "test", ) await hass.config_entries.async_forward_entry_setup(config_entry, "lock") await hass.async_block_till_done() async def test_lock_set_usercode_service(hass, mock_openzwave): """Test the zwave lock set_usercode service.""" mock_network = hass.data[const.DATA_NETWORK] = MagicMock() node = MockNode(node_id=12) value0 = MockValue(data=" ", node=node, index=0) value1 = MockValue(data=" ", node=node, index=1) node.get_values.return_value = {value0.value_id: value0, value1.value_id: value1} mock_network.nodes = {node.node_id: node} await setup_ozw(hass, mock_openzwave) await hass.async_block_till_done() await hass.services.async_call( lock.DOMAIN, lock.SERVICE_SET_USERCODE, { const.ATTR_NODE_ID: node.node_id, lock.ATTR_USERCODE: "1234", lock.ATTR_CODE_SLOT: 1, }, ) await hass.async_block_till_done() assert value1.data == "1234" mock_network.nodes = {node.node_id: node} await hass.services.async_call( lock.DOMAIN, lock.SERVICE_SET_USERCODE, { const.ATTR_NODE_ID: node.node_id, lock.ATTR_USERCODE: "123", lock.ATTR_CODE_SLOT: 1, }, ) await hass.async_block_till_done() assert value1.data == "1234" async def test_lock_get_usercode_service(hass, mock_openzwave): """Test the zwave lock get_usercode service.""" mock_network = hass.data[const.DATA_NETWORK] = MagicMock() node = MockNode(node_id=12) value0 = MockValue(data=None, node=node, index=0) value1 = MockValue(data="1234", node=node, index=1) node.get_values.return_value = {value0.value_id: value0, value1.value_id: value1} await setup_ozw(hass, mock_openzwave) await hass.async_block_till_done() with patch.object(lock, "_LOGGER") as mock_logger: mock_network.nodes = {node.node_id: node} await hass.services.async_call( lock.DOMAIN, lock.SERVICE_GET_USERCODE, {const.ATTR_NODE_ID: node.node_id, lock.ATTR_CODE_SLOT: 1}, ) await hass.async_block_till_done() # This service only seems to write to the log assert mock_logger.info.called assert len(mock_logger.info.mock_calls) == 1 assert mock_logger.info.mock_calls[0][1][2] == "1234" async def 
test_lock_clear_usercode_service(hass, mock_openzwave): """Test the zwave lock clear_usercode service.""" mock_network = hass.data[const.DATA_NETWORK] = MagicMock() node = MockNode(node_id=12) value0 = MockValue(data=None, node=node, index=0) value1 = MockValue(data="123", node=node, index=1) node.get_values.return_value = {value0.value_id: value0, value1.value_id: value1} mock_network.nodes = {node.node_id: node} await setup_ozw(hass, mock_openzwave) await hass.async_block_till_done() await hass.services.async_call( lock.DOMAIN, lock.SERVICE_CLEAR_USERCODE, {const.ATTR_NODE_ID: node.node_id, lock.ATTR_CODE_SLOT: 1}, ) await hass.async_block_till_done() assert value1.data == "\0\0\0"
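# --- Illustrative sketch (not part of the original test file) ---
# The clear_usercode test above expects a stored code of "123" to end up as a
# string of NUL bytes of the same length ("\0\0\0").  A tiny standalone way to
# express that expectation; the helper name `blank_usercode` is made up for
# illustration and is not part of Home Assistant.
def blank_usercode(stored_code: str) -> str:
    """Return a NUL-filled string the same length as the stored code."""
    return "\0" * len(stored_code)

assert blank_usercode("123") == "\0\0\0"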
# -*- coding: utf-8 -*- import json import sys import warnings from django.conf import settings from django.contrib.auth.models import AnonymousUser, Permission from django.contrib.sites.models import Site from django.core.cache import cache from django.core.exceptions import ObjectDoesNotExist from django.core.urlresolvers import reverse from django.template.context import Context from django.test import testcases from django.test.client import RequestFactory from django.utils.translation import activate from menus.menu_pool import menu_pool from cms.models import Page from cms.test_utils.util.context_managers import (UserLoginContext, SettingsOverride) from cms.utils.compat.dj import get_user_model from cms.utils.compat.urls import urljoin, unquote from cms.utils.permissions import set_current_user URL_CMS_PAGE = "/en/admin/cms/page/" URL_CMS_PAGE_ADD = urljoin(URL_CMS_PAGE, "add/") URL_CMS_PAGE_CHANGE = urljoin(URL_CMS_PAGE, "%d/") URL_CMS_PAGE_ADVANCED_CHANGE = urljoin(URL_CMS_PAGE, "%d/advanced-settings/") URL_CMS_PAGE_PERMISSION_CHANGE = urljoin(URL_CMS_PAGE, "%d/permission-settings/") URL_CMS_PAGE_CHANGE_LANGUAGE = URL_CMS_PAGE_CHANGE + "?language=%s" URL_CMS_PAGE_CHANGE_TEMPLATE = URL_CMS_PAGE_CHANGE + "change_template/" URL_CMS_PAGE_PUBLISH = URL_CMS_PAGE_CHANGE + "%s/publish/" URL_CMS_PAGE_DELETE = urljoin(URL_CMS_PAGE_CHANGE, "delete/") URL_CMS_PLUGIN_ADD = urljoin(URL_CMS_PAGE, "add-plugin/") URL_CMS_PLUGIN_EDIT = urljoin(URL_CMS_PAGE, "edit-plugin/") URL_CMS_PLUGIN_MOVE = urljoin(URL_CMS_PAGE, "move-plugin/") URL_CMS_PLUGIN_REMOVE = urljoin(URL_CMS_PAGE, "delete-plugin/") URL_CMS_TRANSLATION_DELETE = urljoin(URL_CMS_PAGE_CHANGE, "delete-translation/") URL_CMS_PAGE_HISTORY = urljoin(URL_CMS_PAGE_CHANGE, "history/%d/") URL_CMS_PLUGIN_HISTORY_EDIT = urljoin(URL_CMS_PAGE_HISTORY, "edit-plugin/") class _Warning(object): def __init__(self, message, category, filename, lineno): self.message = message self.category = category self.filename = filename self.lineno = lineno def _collectWarnings(observeWarning, f, *args, **kwargs): def showWarning(message, category, filename, lineno, file=None, line=None): assert isinstance(message, Warning) observeWarning(_Warning( message.args[0], category, filename, lineno)) # Disable the per-module cache for every module otherwise if the warning # which the caller is expecting us to collect was already emitted it won't # be re-emitted by the call to f which happens below. for v in sys.modules.values(): if v is not None: try: v.__warningregistry__ = None except: # Don't specify a particular exception type to handle in case # some wacky object raises some wacky exception in response to # the setattr attempt. pass origFilters = warnings.filters[:] origShow = warnings.showwarning warnings.simplefilter('always') try: warnings.showwarning = showWarning result = f(*args, **kwargs) finally: warnings.filters[:] = origFilters warnings.showwarning = origShow return result class BaseCMSTestCase(object): counter = 1 def _fixture_setup(self): super(BaseCMSTestCase, self)._fixture_setup() self.create_fixtures() activate("en") def create_fixtures(self): pass def _post_teardown(self): menu_pool.clear() cache.clear() super(BaseCMSTestCase, self)._post_teardown() set_current_user(None) def login_user_context(self, user): return UserLoginContext(self, user) def _create_user(self, username, is_staff=False, is_superuser=False, is_active=True, add_default_permissions=False, permissions=None): """ Use this method to create users. 
        Default permissions on page and text plugin are added if creating a
        non-superuser and `add_default_permissions` is set.

        Set `permissions` parameter to an iterable of permission codes to add
        custom permissions.
        """
        User = get_user_model()

        fields = dict(email=username + '@django-cms.org', is_staff=is_staff,
                      is_active=is_active, is_superuser=is_superuser
        )
        # Check for special case where email is used as username
        if (get_user_model().USERNAME_FIELD != 'email'):
            fields[get_user_model().USERNAME_FIELD] = username
        user = User(**fields)
        user.set_password(getattr(user, get_user_model().USERNAME_FIELD))
        user.save()
        if is_staff and not is_superuser and add_default_permissions:
            user.user_permissions.add(Permission.objects.get(codename='add_text'))
            user.user_permissions.add(Permission.objects.get(codename='delete_text'))
            user.user_permissions.add(Permission.objects.get(codename='change_text'))
            user.user_permissions.add(Permission.objects.get(codename='publish_page'))
            user.user_permissions.add(Permission.objects.get(codename='add_page'))
            user.user_permissions.add(Permission.objects.get(codename='change_page'))
            user.user_permissions.add(Permission.objects.get(codename='delete_page'))
        if is_staff and not is_superuser and permissions:
            for permission in permissions:
                user.user_permissions.add(Permission.objects.get(codename=permission))
        return user

    def get_superuser(self):
        try:
            query = dict()
            if get_user_model().USERNAME_FIELD != "email":
                query[get_user_model().USERNAME_FIELD] = "admin"
            else:
                query[get_user_model().USERNAME_FIELD] = "admin@django-cms.org"
            admin = get_user_model().objects.get(**query)
        except get_user_model().DoesNotExist:
            admin = self._create_user("admin", is_staff=True, is_superuser=True)
        return admin

    def get_staff_user_with_no_permissions(self):
        """
        Used in security tests
        """
        staff = self._create_user("staff", is_staff=True, is_superuser=False)
        return staff

    def get_staff_user_with_std_permissions(self):
        """
        This is a non-superuser staff user with the default permissions.
        """
        staff = self._create_user("staff", is_staff=True, is_superuser=False,
                                  add_default_permissions=True)
        return staff

    def get_new_page_data(self, parent_id=''):
        page_data = {
            'title': 'test page %d' % self.counter,
            'slug': 'test-page-%d' % self.counter,
            'language': settings.LANGUAGES[0][0],
            'template': 'nav_playground.html',
            'parent': parent_id,
            'site': 1,
            'pagepermission_set-TOTAL_FORMS': 0,
            'pagepermission_set-INITIAL_FORMS': 0,
            'pagepermission_set-MAX_NUM_FORMS': 0,
            'pagepermission_set-2-TOTAL_FORMS': 0,
            'pagepermission_set-2-INITIAL_FORMS': 0,
            'pagepermission_set-2-MAX_NUM_FORMS': 0
        }
        # required only if the user has can_change_permission
        self.counter += 1
        return page_data

    def get_new_page_data_dbfields(self, parent=None, site=None,
                                   language=None,
                                   template='nav_playground.html', ):
        page_data = {
            'title': 'test page %d' % self.counter,
            'slug': 'test-page-%d' % self.counter,
            'language': settings.LANGUAGES[0][0] if not language else language,
            'template': template,
            'parent': parent if parent else None,
            'site': site if site else Site.objects.get_current(),
        }
        self.counter = self.counter + 1
        return page_data

    def get_pagedata_from_dbfields(self, page_data):
        """Converts data created by get_new_page_data_dbfields to data
        created from get_new_page_data so you can switch between test cases
        in api.create_page and client.post"""
        page_data['site'] = page_data['site'].id
        page_data['parent'] = page_data['parent'].id if page_data['parent'] else ''
        # required only if the user has can_change_permission
        page_data['pagepermission_set-TOTAL_FORMS'] = 0
page_data['pagepermission_set-INITIAL_FORMS'] = 0 page_data['pagepermission_set-MAX_NUM_FORMS'] = 0 page_data['pagepermission_set-2-TOTAL_FORMS'] = 0 page_data['pagepermission_set-2-INITIAL_FORMS'] = 0 page_data['pagepermission_set-2-MAX_NUM_FORMS'] = 0 return page_data def print_page_structure(self, qs): """Just a helper to see the page struct. """ for page in qs.order_by('path'): ident = " " * page.level print(u"%s%s (%s), path: %s, depth: %s, numchild: %s" % (ident, page, page.pk, page.path, page.depth, page.numchild)) def print_node_structure(self, nodes, *extra): def _rec(nodes, level=0): ident = level * ' ' for node in nodes: raw_attrs = [(bit, getattr(node, bit, node.attr.get(bit, "unknown"))) for bit in extra] attrs = ', '.join(['%s: %r' % data for data in raw_attrs]) print(u"%s%s: %s" % (ident, node.title, attrs)) _rec(node.children, level + 1) _rec(nodes) def assertObjectExist(self, qs, **filter): try: return qs.get(**filter) except ObjectDoesNotExist: pass raise self.failureException("ObjectDoesNotExist raised for filter %s" % filter) def assertObjectDoesNotExist(self, qs, **filter): try: qs.get(**filter) except ObjectDoesNotExist: return raise self.failureException("ObjectDoesNotExist not raised for filter %s" % filter) def copy_page(self, page, target_page, position='last-child'): from cms.utils.page import get_available_slug data = { 'position': position, 'target': target_page.pk, 'site': 1, 'copy_permissions': 'on', 'copy_moderation': 'on', } response = self.client.post(URL_CMS_PAGE + "%d/copy-page/" % page.pk, data) self.assertEqual(response.status_code, 200) # Altered to reflect the new django-js jsonified response messages expected = {"status": 200, "content": "ok"} self.assertEqual(json.loads(response.content.decode('utf8')), expected) title = page.title_set.all()[0] copied_slug = get_available_slug(title) if position in ('first-child', 'last-child'): parent = target_page else: parent = target_page.parent copied_page = self.assertObjectExist(Page.objects, title_set__slug=copied_slug, parent=parent) return copied_page def move_page(self, page, target_page, position="first-child"): page.move_page(target_page, position) return self.reload_page(page) def reload_page(self, page): """ Returns a fresh instance of the page from the database """ return self.reload(page) def reload(self, obj): return obj.__class__.objects.get(pk=obj.pk) def get_pages_root(self): return unquote(reverse("pages-root")) def get_context(self, path=None, page=None): if not path: path = self.get_pages_root() context = {} request = self.get_request(path, page=page) context['request'] = request return Context(context) def get_request(self, path=None, language=None, post_data=None, enforce_csrf_checks=False, page=None): factory = RequestFactory() if not path: path = self.get_pages_root() if not language: if settings.USE_I18N: language = settings.LANGUAGES[0][0] else: language = settings.LANGUAGE_CODE if post_data: request = factory.post(path, post_data) else: request = factory.get(path) request.session = self.client.session request.user = getattr(self, 'user', AnonymousUser()) request.LANGUAGE_CODE = language request._dont_enforce_csrf_checks = not enforce_csrf_checks if page: request.current_page = page else: request.current_page = None class MockStorage(object): def __len__(self): return 0 def __iter__(self): return iter([]) def add(self, level, message, extra_tags=''): pass def update(self, response): pass request._messages = MockStorage() return request def check_published_page_attributes(self, page): 
public_page = page.publisher_public if page.parent: self.assertEqual(page.parent_id, public_page.parent.publisher_draft.id) self.assertEqual(page.depth, public_page.depth) draft_siblings = list(Page.objects.filter(parent_id=page.parent_id, publisher_is_draft=True).order_by('path')) public_siblings = list(Page.objects.filter(parent_id=public_page.parent_id, publisher_is_draft=False).order_by('path')) skip = 0 for i, sibling in enumerate(draft_siblings): if not sibling.publisher_public_id: skip += 1 continue self.assertEqual(sibling.id, public_siblings[i - skip].publisher_draft.id) def failUnlessWarns(self, category, message, f, *args, **kwargs): warningsShown = [] result = _collectWarnings(warningsShown.append, f, *args, **kwargs) if not warningsShown: self.fail("No warnings emitted") first = warningsShown[0] for other in warningsShown[1:]: if ((other.message, other.category) != (first.message, first.category)): self.fail("Can't handle different warnings") self.assertEqual(first.message, message) self.assertTrue(first.category is category) return result assertWarns = failUnlessWarns class CMSTestCase(BaseCMSTestCase, testcases.TestCase): pass class TransactionCMSTestCase(BaseCMSTestCase, testcases.TransactionTestCase): pass class SettingsOverrideTestCase(CMSTestCase): settings_overrides = {} def _pre_setup(self): self._enter_settings_override() super(SettingsOverrideTestCase, self)._pre_setup() def _enter_settings_override(self): self._settings_ctx_manager = SettingsOverride(**self.settings_overrides) self._settings_ctx_manager.__enter__() def _post_teardown(self): super(SettingsOverrideTestCase, self)._post_teardown() self._exit_settings_override() def _exit_settings_override(self): self._settings_ctx_manager.__exit__(None, None, None)
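# --- Illustrative usage sketch (not part of the original module) ---
# A test case only needs to declare `settings_overrides`; SettingsOverrideTestCase
# then enters a SettingsOverride context for the lifetime of the test case via
# _pre_setup/_post_teardown.  The class name and the overridden setting
# (CMS_PERMISSION) below are just examples.
class ExampleSettingsOverrideUsage(SettingsOverrideTestCase):
    settings_overrides = {'CMS_PERMISSION': False}

    def test_override_is_active(self):
        # While the test runs, django.conf.settings reflects the override.
        self.assertFalse(settings.CMS_PERMISSION)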
#!/usr/bin/env python # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8 from django.db import models from fields import PickledObjectField from django.utils.dates import MONTHS, WEEKDAYS_ABBR # set timespans (e.g. EventSchedule.hours, EventSchedule.minutes) to # ALL when we want to schedule something for every hour/minute/etc. ALL = '*' # knowing which fields are related to time is useful # for a bunch of operations below # TIME_FIELDS should always reflect the names of # the sets of numbers which determine the scheduled time _TIME_FIELDS = ['minutes', 'hours', 'days_of_week', 'days_of_month', 'months'] class EventSchedule(models.Model): """ create a new EventSchedule and save it every time you want to register a new event on a schedule we can implement one_off future events by setting count to 1 All timespans less than the specified one must be set i.e. a weekly schedule must also specify which hour, minute, etc. However, all timespans greater than the specified one default to "all" (as long as one is specified). i.e. a weekly schedule will fire every month callback - all callback function must take as the first argument a reference to a 'router' object """ # whether this schedule is active or not callback = models.CharField(max_length=255, help_text="Name of Python callback function") # blank: ensure django validation doesn't force a value # null: set db value to be Null description = models.CharField(max_length=255, null=True, blank=True) # pickled set callback_args = PickledObjectField(null=True, blank=True) # pickled dictionary callback_kwargs = PickledObjectField(null=True, blank=True) # the following are pickled sets of numbers months = PickledObjectField(null=True, blank=True, help_text="'1,2,3' for jan, feb, march - '*' for all") days_of_month = PickledObjectField(null=True, blank=True, help_text="'1,2,3' for 1st, 2nd, 3rd - '*' for all") days_of_week = PickledObjectField(null=True, blank=True, help_text="'0,1,2' for mon, tue, wed - '*' for all") hours = PickledObjectField(null=True, blank=True, help_text="'0,1,2' for midnight, 1 o'clock, 2 - '*' for all") minutes = PickledObjectField(null=True, blank=True, help_text="'0,1,2' for X:00, X:01, X:02 - '*' for all") start_time = models.DateTimeField(null=True, blank=True, help_text="When do you want alerts to start? Leave blank for 'now'.") end_time = models.DateTimeField(null=True, blank=True, help_text="When do you want alerts to end? Leave blank for 'never'.") # how many times do we want this event to fire? optional count = models.IntegerField(null=True, blank=True, help_text="How many times do you want this to fire? Leave blank for 'continuously'") active = models.BooleanField(default=True) """ class Meta: permissions = ( ("can_view", "Can view"), ("can_edit", "Can edit"), ) """ # First, we must define some utility classes class AllMatch(set): """Universal set - match everything""" def __contains__(self, item): return True allMatch = AllMatch(['*']) class UndefinedSchedule(TypeError): """ raise this error when attempting to save a schedule with a greater timespan specified without specifying the lesser timespans i.e. 
scheduling an event for every hour without specifying what minute """ pass def __str__(self): return unicode(self).encode('utf-8') def __unicode__(self): def _set_to_string(set, conversion_dict=None): if len(set)>0: if conversion_dict is not None: return ", ".join( [unicode(conversion_dict[m]) for m in set] ) else: return ", ".join( [unicode(m) for m in set] ) else: return 'All' months = _set_to_string(self.months, MONTHS) days_of_month = _set_to_string(self.days_of_month) days_of_week = _set_to_string(self.days_of_week, WEEKDAYS_ABBR) hours = _set_to_string(self.hours) minutes = _set_to_string(self.minutes) return "%s: Months:(%s), Days of Month:(%s), Days of Week:(%s), Hours:(%s), Minutes:(%s)" % \ ( self.callback, months, days_of_month, days_of_week, hours, minutes ) def __init__(self, *args, **kwargs): # these 3 lines allow users to create eventschedules from arrays # and not just sets (since lots of people don't know sets) for time in _TIME_FIELDS: if time in kwargs and isinstance(kwargs[time],list): kwargs[time] = set( kwargs[time] ) super(EventSchedule, self).__init__(*args, **kwargs) if self.callback_args is None: self.callback_args = [] if self.callback_kwargs is None: self.callback_kwargs = {} for time in _TIME_FIELDS: if getattr(self, time) is None: setattr(self,time, set()) # TODO: define these helper functions # def set_daily(self): # def set_weekly(self): etc. @staticmethod def validate(months, days_of_month, days_of_week, hours, minutes): """ The following function doesn't touch data: it just checks for valid boundaries when a timespan is set, all sub-timespans must also be set i.e. when a weekly schedule is set, one must also specify day, hour, and minute. """ EventSchedule.validate_ranges(months, days_of_month, days_of_week, hours, minutes) EventSchedule.validate_subtimespans(months, days_of_month, days_of_week, hours, minutes) @staticmethod def validate_ranges(months, days_of_month, days_of_week, hours, minutes): EventSchedule.check_minutes_bounds(minutes) EventSchedule.check_hours_bounds(hours) EventSchedule.check_days_of_week_bounds(days_of_week) EventSchedule.check_days_of_month_bounds(days_of_month) EventSchedule.check_months_bounds(months) @staticmethod def validate_subtimespans(months, days_of_month, days_of_week, hours, minutes): if len(minutes)==0 and len(hours)==0 and len(days_of_week)==0 and \ len(days_of_month)==0 and len(months)==0: raise TypeError("Must specify a time interval for schedule") if len(hours)>0 and len(minutes)==0: raise EventSchedule.UndefinedSchedule("Must specify minute(s)") if len(days_of_week)>0 and len(hours)==0: raise EventSchedule.UndefinedSchedule("Must specify hour(s)") if len(days_of_month)>0 and len(hours)==0: raise EventSchedule.UndefinedSchedule("Must specify hour(s)") if len(months)>0 and len(days_of_month)==0 and len(days_of_week)==0: raise EventSchedule.UndefinedSchedule("Must specify day(s)") # we break these out so we can reuse them in forms.py @staticmethod def check_minutes_bounds(minutes): check_bounds('Minutes', minutes, 0, 59) @staticmethod def check_hours_bounds(hours): check_bounds('Hours', hours, 0, 23) @staticmethod def check_days_of_week_bounds(days_of_week): check_bounds('Days of Week', days_of_week, 0, 6) @staticmethod def check_days_of_month_bounds(days_of_month): check_bounds('Days of Month', days_of_month, 1, 31) @staticmethod def check_months_bounds(months): check_bounds('Months', months, 1, 12) def save(self, *args, **kwargs): """ TODO - still need to fix this so that creating a schedule in the ui, saving it, 
editing it, saving it, editing it continues to work with callback_args, kwargs, and different timespans (currently fails because set([1,2]) -> a string) """ # transform all the input into known data structures for time in _TIME_FIELDS: val = getattr(self, time) if val is None or len(val)==0: # set default value to empty set setattr(self,time,set()) if isinstance(val,list): # accept either lists or sets, but turn all lists into sets val = set(val) setattr(self,time,val) if not self._valid(getattr(self,time)): raise TypeError("%s must be specified as " % time + "sets of numbers, an empty set, or '*'") # validate those data structures self.validate(self.months, self.days_of_month, self.days_of_week, self.hours, self.minutes) super(EventSchedule, self).save(*args, **kwargs) def should_fire(self, when): """Return True if this event should trigger at the specified datetime """ if self.start_time: if self.start_time > when: return False if self.end_time: if self.end_time < when: return False # The internal variables in this function are because allMatch doesn't # pickle well. This would be alleviated if this functionality were optimized # to stop doing db calls on every fire minutes = self.minutes hours = self.hours days_of_week = self.days_of_week days_of_month = self.days_of_month months = self.months if self.minutes == '*': minutes = self.allMatch if self.hours == '*': hours = self.allMatch if self.days_of_week == '*': days_of_week = self.allMatch if self.days_of_month == '*': days_of_month = self.allMatch if self.months == '*': months = self.allMatch # when a timespan is set, all super-timespans default to 'all' # i.e. a schedule specified for hourly will automatically be sent # every day, week, and month. if len(months) == 0: months=self.allMatch if months == self.allMatch: if len(days_of_month)==0: days_of_month = self.allMatch if len(days_of_week)==0: days_of_week = self.allMatch if len(hours) == 0 and days_of_month==self.allMatch and \ days_of_week == self.allMatch: hours = self.allMatch # self.minutes will never be empty # the following ensures that 'days of month' will override empty 'day of week' # and vice versa if len(days_of_month)>0 and len(days_of_week)==0: days_of_week = self.allMatch if len(days_of_week)>0 and len(days_of_month)==0: days_of_month = self.allMatch return ((when.minute in minutes) and (when.hour in hours) and (when.day in days_of_month) and (when.weekday() in days_of_week) and (when.month in months)) def activate(self): self.active = True self.save() def deactivate(self): self.active = False self.save() def _valid(self, timespan): if isinstance(timespan, set) or timespan == '*': return True return False ############################ # global utility functions # ############################ def set_weekly_event(callback, day, hour, minute, callback_args): # relies on all the built-in checks in EventSchedule.save() schedule = EventSchedule(callback=callback, hours=set([hour]), \ days_of_week=set([day]), minutes=set([minute]), \ callback_args=callback_args ) schedule.save() def set_daily_event(callback, hour, minute, callback_args): # relies on all the built-in checks in EventSchedule.save() schedule = EventSchedule(callback=callback, hours=set([hour]), \ minutes=set([minute]), \ callback_args=callback_args ) schedule.save() # check valid values def check_bounds(name, time_set, min, max): if time_set!='*': # ignore AllMatch/'*' for m in time_set: # check all values in set if int(m) < min or int(m) > max: raise TypeError("%s (%s) must be a value between %s and %s" % \ 
(name, m, min, max))
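# --- Illustrative usage sketch (not part of the original module) ---
# How the docstring's rules play out for a simple daily schedule: specifying
# hours and minutes (and nothing larger) means "every day at 08:00".  The
# callback path below is made up, and no database access is needed just to
# evaluate should_fire().
from datetime import datetime

def _example_daily_schedule():
    daily = EventSchedule(callback="myapp.callbacks.send_report",
                          hours=set([8]), minutes=set([0]))
    assert daily.should_fire(datetime(2024, 1, 1, 8, 0))      # 08:00 -> fires
    assert not daily.should_fire(datetime(2024, 1, 1, 9, 0))  # 09:00 -> does not
    return daily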
# Copyright 2021 The TF-Coder Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for collect_tensor_data.""" from absl.testing import absltest from absl.testing import parameterized import mock import six import tensorflow as tf from tf_coder.datasets import collect_tensor_data from tf_coder.value_search import all_operations from tf_coder.value_search import value as value_module from tf_coder.value_search import value_search_settings as settings_module def new_input(raw_value): return value_module.InputValue(raw_value, 'NEW_INPUT') class CollectTensorDataTest(parameterized.TestCase): def setUp(self): super(CollectTensorDataTest, self).setUp() self.settings = settings_module.default_settings() operations = all_operations.get_operations() self.unique_with_counts_operation = all_operations.find_operation_with_name( 'tf.unique_with_counts(x)', operation_list=operations) self.indexing_operation = all_operations.find_operation_with_name( 'IndexingOperation', operation_list=operations) self.gather_operation = all_operations.find_operation_with_name( 'tf.gather(params, indices)', operation_list=operations) self.add_operation = all_operations.find_operation_with_name( 'tf.add(x, y)', operation_list=operations) # Example with many operations. in1 = value_module.InputValue([1, 1, 2, 5, 6, 5], 'in1') in2 = value_module.InputValue([0, 10, 20, 30, 40, 50, 60, 70], 'in2') constant_1 = value_module.ConstantValue(1) unique = self.unique_with_counts_operation.apply([in1], self.settings) indexed = self.indexing_operation.apply([unique, constant_1], self.settings) gathered = self.gather_operation.apply([in2, in1], self.settings) self.example_value_1 = self.add_operation.apply([indexed, gathered], self.settings) self.assertEqual( self.example_value_1.reconstruct_expression(), 'tf.add(tf.unique_with_counts(in1)[1], tf.gather(in2, in1))') self.assertEqual(self.example_value_1, value_module.OutputValue([10, 10, 21, 52, 63, 52])) # Example with many variables and new inputs. 
in3 = value_module.InputValue([1], 'in3') in4 = value_module.InputValue([2], 'in4') a = self.add_operation.apply([in3, new_input([10])], self.settings) b = self.add_operation.apply([in4, in3], self.settings) c = self.add_operation.apply([new_input([20]), in3], self.settings) d = self.add_operation.apply([a, b], self.settings) self.example_value_2 = self.add_operation.apply([c, d], self.settings) self.assertEqual( self.example_value_2.reconstruct_expression(), 'tf.add(tf.add(NEW_INPUT, in3), ' 'tf.add(tf.add(in3, NEW_INPUT), tf.add(in4, in3)))') self.assertEqual(self.example_value_2, value_module.OutputValue([35])) def test_extract_values_with_collapsed_subtrees(self): expected_expressions = [ 'tf.add(tf.unique_with_counts(in1)[1], tf.gather(in2, in1))', 'tf.add(tf.unique_with_counts(in1)[1], NEW_INPUT)', 'tf.add(NEW_INPUT[1], tf.gather(in2, in1))', 'tf.add(NEW_INPUT[1], NEW_INPUT)', 'tf.add(NEW_INPUT, tf.gather(in2, in1))', 'tf.add(NEW_INPUT, NEW_INPUT)', 'NEW_INPUT', ] results = collect_tensor_data.extract_values_with_collapsed_subtrees( self.example_value_1) # The order doesn't matter except that 'NEW_INPUT' is last. self.assertEqual([v.reconstruct_expression() for v in results], expected_expressions) def test_count_num_inputs(self): self.assertEqual(collect_tensor_data.count_num_inputs(self.example_value_2), (4, {'in3': 3, 'in4': 1, 'NEW_INPUT': 2})) def test_normalize_names_and_extract_values(self): name_counter = {'in3': 3, 'in4': 1, 'NEW_INPUT': 2} copied_value = self.example_value_2.copy() name_value_map = collect_tensor_data.normalize_names_and_extract_values( copied_value, name_counter) self.assertEqual( copied_value.reconstruct_expression(use_cache=False), # in3 -> in1, in4 -> in2, and NEW_INPUT -> in3 and in4 'tf.add(tf.add(in3, in1), tf.add(tf.add(in1, in4), tf.add(in2, in1)))') self.assertEqual(name_value_map, # Check the raw values. {'in1': value_module.OutputValue([1]), 'in2': value_module.OutputValue([2]), 'in3': value_module.OutputValue([20]), 'in4': value_module.OutputValue([10])}) for name in name_value_map: self.assertIsInstance(name_value_map[name], value_module.InputValue) self.assertEqual(name, name_value_map[name].name) def test_extract_operations(self): self.assertCountEqual( collect_tensor_data.extract_operations(self.example_value_1), [self.unique_with_counts_operation, self.indexing_operation, self.gather_operation, self.add_operation]) self.assertCountEqual( collect_tensor_data.extract_operations(self.example_value_2), [self.add_operation] * 5) @parameterized.named_parameters( ('1', 1, []), ('2', 2, ['tf.add(tf.unique_with_counts(in1)[1], tf.gather(in2, in1))', 'tf.add(tf.unique_with_counts(in1)[1], in2)', 'tf.add(in1[1], in2)', 'tf.add(in1, in2)']), ('3', 3, ['tf.add(tf.unique_with_counts(in1)[1], tf.gather(in2, in1))', 'tf.add(tf.unique_with_counts(in1)[1], in2)', 'tf.add(in3[1], tf.gather(in2, in1))', 'tf.add(in1[1], in2)', 'tf.add(in3, tf.gather(in2, in1))', 'tf.add(in1, in2)'])) def test_extract_examples_from_value(self, max_num_inputs, expected_expressions): actual = collect_tensor_data.extract_examples_from_value( self.example_value_1, max_num_inputs=max_num_inputs) # Check that all expressions are as expected. self.assertCountEqual([example.expression for example in actual], expected_expressions) # Check all elements of one IOExample namedtuple. This example is at index 1 # when max_num_inputs > 1. 
if max_num_inputs > 1: expected_index_1 = collect_tensor_data.IOExample( expression='tf.add(tf.unique_with_counts(in1)[1], in2)', input_values=[value_module.InputValue([1, 1, 2, 5, 6, 5], 'in1'), value_module.InputValue([10, 10, 20, 50, 60, 50], 'in2')], output_value=value_module.OutputValue([10, 10, 21, 52, 63, 52]), num_inputs=2, operations=[self.add_operation, self.indexing_operation, self.unique_with_counts_operation]) self.assertEqual(actual[1], expected_index_1) # Equality of Value objects is done by comparing the wrapped values. Check # the names in input_values too. for actual_value, expected_value in zip(actual[1].input_values, expected_index_1.input_values): self.assertIsInstance(actual_value, value_module.InputValue) self.assertEqual(actual_value.name, expected_value.name) # Check that all extracted examples actually work by eval-ing them. for example in actual: namespace_dict = {'tf': tf} self.assertLen(example.input_values, example.num_inputs) for input_value in example.input_values: namespace_dict[input_value.name] = input_value.value eval_output = eval(example.expression, namespace_dict) # pylint: disable=eval-used self.assertEqual(value_module.OutputValue(eval_output), example.output_value) self.assertEqual(example.output_value, self.example_value_1) def test_extract_examples_from_value_without_inputs(self): constant_1 = value_module.ConstantValue(1) constant_2 = value_module.ConstantValue(2) constant_3 = value_module.ConstantValue(3) subtree = self.add_operation.apply([constant_1, constant_2], self.settings) without_inputs = self.add_operation.apply([subtree, constant_3], self.settings) actual = collect_tensor_data.extract_examples_from_value(without_inputs) self.assertCountEqual( [example.expression for example in actual], # `tf.add(tf.add(1, 2), 3)` has no inputs and is not included. ['tf.add(in1, 3)']) def test_create_tf_examples(self): sparse_tensor = tf.SparseTensor( values=[0, -15, 30], indices=[[12], [34], [56]], dense_shape=[100]) # This example does not represent a realistic tensor transformation. It uses # variety in the input/output tensors to exercise the featurization. io_example = collect_tensor_data.IOExample( expression='tf.dummy_expression(in1, in2)', input_values=[ value_module.InputValue([[[0.5, 2.5, 9.0], [-0.25, 0.0, 1.25]]], 'in1'), value_module.InputValue(sparse_tensor, 'in2'), ], output_value=value_module.OutputValue([[1.0], [0.0], [1.0], [0.0]]), num_inputs=2, operations=[self.add_operation, self.add_operation, self.gather_operation]) with mock.patch.object(collect_tensor_data, 'COUNT_BOUNDARIES', new=[0, 1, 3, 50, float('inf')]): with mock.patch.object( collect_tensor_data, 'FLOAT_BOUNDARIES', new=[-float('inf'), -10, -1e-8, 1e-8, 10, float('inf')]): tf_examples = collect_tensor_data.create_tf_examples(io_example) operation_list = all_operations.get_operations( include_sparse_operations=True) expected_operations = [ 2 if op.name == 'tf.add(x, y)' else 1 if op.name == 'tf.gather(params, indices)' else 0 for op in operation_list] expected_tf_example_1 = { # Features from featurize_value. 
'kind': [2, 2, 3, 0], 'dtype': [8, 8, 0, 0], 'rank': [2, 3, 1, 0], 'shape': [4, 1, 0, 0, 1, 2, 3, 0, 100, 0, 0, 0, 0, 0, 0, 0], 'shape_buckets': [2, 1, 0, 0, 1, 1, 2, 0, 3, 0, 0, 0, 0, 0, 0, 0], 'floats': [1.0, 0.0, 0.5, 0.5, 9.0, -0.25, 13/6, 13.5/6, 30, -15, 5, 15, 0, 0, 0, 0], 'float_buckets': [3, 2, 3, 3, 3, 1, 3, 3, 4, 0, 3, 4, 2, 2, 2, 2], 'counts': [4, 4, 2, 2, 2, 0, 4, 2, 6, 6, 4, 1, 0, 1, 2, 6, 100, 3, 1, 1, 0, 1, 1, 3, 1, 1, 0, 1, 0, 0, 1, 1], 'count_buckets': [2, 2, 1, 1, 1, 0, 2, 1, 2, 2, 2, 1, 0, 1, 1, 2, 3, 2, 1, 1, 0, 1, 1, 2, 1, 1, 0, 1, 0, 0, 1, 1], 'fractions': [4/4, 2/4, 2/4, 2/4, 0/4, 4/4, 2/4, 6/6, 4/6, 1/6, 0/6, 1/6, 2/6, 6/6, 3/100, 1/3, 1/3, 0/3, 1/3, 1/3, 3/3, 1/1, 0/1, 1/1, 0/1, 0/1, 1/1, 1/1], 'booleans': [1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1], 'value_string': [ b'tf.float32:[[1.0], [0.0], [1.0], [0.0]]', b'tf.float32:[[[0.5, 2.5, 9.0], [-0.25, 0.0, 1.25]]]', (b'SparseTensor(indices=tf.Tensor(\n[[12]\n [34]\n [56]], ' b'shape=(3, 1), dtype=int64), ' b'values=tf.Tensor([ 0 -15 30], shape=(3,), dtype=int32), ' b'dense_shape=tf.Tensor([100], shape=(1,), dtype=int64))'), b'0'], # Features from featurize_input_and_output. 'io_comparisons': [2, 2, 2, 0, 2, 2, 1, 2, 0, 0, 2, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'io_booleans': [0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], 'io_counts': [1, 2, 1, 1, 2, 1, 1, 1, 1], 'io_count_buckets': [1, 1, 1, 1, 1, 1, 1, 1, 1], 'io_fractions': [1/6, 2/4, 1/6, 1/4, 1/3, 2/4, 1/3, 1/4, 1/1, 1/1, 1/1, 1/1], # Features added in create_examples. 'num_inputs': [2], 'operations': expected_operations, 'expression': [b'tf.dummy_expression(in1, in2)'], } print(tf_examples) self.assertLen(tf_examples, 2) actual_tf_example_1, actual_tf_example_2 = tf_examples # Check the entire first example. for key, expected in six.iteritems(expected_tf_example_1): some_list = actual_tf_example_1.features.feature[key] if some_list.HasField('float_list'): actual = some_list.float_list.value actual = [round(f, 6) for f in actual] expected = [round(f, 6) for f in expected] elif some_list.HasField('int64_list'): actual = some_list.int64_list.value elif some_list.HasField('bytes_list'): actual = some_list.bytes_list.value else: self.fail('Failed to extract list from TF example.') # Printing side-by-side like this in the test log is more helpful than the # AssertionError message. Look at the Python3 log, which prints ints # without the L suffix. print('key: {}\n' ' expected: {}\n' ' got: {}'.format(key, expected, actual)) self.assertEqual(actual, expected) # Less rigorous checks for the second example, where the two inputs have # swapped. self.assertEqual( actual_tf_example_2.features.feature['rank'].int64_list.value, [2, 1, 3, 0]) self.assertEqual( actual_tf_example_2.features.feature['shape'].int64_list.value, [4, 1, 0, 0, 100, 0, 0, 0, 1, 2, 3, 0, 0, 0, 0, 0]) if __name__ == '__main__': absltest.main()
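# --- Illustrative sketch (not part of the original test file) ---
# A standalone check of the arithmetic behind `example_value_1` in setUp():
# tf.unique_with_counts(in1)[1] is the index of each element of in1 within its
# unique values, and tf.gather(in2, in1) picks entries of in2 at positions in1.
import tensorflow as tf

in1 = tf.constant([1, 1, 2, 5, 6, 5])
in2 = tf.constant([0, 10, 20, 30, 40, 50, 60, 70])

idx = tf.unique_with_counts(in1)[1]   # [0, 0, 1, 2, 3, 2]
gathered = tf.gather(in2, in1)        # [10, 10, 20, 50, 60, 50]
result = tf.add(idx, gathered)        # [10, 10, 21, 52, 63, 52]

print(result.numpy().tolist())  # matches the OutputValue asserted in setUp()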
# (C) Datadog, Inc. 2010-2016 # All rights reserved # Licensed under Simplified BSD License (see LICENSE) # stdlib import os # 3p # project from tests.checks.common import AgentCheckTest from checks import CheckException import mock FIXTURE_DIR = os.path.join(os.path.dirname(__file__), 'ci') class TestZabbixInvalidConfig(AgentCheckTest): CHECK_NAME = 'zabbix' def test_missing_zabbix_url(self): config = { 'init_config': {}, 'instances': [ { 'user': 'Admin', 'password': 'zabbix' } ] } with self.assertRaises(CheckException) as context: self.run_check(config) self.assertEqual('Missing API url in configuration.', str(context.exception)) def test_missing_zabbix_user(self): config = { 'init_config': {}, 'instances': [ { 'url': "http://host/zabbix/api_jsonrpc.php", 'password': 'zabbix' } ] } with self.assertRaises(CheckException) as context: self.run_check(config) self.assertEqual('Missing API user in configuration.', str(context.exception)) def test_missing_zabbix_password(self): config = { 'init_config': {}, 'instances': [ { 'url': "http://host/zabbix/api_jsonrpc.php", 'user': 'Admin' } ] } with self.assertRaises(CheckException) as context: self.run_check(config) self.assertEqual('Missing API password in configuration.', str(context.exception)) class TestZabbix(AgentCheckTest): CHECK_NAME = 'zabbix' _config = { 'init_config': {}, 'instances': [ { 'url': "http://host/zabbix/api_jsonrpc.php", 'user': 'Admin', 'password': 'zabbix' } ] } @staticmethod def _apiinfo_response(): return { "jsonrpc": "2.0", "result": ["4.0.4"], "id": 1 } @staticmethod def _zabbix_host_response(): return { "jsonrpc": "2.0", "result": [ { "hostid": "10084", "host": "zabbix01.example.com", "name": "Zabbix server", "groups": [ { "groupid": "4", "name": "Zabbix servers" } ] } ], "id": 1 } @staticmethod def _zabbix_problem(): return { "jsonrpc": "2.0", "result": [ { "eventid": "14", "source": "0", "object": "0", "objectid": "13491", "clock": "1549878981", "ns": "221836547", "r_eventid": "0", "r_clock": "0", "r_ns": "0", "correlationid": "0", "userid": "0", "name": "Zabbix agent on Zabbix server is unreachable for 5 minutes", "acknowledged": "0", "severity": "3", "acknowledges": [], "suppressed": 0 } ], "id": 1 } @staticmethod def _zabbix_trigger(): return { "jsonrpc": "2.0", "result": [ { "triggerid": "13491", "expression": "{12900}=1", "description": "Zabbix agent on {HOST.NAME} is unreachable for 5 minutes", "url": "", "status": "0", "value": "1", "priority": "3", "lastchange": "1549878981", "comments": "", "error": "", "templateid": "10047", "type": "0", "state": "0", "flags": "0", "recovery_mode": "0", "recovery_expression": "", "correlation_mode": "0", "correlation_tag": "", "manual_close": "0" } ], "id": 1 } @staticmethod def _zabbix_event(): return { "jsonrpc": "2.0", "result": [ { "eventid": "14", "value": "1", "severity": "3", "acknowledged": "0", "hosts": [ { "hostid": "10084" } ], "relatedObject": { "triggerid": "13491", "description": "Zabbix agent on {HOST.NAME} is unreachable for 5 minutes", "priority": "3" } } ], "id": 1 } def test_zabbix_topology_hosts(self): def _mocked_method_request(url, name, auth=None, params={}, request_id=1): if name == "apiinfo.version": return self._apiinfo_response() elif name == "host.get": return self._zabbix_host_response() else: self.fail("TEST FAILED on making invalid request") self.run_check(self._config, mocks={ 'method_request': _mocked_method_request, 'login': lambda url, user, password: "dummyauthtoken", 'retrieve_problems': lambda url, auth: [], 'retrieve_events': lambda 
url, auth, event_ids: [] }) topo_instances = self.check.get_topology_instances() self.assertEqual(len(topo_instances), 1) self.assertEqual(len(topo_instances[0]['components']), 1) self.assertEqual(len(topo_instances[0]['relations']), 0) component = topo_instances[0]['components'][0] self.assertEqual(component['externalId'], 'urn:host:/zabbix01.example.com') self.assertEqual(component['type']['name'], 'zabbix_host') self.assertEqual(component['data']['name'], 'Zabbix server') self.assertEqual(component['data']['host_id'], '10084') self.assertEqual(component['data']['host'], 'zabbix01.example.com') self.assertEqual(component['data']['layer'], 'Host') self.assertEqual(component['data']['domain'], 'Zabbix servers') self.assertEqual(component['data']['identifiers'], ['zabbix01.example.com']) self.assertEqual(component['data']['environment'], 'Production') self.assertEqual(component['data']['host_groups'], ['Zabbix servers']) labels = component['data']['labels'] for label in ['zabbix', 'host group:Zabbix servers']: if label not in labels: self.fail("Component does not have label '%s'." % label) def test_zabbix_topology_non_default_environment(self): def _mocked_method_request(url, name, auth=None, params={}, request_id=1): if name == "apiinfo.version": return self._apiinfo_response() elif name == "host.get": return self._zabbix_host_response() else: self.fail("TEST FAILED on making invalid request") config = self._config config['instances'][0]['stackstate_environment'] = 'MyTestEnvironment' self.run_check(config, mocks={ 'method_request': _mocked_method_request, 'login': lambda url, user, password: "dummyauthtoken", 'retrieve_problems': lambda url, auth: [], 'retrieve_events': lambda url, auth, event_ids: [] }) topo_instances = self.check.get_topology_instances() self.assertEqual(len(topo_instances), 1) self.assertEqual(len(topo_instances[0]['components']), 1) self.assertEqual(len(topo_instances[0]['relations']), 0) component = topo_instances[0]['components'][0] self.assertEqual(component['data']['environment'], 'MyTestEnvironment') labels = component['data']['labels'] for label in ['zabbix', 'host group:Zabbix servers']: if label not in labels: self.fail("Component does not have label '%s'." % label) def test_zabbix_topology_multiple_host_groups(self): """ Zabbix hosts can be placed in multiple host groups. When there is only one host group we place the host component in the StackState domain with the host group's name. 
However, when there are multiple host groups we use StackState domain 'Zabbix' """ def _mocked_method_request(url, name, auth=None, params={}, request_id=1): if name == "apiinfo.version": return self._apiinfo_response() elif name == "host.get": response = self._zabbix_host_response() response['result'][0]['groups'].append( { "groupid": "5", "name": "MyHostGroup" } ) return response else: self.fail("TEST FAILED on making invalid request") self.run_check(self._config, mocks={ 'method_request': _mocked_method_request, 'login': lambda url, user, password: "dummyauthtoken", 'retrieve_problems': lambda url, auth: [], 'retrieve_events': lambda url, auth, event_ids: [] }) topo_instances = self.check.get_topology_instances() self.assertEqual(len(topo_instances), 1) self.assertEqual(len(topo_instances[0]['components']), 1) self.assertEqual(len(topo_instances[0]['relations']), 0) component = topo_instances[0]['components'][0] self.assertEqual(component['data']['domain'], 'Zabbix') labels = component['data']['labels'] for label in ['zabbix', 'host group:Zabbix servers', 'host group:MyHostGroup']: if label not in labels: self.fail("Component does not have label '%s'." % label) def test_zabbix_problems(self): def _mocked_method_request(url, name, auth=None, params={}, request_id=1): if name == "apiinfo.version": return self._apiinfo_response() elif name == "host.get": return self._zabbix_host_response() elif name == "problem.get": return self._zabbix_problem() elif name == "trigger.get": return self._zabbix_trigger() elif name == "event.get": return self._zabbix_event() else: self.fail("TEST FAILED on making invalid request") self.run_check(self._config, mocks={ 'method_request': _mocked_method_request, 'login': lambda url, user, password: "dummyauthtoken", }) self.assertEqual(len(self.events), 1) event = self.events[0] self.assertEqual(event['source_type_name'], 'Zabbix') tags = event['tags'] for tag in ['host_id:10084', 'severity:3', "triggers:['Zabbix agent on {HOST.NAME} is unreachable for 5 minutes']", "host:zabbix01.example.com", "host_name:Zabbix server"]: if tag not in tags: self.fail("Event does not have tag '%s', got: %s." % (tag, tags)) self.assertEqual(len(tags), 5) def test_zabbix_no_problems(self): """ When there are no problems, we are expecting all host components to go to green. To make this happen we need to send an event that says all is OK. """ def _mocked_method_request(url, name, auth=None, params={}, request_id=1): if name == "apiinfo.version": return self._apiinfo_response() elif name == "host.get": return self._zabbix_host_response() elif name == "problem.get": response = self._zabbix_problem() response['result'] = [] return response else: self.fail("TEST FAILED on making invalid request") self.run_check(self._config, mocks={ 'method_request': _mocked_method_request, 'login': lambda url, user, password: "dummyauthtoken", }) self.assertEqual(len(self.events), 1) event = self.events[0] self.assertEqual(event['source_type_name'], 'Zabbix') tags = event['tags'] for tag in ['host_id:10084', 'severity:0', "triggers:[]", "host:zabbix01.example.com", "host_name:Zabbix server"]: if tag not in tags: self.fail("Event does not have tag '%s', got: %s." % (tag, tags)) self.assertEqual(len(tags), 5) def test_zabbix_determine_most_severe_state(self): """ A host can have multiple active problems. 
From the active problems we determine the most severe state and send that to StackState """ def _mocked_method_request(url, name, auth=None, params={}, request_id=1): if name == "apiinfo.version": return self._apiinfo_response() elif name == "host.get": return self._zabbix_host_response() elif name == "problem.get": response = self._zabbix_problem() response['result'].append({ "eventid": "100", "source": "0", "object": "0", "objectid": "111", "clock": "1549878981", "ns": "221836547", "r_eventid": "0", "r_clock": "0", "r_ns": "0", "correlationid": "0", "userid": "0", "name": "My very own problem", "acknowledged": "0", "severity": "5", "acknowledges": [], "suppressed": 0 }) return response elif name == "trigger.get": return self._zabbix_trigger() elif name == "event.get": response = self._zabbix_event() response['result'].append({ "eventid": "100", "value": "1", "severity": "5", "acknowledged": "0", "hosts": [ { "hostid": "10084" } ], "relatedObject": { "triggerid": "111", "description": "My very own problem", "priority": "5" } }) return response else: self.fail("TEST FAILED on making invalid request") self.run_check(self._config, mocks={ 'method_request': _mocked_method_request, 'login': lambda url, user, password: "dummyauthtoken", }) self.assertEqual(len(self.events), 1) event = self.events[0] self.assertEqual(event['source_type_name'], 'Zabbix') tags = event['tags'] for tag in [ 'host_id:10084', 'severity:5', "triggers:['Zabbix agent on {HOST.NAME} is unreachable for 5 minutes', 'My very own problem']", "host:zabbix01.example.com", "host_name:Zabbix server" ]: if tag not in tags: self.fail("Event does not have tag '%s', got: %s." % (tag, tags)) self.assertEqual(len(tags), 5) def validate_requests_ssl_verify_setting(self, config_to_use, expected_verify_value): """ Helper for testing whether the yaml setting ssl_verify is respected by mocking requests.get Mocking all the Zabbix functions that talk HTTP via requests.get, excluding the function `check_connection` Function check_connection is the first function that talks HTTP. """ with mock.patch('requests.get') as mock_get: self.run_check(config_to_use, mocks={ 'login': lambda url, user, password: "dummyauthtoken", 'retrieve_hosts': lambda x, y: [], 'retrieve_problems': lambda url, auth: [], 'retrieve_events': lambda url, auth, event_ids: [] }) mock_get.assert_called_once_with('http://host/zabbix/api_jsonrpc.php', json={'params': {}, 'jsonrpc': '2.0', 'method': 'apiinfo.version', 'id': 1}, verify=expected_verify_value) def test_zabbix_respect_false_ssl_verify(self): config = self._config config['instances'][0]['ssl_verify'] = False self.validate_requests_ssl_verify_setting(config, False) def test_zabbix_respect_true_ssl_verify(self): config = self._config config['instances'][0]['ssl_verify'] = True self.validate_requests_ssl_verify_setting(config, True) def test_zabbix_respect_default_ssl_verify(self): self.validate_requests_ssl_verify_setting(self._config, True)
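# --- Illustrative sketch (not part of the original test file) ---
# The ssl_verify tests above pin down the exact shape of the check's first HTTP
# call: a JSON-RPC `apiinfo.version` request sent with requests.get, a JSON body,
# and the `verify` flag.  A standalone request with the same shape would look
# like this; the URL is the dummy one from the test config, and the mocked
# response in these tests is {"jsonrpc": "2.0", "result": ["4.0.4"], "id": 1}.
import requests

url = "http://host/zabbix/api_jsonrpc.php"
payload = {"jsonrpc": "2.0", "method": "apiinfo.version", "params": {}, "id": 1}
response = requests.get(url, json=payload, verify=True)
print(response.json())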
"""The tests for the google calendar platform.""" from __future__ import annotations import copy from http import HTTPStatus from typing import Any, Callable from unittest.mock import Mock, patch import httplib2 import pytest from homeassistant.components.google import ( CONF_CAL_ID, CONF_CLIENT_ID, CONF_CLIENT_SECRET, CONF_DEVICE_ID, CONF_ENTITIES, CONF_IGNORE_AVAILABILITY, CONF_NAME, CONF_TRACK, DEVICE_SCHEMA, SERVICE_SCAN_CALENDARS, GoogleCalendarService, do_setup, ) from homeassistant.const import STATE_OFF, STATE_ON from homeassistant.helpers.template import DATE_STR_FORMAT from homeassistant.setup import async_setup_component from homeassistant.util import slugify import homeassistant.util.dt as dt_util from .conftest import TEST_CALENDAR from tests.common import async_mock_service GOOGLE_CONFIG = {CONF_CLIENT_ID: "client_id", CONF_CLIENT_SECRET: "client_secret"} TEST_ENTITY = "calendar.we_are_we_are_a_test_calendar" TEST_ENTITY_NAME = "We are, we are, a... Test Calendar" TEST_EVENT = { "summary": "Test All Day Event", "start": {}, "end": {}, "location": "Test Cases", "description": "test event", "kind": "calendar#event", "created": "2016-06-23T16:37:57.000Z", "transparency": "transparent", "updated": "2016-06-24T01:57:21.045Z", "reminders": {"useDefault": True}, "organizer": { "email": "[email protected]", "displayName": "Organizer Name", "self": True, }, "sequence": 0, "creator": { "email": "[email protected]", "displayName": "Organizer Name", "self": True, }, "id": "_c8rinwq863h45qnucyoi43ny8", "etag": '"2933466882090000"', "htmlLink": "https://www.google.com/calendar/event?eid=*******", "iCalUID": "[email protected]", "status": "confirmed", } def get_calendar_info(calendar): """Convert data from Google into DEVICE_SCHEMA.""" calendar_info = DEVICE_SCHEMA( { CONF_CAL_ID: calendar["id"], CONF_ENTITIES: [ { CONF_TRACK: calendar["track"], CONF_NAME: calendar["summary"], CONF_DEVICE_ID: slugify(calendar["summary"]), CONF_IGNORE_AVAILABILITY: calendar.get("ignore_availability", True), } ], } ) return calendar_info @pytest.fixture(autouse=True) def mock_google_setup(hass, test_calendar): """Mock the google set up functions.""" hass.loop.run_until_complete(async_setup_component(hass, "group", {"group": {}})) calendar = get_calendar_info(test_calendar) calendars = {calendar[CONF_CAL_ID]: calendar} patch_google_auth = patch( "homeassistant.components.google.do_authentication", side_effect=do_setup ) patch_google_load = patch( "homeassistant.components.google.load_config", return_value=calendars ) patch_google_services = patch("homeassistant.components.google.setup_services") async_mock_service(hass, "google", SERVICE_SCAN_CALENDARS) with patch_google_auth, patch_google_load, patch_google_services: yield @pytest.fixture(autouse=True) def set_time_zone(): """Set the time zone for the tests.""" # Set our timezone to CST/Regina so we can check calculations # This keeps UTC-6 all year round dt_util.set_default_time_zone(dt_util.get_time_zone("America/Regina")) yield dt_util.set_default_time_zone(dt_util.get_time_zone("UTC")) @pytest.fixture(name="google_service") def mock_google_service(): """Mock google service.""" patch_google_service = patch( "homeassistant.components.google.calendar.GoogleCalendarService" ) with patch_google_service as mock_service: yield mock_service async def test_all_day_event(hass, mock_next_event): """Test that we can create an event trigger on device.""" week_from_today = dt_util.dt.date.today() + dt_util.dt.timedelta(days=7) end_event = week_from_today + 
dt_util.dt.timedelta(days=1) event = copy.deepcopy(TEST_EVENT) start = week_from_today.isoformat() end = end_event.isoformat() event["start"]["date"] = start event["end"]["date"] = end mock_next_event.return_value.event = event assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG}) await hass.async_block_till_done() state = hass.states.get(TEST_ENTITY) assert state.name == TEST_ENTITY_NAME assert state.state == STATE_OFF assert dict(state.attributes) == { "friendly_name": TEST_ENTITY_NAME, "message": event["summary"], "all_day": True, "offset_reached": False, "start_time": week_from_today.strftime(DATE_STR_FORMAT), "end_time": end_event.strftime(DATE_STR_FORMAT), "location": event["location"], "description": event["description"], } async def test_future_event(hass, mock_next_event): """Test that we can create an event trigger on device.""" one_hour_from_now = dt_util.now() + dt_util.dt.timedelta(minutes=30) end_event = one_hour_from_now + dt_util.dt.timedelta(minutes=60) start = one_hour_from_now.isoformat() end = end_event.isoformat() event = copy.deepcopy(TEST_EVENT) event["start"]["dateTime"] = start event["end"]["dateTime"] = end mock_next_event.return_value.event = event assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG}) await hass.async_block_till_done() state = hass.states.get(TEST_ENTITY) assert state.name == TEST_ENTITY_NAME assert state.state == STATE_OFF assert dict(state.attributes) == { "friendly_name": TEST_ENTITY_NAME, "message": event["summary"], "all_day": False, "offset_reached": False, "start_time": one_hour_from_now.strftime(DATE_STR_FORMAT), "end_time": end_event.strftime(DATE_STR_FORMAT), "location": event["location"], "description": event["description"], } async def test_in_progress_event(hass, mock_next_event): """Test that we can create an event trigger on device.""" middle_of_event = dt_util.now() - dt_util.dt.timedelta(minutes=30) end_event = middle_of_event + dt_util.dt.timedelta(minutes=60) start = middle_of_event.isoformat() end = end_event.isoformat() event = copy.deepcopy(TEST_EVENT) event["start"]["dateTime"] = start event["end"]["dateTime"] = end mock_next_event.return_value.event = event assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG}) await hass.async_block_till_done() state = hass.states.get(TEST_ENTITY) assert state.name == TEST_ENTITY_NAME assert state.state == STATE_ON assert dict(state.attributes) == { "friendly_name": TEST_ENTITY_NAME, "message": event["summary"], "all_day": False, "offset_reached": False, "start_time": middle_of_event.strftime(DATE_STR_FORMAT), "end_time": end_event.strftime(DATE_STR_FORMAT), "location": event["location"], "description": event["description"], } async def test_offset_in_progress_event(hass, mock_next_event): """Test that we can create an event trigger on device.""" middle_of_event = dt_util.now() + dt_util.dt.timedelta(minutes=14) end_event = middle_of_event + dt_util.dt.timedelta(minutes=60) start = middle_of_event.isoformat() end = end_event.isoformat() event_summary = "Test Event in Progress" event = copy.deepcopy(TEST_EVENT) event["start"]["dateTime"] = start event["end"]["dateTime"] = end event["summary"] = f"{event_summary} !!-15" mock_next_event.return_value.event = event assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG}) await hass.async_block_till_done() state = hass.states.get(TEST_ENTITY) assert state.name == TEST_ENTITY_NAME assert state.state == STATE_OFF assert dict(state.attributes) == { 
"friendly_name": TEST_ENTITY_NAME, "message": event_summary, "all_day": False, "offset_reached": True, "start_time": middle_of_event.strftime(DATE_STR_FORMAT), "end_time": end_event.strftime(DATE_STR_FORMAT), "location": event["location"], "description": event["description"], } @pytest.mark.skip async def test_all_day_offset_in_progress_event(hass, mock_next_event): """Test that we can create an event trigger on device.""" tomorrow = dt_util.dt.date.today() + dt_util.dt.timedelta(days=1) end_event = tomorrow + dt_util.dt.timedelta(days=1) start = tomorrow.isoformat() end = end_event.isoformat() event_summary = "Test All Day Event Offset In Progress" event = copy.deepcopy(TEST_EVENT) event["start"]["date"] = start event["end"]["date"] = end event["summary"] = f"{event_summary} !!-25:0" mock_next_event.return_value.event = event assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG}) await hass.async_block_till_done() state = hass.states.get(TEST_ENTITY) assert state.name == TEST_ENTITY_NAME assert state.state == STATE_OFF assert dict(state.attributes) == { "friendly_name": TEST_ENTITY_NAME, "message": event_summary, "all_day": True, "offset_reached": True, "start_time": tomorrow.strftime(DATE_STR_FORMAT), "end_time": end_event.strftime(DATE_STR_FORMAT), "location": event["location"], "description": event["description"], } async def test_all_day_offset_event(hass, mock_next_event): """Test that we can create an event trigger on device.""" tomorrow = dt_util.dt.date.today() + dt_util.dt.timedelta(days=2) end_event = tomorrow + dt_util.dt.timedelta(days=1) start = tomorrow.isoformat() end = end_event.isoformat() offset_hours = 1 + dt_util.now().hour event_summary = "Test All Day Event Offset" event = copy.deepcopy(TEST_EVENT) event["start"]["date"] = start event["end"]["date"] = end event["summary"] = f"{event_summary} !!-{offset_hours}:0" mock_next_event.return_value.event = event assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG}) await hass.async_block_till_done() state = hass.states.get(TEST_ENTITY) assert state.name == TEST_ENTITY_NAME assert state.state == STATE_OFF assert dict(state.attributes) == { "friendly_name": TEST_ENTITY_NAME, "message": event_summary, "all_day": True, "offset_reached": False, "start_time": tomorrow.strftime(DATE_STR_FORMAT), "end_time": end_event.strftime(DATE_STR_FORMAT), "location": event["location"], "description": event["description"], } async def test_update_error(hass, google_service): """Test that the calendar handles a server error.""" google_service.return_value.get = Mock( side_effect=httplib2.ServerNotFoundError("unit test") ) assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG}) await hass.async_block_till_done() state = hass.states.get(TEST_ENTITY) assert state.name == TEST_ENTITY_NAME assert state.state == "off" async def test_calendars_api(hass, hass_client, google_service): """Test the Rest API returns the calendar.""" assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG}) await hass.async_block_till_done() client = await hass_client() response = await client.get("/api/calendars") assert response.status == HTTPStatus.OK data = await response.json() assert data == [ { "entity_id": TEST_ENTITY, "name": TEST_ENTITY_NAME, } ] async def test_http_event_api_failure(hass, hass_client, google_service): """Test the Rest API response during a calendar failure.""" google_service.return_value.get = Mock( side_effect=httplib2.ServerNotFoundError("unit test") ) 
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG}) await hass.async_block_till_done() start = dt_util.now().isoformat() end = (dt_util.now() + dt_util.dt.timedelta(minutes=60)).isoformat() client = await hass_client() response = await client.get(f"/api/calendars/{TEST_ENTITY}?start={start}&end={end}") assert response.status == HTTPStatus.OK # A failure to talk to the server results in an empty list of events events = await response.json() assert events == [] @pytest.fixture def mock_events_list( google_service: GoogleCalendarService, ) -> Callable[[dict[str, Any]], None]: """Fixture to construct a fake event list API response.""" def _put_result(response: dict[str, Any]) -> None: google_service.return_value.get.return_value.events.return_value.list.return_value.execute.return_value = ( response ) return return _put_result async def test_http_api_event(hass, hass_client, google_service, mock_events_list): """Test querying the API and fetching events from the server.""" now = dt_util.now() mock_events_list( { "items": [ { "summary": "Event title", "start": {"dateTime": now.isoformat()}, "end": { "dateTime": (now + dt_util.dt.timedelta(minutes=5)).isoformat() }, } ], } ) assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG}) await hass.async_block_till_done() start = (now - dt_util.dt.timedelta(minutes=60)).isoformat() end = (now + dt_util.dt.timedelta(minutes=60)).isoformat() client = await hass_client() response = await client.get(f"/api/calendars/{TEST_ENTITY}?start={start}&end={end}") assert response.status == HTTPStatus.OK events = await response.json() assert len(events) == 1 assert "summary" in events[0] assert events[0]["summary"] == "Event title" def create_ignore_avail_calendar() -> dict[str, Any]: """Create a calendar with ignore_availability set.""" calendar = TEST_CALENDAR.copy() calendar["ignore_availability"] = False return calendar @pytest.mark.parametrize("test_calendar", [create_ignore_avail_calendar()]) async def test_opaque_event(hass, hass_client, google_service, mock_events_list): """Test querying the API and fetching events from the server.""" now = dt_util.now() mock_events_list( { "items": [ { "summary": "Event title", "transparency": "opaque", "start": {"dateTime": now.isoformat()}, "end": { "dateTime": (now + dt_util.dt.timedelta(minutes=5)).isoformat() }, } ], } ) assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG}) await hass.async_block_till_done() start = (now - dt_util.dt.timedelta(minutes=60)).isoformat() end = (now + dt_util.dt.timedelta(minutes=60)).isoformat() client = await hass_client() response = await client.get(f"/api/calendars/{TEST_ENTITY}?start={start}&end={end}") assert response.status == HTTPStatus.OK events = await response.json() assert len(events) == 1 assert "summary" in events[0] assert events[0]["summary"] == "Event title" @pytest.mark.parametrize("test_calendar", [create_ignore_avail_calendar()]) async def test_transparent_event(hass, hass_client, google_service, mock_events_list): """Test querying the API and fetching events from the server.""" now = dt_util.now() mock_events_list( { "items": [ { "summary": "Event title", "transparency": "transparent", "start": {"dateTime": now.isoformat()}, "end": { "dateTime": (now + dt_util.dt.timedelta(minutes=5)).isoformat() }, } ], } ) assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG}) await hass.async_block_till_done() start = (now - dt_util.dt.timedelta(minutes=60)).isoformat() end = (now + 
dt_util.dt.timedelta(minutes=60)).isoformat() client = await hass_client() response = await client.get(f"/api/calendars/{TEST_ENTITY}?start={start}&end={end}") assert response.status == HTTPStatus.OK events = await response.json() assert events == []
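
# --- Illustrative sketch (not part of the tests above) -----------------------
# Several tests embed an offset marker in the event summary, e.g.
# "Test Event in Progress !!-15" (minutes) or "... !!-25:0" (hours:minutes).
# The helper below shows one way such a suffix could be split off; the regex,
# the name and the exact semantics are assumptions for illustration, not the
# component's actual parser.
import re
from datetime import timedelta

_OFFSET_RE = re.compile(r"\s*!!\s*(-?\d+)(?::(\d+))?\s*$")

def _split_offset(summary):
    """Return (clean_summary, offset) for a trailing '!!' marker, if any."""
    match = _OFFSET_RE.search(summary)
    if not match:
        return summary, timedelta(0)
    if match.group(2) is not None:
        offset = timedelta(hours=int(match.group(1)), minutes=int(match.group(2)))
    else:
        offset = timedelta(minutes=int(match.group(1)))
    return summary[: match.start()], offset

assert _split_offset("Test Event in Progress !!-15") == (
    "Test Event in Progress",
    timedelta(minutes=-15),
)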
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.core.urlresolvers import reverse_lazy from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import tabs from openstack_dashboard import api from openstack_dashboard.dashboards.project.instances import tables as itables from gbpui import client from gbpui import column_filters as gfilters import tables PTGsTable = tables.PTGsTable External_PTGsTable = tables.ExternalPTGsTable class PTGsTab(tabs.TableTab): table_classes = (PTGsTable,) name = _("Internal") slug = "policytargets" template_name = "horizon/common/_detail_table.html" def get_policy_targetstable_data(self): policy_targets = [] try: policy_targets = client.policy_target_list(self.tab_group.request, tenant_id=self.tab_group.request.user.tenant_id) a = lambda x, y: gfilters.update_policy_target_attributes(x, y) policy_targets = [a(self.request, item) for item in policy_targets] except Exception as e: msg = _('Unable to retrieve policy_target list. %s') % (str(e)) exceptions.handle(self.tab_group.request, msg) for policy_target in policy_targets: policy_target.set_id_as_name_if_empty() return policy_targets class ExternalPTGsTab(tabs.TableTab): table_classes = (External_PTGsTable,) name = _("External") slug = "externalpolicytargets" template_name = "horizon/common/_detail_table.html" def get_external_policy_targetstable_data(self): external_policy_targets = [] try: external_policy_targets = client.ext_policy_target_list( self.tab_group.request, tenant_id=self.tab_group.request.user.tenant_id) a = lambda x, y: gfilters.update_policy_target_attributes(x, y) external_policy_targets = [a(self.request, item) for item in external_policy_targets] except Exception as e: msg = _('Unable to retrieve policy_target list. 
%s') % (str(e)) exceptions.handle(self.tab_group.request, msg) for policy_target in external_policy_targets: policy_target.set_id_as_name_if_empty() return external_policy_targets class PTGTabs(tabs.TabGroup): slug = "policy_targettabs" tabs = (PTGsTab, ExternalPTGsTab) sticky = True class PTGDetailsTab(tabs.Tab): name = _("Group Details") slug = "policy_targetdetails" template_name = "project/policytargets/_policy_target_details.html" failure_url = reverse_lazy('horizon:project:policy_target_group:index') def get_context_data(self, request): policy_targetid = self.tab_group.kwargs['policy_target_id'] nsp = '' try: policy_target = client.policy_target_get(request, policy_targetid) l2_policy = client.l2policy_get(request, policy_target["l2_policy_id"]) l3_policy = client.l3policy_get(request, l2_policy["l3_policy_id"]) if policy_target['network_service_policy_id']: nsp_id = policy_target['network_service_policy_id'] nsp = client.get_networkservice_policy(request, nsp_id) except Exception: exceptions.handle( request, _('Unable to retrieve group details.'), redirect=self.failure_url) return {'policy_target': policy_target, 'l3_policy': l3_policy, 'l2_policy': l2_policy, 'nsp': nsp} class PTGDetailsTabs(tabs.TabGroup): slug = "policy_targettabs" tabs = (PTGDetailsTab,) class InstancesTab(tabs.TableTab): name = _("Members") slug = "members_tab" table_classes = (tables.InstancesTable,) template_name = ("horizon/common/_detail_table.html") preload = True def get_instances_data(self): policy_targetid = self.tab_group.kwargs['policy_target_id'] filtered_instances = [] try: policytargets = client.pt_list(self.request, tenant_id=self.request.user.tenant_id, policy_target_group_id=policy_targetid) policy_target_ports = [x.port_id for x in policytargets] marker = self.request.GET.get( tables.InstancesTable._meta.pagination_param, None) # TODO(Sumit): Setting paginate to False is a temporary # fix. Earlier, when paginate was set to True we were # retrieving instances in pages and were only processing # the first page. While pagination is required for # scaling to a large number of instances, we need to first # retrieve the instances in pages, then process them, # and then show the filtered list (filtered_instances) # in pages. 
instances, self._has_more = api.nova.server_list( self.request, search_opts={'marker': marker, 'paginate': False}) self._has_more = False instances = [item for item in instances if not itables.is_deleting(item)] for item in instances: for port in api.neutron.port_list(self.request, device_id=item.id): if port.id in policy_target_ports: filtered_instances.append(item) break except Exception: self._has_more = False error_message = _('Unable to get instances') exceptions.handle(self.request, error_message) filtered_instances = [] return filtered_instances class ConsumedTab(tabs.TableTab): name = _('Consumed Policy Rule Set') slug = 'consumed_policy_rule_sets_tab' table_classes = (tables.ConsumedContractsTable,) template_name = ("horizon/common/_detail_table.html") def get_consumed_policy_rule_sets_data(self): try: policy_targetid = self.tab_group.kwargs['policy_target_id'] policy_target = client.policy_target_get( self.request, policy_targetid) consumed_policy_rule_set_ids = policy_target.get( 'consumed_policy_rule_sets') consumed_policy_rule_sets = [] for _id in consumed_policy_rule_set_ids: consumed_policy_rule_sets.append( client.policy_rule_set_get(self.request, _id)) consumed_policy_rule_sets = [gfilters.update_pruleset_attributes( self.request, item) for item in consumed_policy_rule_sets] return consumed_policy_rule_sets except Exception: error_message = _('Unable to get consumed rule sets') exceptions.handle(self.request, error_message) return [] class ProvidedTab(tabs.TableTab): name = _('Provided Policy Rule Set') slug = 'provided_policy_rule_sets_tab' table_classes = (tables.ProvidedContractsTable,) template_name = ("horizon/common/_detail_table.html") def get_provided_policy_rule_sets_data(self): try: policy_targetid = self.tab_group.kwargs['policy_target_id'] policy_target = client.policy_target_get( self.request, policy_targetid) provided_policy_rule_set_ids = policy_target.get( 'provided_policy_rule_sets') provided_policy_rule_sets = [] for _id in provided_policy_rule_set_ids: provided_policy_rule_sets.append( client.policy_rule_set_get(self.request, _id)) provided_policy_rule_sets = [gfilters.update_pruleset_attributes( self.request, item) for item in provided_policy_rule_sets] return provided_policy_rule_sets except Exception: error_message = _('Unable to get provided rule sets') exceptions.handle(self.request, error_message) return [] class PTGMemberTabs(tabs.TabGroup): slug = 'member_tabs' tabs = (InstancesTab, ProvidedTab, ConsumedTab, PTGDetailsTab,) stiky = True class ExtProvidedTab(ProvidedTab): table_classes = (tables.ExtProvidedContractsTable,) def get_provided_policy_rule_sets_data(self): try: ext_policy_targetid = self.tab_group.kwargs['ext_policy_target_id'] ext_policy_target = client.ext_policy_target_get( self.request, ext_policy_targetid) provided_policy_rule_set_ids = ext_policy_target.get( 'provided_policy_rule_sets') provided_policy_rule_sets = [] for _id in provided_policy_rule_set_ids: provided_policy_rule_sets.append( client.policy_rule_set_get(self.request, _id)) provided_policy_rule_sets = [gfilters.update_pruleset_attributes( self.request, item) for item in provided_policy_rule_sets] return provided_policy_rule_sets except Exception: error_message = _('Unable to get provided rule sets') exceptions.handle(self.request, error_message) return [] class ExtConsumedTab(ConsumedTab): table_classes = (tables.ExtConsumedContractsTable,) def get_consumed_policy_rule_sets_data(self): try: ext_policy_targetid = self.tab_group.kwargs['ext_policy_target_id'] 
ext_policy_target = client.ext_policy_target_get( self.request, ext_policy_targetid) consumed_policy_rule_set_ids = ext_policy_target.get( 'consumed_policy_rule_sets') consumed_policy_rule_sets = [] for _id in consumed_policy_rule_set_ids: consumed_policy_rule_sets.append( client.policy_rule_set_get(self.request, _id)) consumed_policy_rule_sets = [gfilters.update_pruleset_attributes( self.request, item) for item in consumed_policy_rule_sets] return consumed_policy_rule_sets except Exception: error_message = _('Unable to get consumed rule sets') exceptions.handle(self.request, error_message) return [] class ExternalPTGMemberTabs(tabs.TabGroup): slug = 'members' tabs = (ExtProvidedTab, ExtConsumedTab) sticky = True
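
# --- Illustrative sketch (not part of the Horizon code above) ----------------
# ProvidedTab/ConsumedTab and their Ext* variants repeat the same
# "look up each rule-set id, then decorate the result" loop. A generic helper
# of the shape below (the name and signature are hypothetical) is one way that
# duplication could be factored out:
def _fetch_rule_sets(request, rule_set_ids, getter, decorate):
    """Fetch each rule set by id and decorate it; callers handle errors."""
    rule_sets = [getter(request, _id) for _id in rule_set_ids or []]
    return [decorate(request, item) for item in rule_sets]

# e.g. _fetch_rule_sets(request, ids, client.policy_rule_set_get,
#                       gfilters.update_pruleset_attributes)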
#!/Users/administrator/dev/njode/env/bin/python2.7 """PILdriver, an image-processing calculator using PIL. An instance of class PILDriver is essentially a software stack machine (Polish-notation interpreter) for sequencing PIL image transformations. The state of the instance is the interpreter stack. The only method one will normally invoke after initialization is the `execute' method. This takes an argument list of tokens, pushes them onto the instance's stack, and then tries to clear the stack by successive evaluation of PILdriver operators. Any part of the stack not cleaned off persists and is part of the evaluation context for the next call of the execute method. PILDriver doesn't catch any exceptions, on the theory that these are actually diagnostic information that should be interpreted by the calling code. When called as a script, the command-line arguments are passed to a PILDriver instance. If there are no command-line arguments, the module runs an interactive interpreter, each line of which is split into space-separated tokens and passed to the execute method. In the method descriptions below, a first line beginning with the string `usage:' means this method can be invoked with the token that follows it. Following <>-enclosed arguments describe how the method interprets the entries on the stack. Each argument specification begins with a type specification: either `int', `float', `string', or `image'. All operations consume their arguments off the stack (use `dup' to keep copies around). Use `verbose 1' to see the stack state displayed before each operation. Usage examples: `show crop 0 0 200 300 open test.png' loads test.png, crops out a portion of its upper-left-hand corner and displays the cropped portion. `save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it 30 degrees, and saves the result as rotated.png (in PNG format). """ # by Eric S. Raymond <[email protected]> # $Id$ # TO DO: # 1. Add PILFont capabilities, once that's documented. # 2. Add PILDraw operations. # 3. Add support for composing and decomposing multiple-image files. # from __future__ import print_function from PIL import Image class PILDriver: verbose = 0 def do_verbose(self): """usage: verbose <int:num> Set verbosity flag from top of stack. """ self.verbose = int(self.do_pop()) # The evaluation stack (internal only) stack = [] # Stack of pending operations def push(self, item): "Push an argument onto the evaluation stack." self.stack = [item] + self.stack def top(self): "Return the top-of-stack element." return self.stack[0] # Stack manipulation (callable) def do_clear(self): """usage: clear Clear the stack. """ self.stack = [] def do_pop(self): """usage: pop Discard the top element on the stack. """ top = self.stack[0] self.stack = self.stack[1:] return top def do_dup(self): """usage: dup Duplicate the top-of-stack item. """ if hasattr(self, 'format'): # If it's an image, do a real copy dup = self.stack[0].copy() else: dup = self.stack[0] self.stack = [dup] + self.stack def do_swap(self): """usage: swap Swap the top-of-stack item with the next one down. """ self.stack = [self.stack[1], self.stack[0]] + self.stack[2:] # Image module functions (callable) def do_new(self): """usage: new <int:xsize> <int:ysize> <int:color>: Create and push a greyscale image of given size and color. 
""" xsize = int(self.do_pop()) ysize = int(self.do_pop()) color = int(self.do_pop()) self.push(Image.new("L", (xsize, ysize), color)) def do_open(self): """usage: open <string:filename> Open the indicated image, read it, push the image on the stack. """ self.push(Image.open(self.do_pop())) def do_blend(self): """usage: blend <image:pic1> <image:pic2> <float:alpha> Replace two images and an alpha with the blended image. """ image1 = self.do_pop() image2 = self.do_pop() alpha = float(self.do_pop()) self.push(Image.blend(image1, image2, alpha)) def do_composite(self): """usage: composite <image:pic1> <image:pic2> <image:mask> Replace two images and a mask with their composite. """ image1 = self.do_pop() image2 = self.do_pop() mask = self.do_pop() self.push(Image.composite(image1, image2, mask)) def do_merge(self): """usage: merge <string:mode> <image:pic1> [<image:pic2> [<image:pic3> [<image:pic4>]]] Merge top-of stack images in a way described by the mode. """ mode = self.do_pop() bandlist = [] for band in mode: bandlist.append(self.do_pop()) self.push(Image.merge(mode, bandlist)) # Image class methods def do_convert(self): """usage: convert <string:mode> <image:pic1> Convert the top image to the given mode. """ mode = self.do_pop() image = self.do_pop() self.push(image.convert(mode)) def do_copy(self): """usage: copy <image:pic1> Make and push a true copy of the top image. """ self.dup() def do_crop(self): """usage: crop <int:left> <int:upper> <int:right> <int:lower> <image:pic1> Crop and push a rectangular region from the current image. """ left = int(self.do_pop()) upper = int(self.do_pop()) right = int(self.do_pop()) lower = int(self.do_pop()) image = self.do_pop() self.push(image.crop((left, upper, right, lower))) def do_draft(self): """usage: draft <string:mode> <int:xsize> <int:ysize> Configure the loader for a given mode and size. """ mode = self.do_pop() xsize = int(self.do_pop()) ysize = int(self.do_pop()) self.push(self.draft(mode, (xsize, ysize))) def do_filter(self): """usage: filter <string:filtername> <image:pic1> Process the top image with the given filter. """ from PIL import ImageFilter filter = eval("ImageFilter." + self.do_pop().upper()) image = self.do_pop() self.push(image.filter(filter)) def do_getbbox(self): """usage: getbbox Push left, upper, right, and lower pixel coordinates of the top image. """ bounding_box = self.do_pop().getbbox() self.push(bounding_box[3]) self.push(bounding_box[2]) self.push(bounding_box[1]) self.push(bounding_box[0]) def do_getextrema(self): """usage: extrema Push minimum and maximum pixel values of the top image. """ extrema = self.do_pop().extrema() self.push(extrema[1]) self.push(extrema[0]) def do_offset(self): """usage: offset <int:xoffset> <int:yoffset> <image:pic1> Offset the pixels in the top image. """ xoff = int(self.do_pop()) yoff = int(self.do_pop()) image = self.do_pop() self.push(image.offset(xoff, yoff)) def do_paste(self): """usage: paste <image:figure> <int:xoffset> <int:yoffset> <image:ground> Paste figure image into ground with upper left at given offsets. """ figure = self.do_pop() xoff = int(self.do_pop()) yoff = int(self.do_pop()) ground = self.do_pop() if figure.mode == "RGBA": ground.paste(figure, (xoff, yoff), figure) else: ground.paste(figure, (xoff, yoff)) self.push(ground) def do_resize(self): """usage: resize <int:xsize> <int:ysize> <image:pic1> Resize the top image. 
""" ysize = int(self.do_pop()) xsize = int(self.do_pop()) image = self.do_pop() self.push(image.resize((xsize, ysize))) def do_rotate(self): """usage: rotate <int:angle> <image:pic1> Rotate image through a given angle """ angle = int(self.do_pop()) image = self.do_pop() self.push(image.rotate(angle)) def do_save(self): """usage: save <string:filename> <image:pic1> Save image with default options. """ filename = self.do_pop() image = self.do_pop() image.save(filename) def do_save2(self): """usage: save2 <string:filename> <string:options> <image:pic1> Save image with specified options. """ filename = self.do_pop() options = self.do_pop() image = self.do_pop() image.save(filename, None, options) def do_show(self): """usage: show <image:pic1> Display and pop the top image. """ self.do_pop().show() def do_thumbnail(self): """usage: thumbnail <int:xsize> <int:ysize> <image:pic1> Modify the top image in the stack to contain a thumbnail of itself. """ ysize = int(self.do_pop()) xsize = int(self.do_pop()) self.top().thumbnail((xsize, ysize)) def do_transpose(self): """usage: transpose <string:operator> <image:pic1> Transpose the top image. """ transpose = self.do_pop().upper() image = self.do_pop() self.push(image.transpose(transpose)) # Image attributes def do_format(self): """usage: format <image:pic1> Push the format of the top image onto the stack. """ self.push(self.do_pop().format) def do_mode(self): """usage: mode <image:pic1> Push the mode of the top image onto the stack. """ self.push(self.do_pop().mode) def do_size(self): """usage: size <image:pic1> Push the image size on the stack as (y, x). """ size = self.do_pop().size self.push(size[0]) self.push(size[1]) # ImageChops operations def do_invert(self): """usage: invert <image:pic1> Invert the top image. """ from PIL import ImageChops self.push(ImageChops.invert(self.do_pop())) def do_lighter(self): """usage: lighter <image:pic1> <image:pic2> Pop the two top images, push an image of the lighter pixels of both. """ from PIL import ImageChops image1 = self.do_pop() image2 = self.do_pop() self.push(ImageChops.lighter(image1, image2)) def do_darker(self): """usage: darker <image:pic1> <image:pic2> Pop the two top images, push an image of the darker pixels of both. """ from PIL import ImageChops image1 = self.do_pop() image2 = self.do_pop() self.push(ImageChops.darker(image1, image2)) def do_difference(self): """usage: difference <image:pic1> <image:pic2> Pop the two top images, push the difference image """ from PIL import ImageChops image1 = self.do_pop() image2 = self.do_pop() self.push(ImageChops.difference(image1, image2)) def do_multiply(self): """usage: multiply <image:pic1> <image:pic2> Pop the two top images, push the multiplication image. """ from PIL import ImageChops image1 = self.do_pop() image2 = self.do_pop() self.push(ImageChops.multiply(image1, image2)) def do_screen(self): """usage: screen <image:pic1> <image:pic2> Pop the two top images, superimpose their inverted versions. """ from PIL import ImageChops image2 = self.do_pop() image1 = self.do_pop() self.push(ImageChops.screen(image1, image2)) def do_add(self): """usage: add <image:pic1> <image:pic2> <int:offset> <float:scale> Pop the two top images, produce the scaled sum with offset. 
""" from PIL import ImageChops image1 = self.do_pop() image2 = self.do_pop() scale = float(self.do_pop()) offset = int(self.do_pop()) self.push(ImageChops.add(image1, image2, scale, offset)) def do_subtract(self): """usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale> Pop the two top images, produce the scaled difference with offset. """ from PIL import ImageChops image1 = self.do_pop() image2 = self.do_pop() scale = float(self.do_pop()) offset = int(self.do_pop()) self.push(ImageChops.subtract(image1, image2, scale, offset)) # ImageEnhance classes def do_color(self): """usage: color <image:pic1> Enhance color in the top image. """ from PIL import ImageEnhance factor = float(self.do_pop()) image = self.do_pop() enhancer = ImageEnhance.Color(image) self.push(enhancer.enhance(factor)) def do_contrast(self): """usage: contrast <image:pic1> Enhance contrast in the top image. """ from PIL import ImageEnhance factor = float(self.do_pop()) image = self.do_pop() enhancer = ImageEnhance.Contrast(image) self.push(enhancer.enhance(factor)) def do_brightness(self): """usage: brightness <image:pic1> Enhance brightness in the top image. """ from PIL import ImageEnhance factor = float(self.do_pop()) image = self.do_pop() enhancer = ImageEnhance.Brightness(image) self.push(enhancer.enhance(factor)) def do_sharpness(self): """usage: sharpness <image:pic1> Enhance sharpness in the top image. """ from PIL import ImageEnhance factor = float(self.do_pop()) image = self.do_pop() enhancer = ImageEnhance.Sharpness(image) self.push(enhancer.enhance(factor)) # The interpreter loop def execute(self, list): "Interpret a list of PILDriver commands." list.reverse() while len(list) > 0: self.push(list[0]) list = list[1:] if self.verbose: print("Stack: " + repr(self.stack)) top = self.top() if not isinstance(top, str): continue funcname = "do_" + top if not hasattr(self, funcname): continue else: self.do_pop() func = getattr(self, funcname) func() if __name__ == '__main__': import sys try: import readline except ImportError: pass # not available on all platforms # If we see command-line arguments, interpret them as a stack state # and execute. Otherwise go interactive. driver = PILDriver() if len(sys.argv[1:]) > 0: driver.execute(sys.argv[1:]) else: print("PILDriver says hello.") while True: try: if sys.version_info[0] >= 3: line = input('pildriver> ') else: line = raw_input('pildriver> ') except EOFError: print("\nPILDriver says goodbye.") break driver.execute(line.split()) print(driver.stack) # The following sets edit modes for GNU EMACS # Local Variables: # mode:python # End:
# -*- coding: utf-8 -*- """ requests.models ~~~~~~~~~~~~~~~ This module contains the primary objects that power Requests. """ import collections import datetime from io import BytesIO, UnsupportedOperation from .hooks import default_hooks from .structures import CaseInsensitiveDict from .auth import HTTPBasicAuth from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar from .packages.urllib3.fields import RequestField from .packages.urllib3.filepost import encode_multipart_formdata from .packages.urllib3.util import parse_url from .packages.urllib3.exceptions import ( DecodeError, ReadTimeoutError, ProtocolError, LocationParseError) from .exceptions import ( HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError, ContentDecodingError, ConnectionError, StreamConsumedError) from .utils import ( guess_filename, get_auth_from_url, requote_uri, stream_decode_response_unicode, to_key_val_list, parse_header_links, iter_slices, guess_json_utf, super_len, to_native_string) from .compat import ( cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO, is_py2, chardet, builtin_str, basestring) from .compat import json as complexjson from .status_codes import codes #: The set of HTTP status codes that indicate an automatically #: processable redirect. REDIRECT_STATI = ( codes.moved, # 301 codes.found, # 302 codes.other, # 303 codes.temporary_redirect, # 307 codes.permanent_redirect, # 308 ) DEFAULT_REDIRECT_LIMIT = 30 CONTENT_CHUNK_SIZE = 10 * 1024 ITER_CHUNK_SIZE = 512 class RequestEncodingMixin(object): @property def path_url(self): """Build the path URL to use.""" url = [] p = urlsplit(self.url) path = p.path if not path: path = '/' url.append(path) query = p.query if query: url.append('?') url.append(query) return ''.join(url) @staticmethod def _encode_params(data): """Encode parameters in a piece of data. Will successfully encode parameters when passed as a dict or a list of 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary if parameters are supplied as a dict. """ if isinstance(data, (str, bytes)): return to_native_string(data) elif hasattr(data, 'read'): return data elif hasattr(data, '__iter__'): result = [] for k, vs in to_key_val_list(data): if isinstance(vs, basestring) or not hasattr(vs, '__iter__'): vs = [vs] for v in vs: if v is not None: result.append( (k.encode('utf-8') if isinstance(k, str) else k, v.encode('utf-8') if isinstance(v, str) else v)) return urlencode(result, doseq=True) else: return data @staticmethod def _encode_files(files, data): """Build the body for a multipart/form-data request. Will successfully encode files when passed as a dict or a list of 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary if parameters are supplied as a dict. """ if (not files): raise ValueError("Files must be provided.") elif isinstance(data, basestring): raise ValueError("Data must not be a string.") new_fields = [] fields = to_key_val_list(data or {}) files = to_key_val_list(files or {}) for field, val in fields: if isinstance(val, basestring) or not hasattr(val, '__iter__'): val = [val] for v in val: if v is not None: # Don't call str() on bytestrings: in Py3 it all goes wrong. 
if not isinstance(v, bytes): v = str(v) new_fields.append( (field.decode('utf-8') if isinstance(field, bytes) else field, v.encode('utf-8') if isinstance(v, str) else v)) for (k, v) in files: # support for explicit filename ft = None fh = None if isinstance(v, (tuple, list)): if len(v) == 2: fn, fp = v elif len(v) == 3: fn, fp, ft = v else: fn, fp, ft, fh = v else: fn = guess_filename(v) or k fp = v if isinstance(fp, (str, bytes, bytearray)): fdata = fp else: fdata = fp.read() rf = RequestField(name=k, data=fdata, filename=fn, headers=fh) rf.make_multipart(content_type=ft) new_fields.append(rf) body, content_type = encode_multipart_formdata(new_fields) return body, content_type class RequestHooksMixin(object): def register_hook(self, event, hook): """Properly register a hook.""" if event not in self.hooks: raise ValueError('Unsupported event specified, with event name "%s"' % (event)) if isinstance(hook, collections.Callable): self.hooks[event].append(hook) elif hasattr(hook, '__iter__'): self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable)) def deregister_hook(self, event, hook): """Deregister a previously registered hook. Returns True if the hook existed, False if not. """ try: self.hooks[event].remove(hook) return True except ValueError: return False class Request(RequestHooksMixin): """A user-created :class:`Request <Request>` object. Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server. :param method: HTTP method to use. :param url: URL to send. :param headers: dictionary of headers to send. :param files: dictionary of {filename: fileobject} files to multipart upload. :param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place. :param json: json for the body to attach to the request (if files or data is not specified). :param params: dictionary of URL parameters to append to the URL. :param auth: Auth handler or (user, pass) tuple. :param cookies: dictionary or CookieJar of cookies to attach to this request. :param hooks: dictionary of callback hooks, for internal usage. Usage:: >>> import requests >>> req = requests.Request('GET', 'http://httpbin.org/get') >>> req.prepare() <PreparedRequest [GET]> """ def __init__(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None, json=None): # Default empty dicts for dict params. data = [] if data is None else data files = [] if files is None else files headers = {} if headers is None else headers params = {} if params is None else params hooks = {} if hooks is None else hooks self.hooks = default_hooks() for (k, v) in list(hooks.items()): self.register_hook(event=k, hook=v) self.method = method self.url = url self.headers = headers self.files = files self.data = data self.json = json self.params = params self.auth = auth self.cookies = cookies def __repr__(self): return '<Request [%s]>' % (self.method) def prepare(self): """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it.""" p = PreparedRequest() p.prepare( method=self.method, url=self.url, headers=self.headers, files=self.files, data=self.data, json=self.json, params=self.params, auth=self.auth, cookies=self.cookies, hooks=self.hooks, ) return p class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): """The fully mutable :class:`PreparedRequest <PreparedRequest>` object, containing the exact bytes that will be sent to the server. 
Generated from either a :class:`Request <Request>` object or manually. Usage:: >>> import requests >>> req = requests.Request('GET', 'http://httpbin.org/get') >>> r = req.prepare() <PreparedRequest [GET]> >>> s = requests.Session() >>> s.send(r) <Response [200]> """ def __init__(self): #: HTTP verb to send to the server. self.method = None #: HTTP URL to send the request to. self.url = None #: dictionary of HTTP headers. self.headers = None # The `CookieJar` used to create the Cookie header will be stored here # after prepare_cookies is called self._cookies = None #: request body to send to the server. self.body = None #: dictionary of callback hooks, for internal usage. self.hooks = default_hooks() def prepare(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None, json=None): """Prepares the entire request with the given parameters.""" self.prepare_method(method) self.prepare_url(url, params) self.prepare_headers(headers) self.prepare_cookies(cookies) self.prepare_body(data, files, json) self.prepare_auth(auth, url) # Note that prepare_auth must be last to enable authentication schemes # such as OAuth to work on a fully prepared request. # This MUST go after prepare_auth. Authenticators could add a hook self.prepare_hooks(hooks) def __repr__(self): return '<PreparedRequest [%s]>' % (self.method) def copy(self): p = PreparedRequest() p.method = self.method p.url = self.url p.headers = self.headers.copy() if self.headers is not None else None p._cookies = _copy_cookie_jar(self._cookies) p.body = self.body p.hooks = self.hooks return p def prepare_method(self, method): """Prepares the given HTTP method.""" self.method = method if self.method is not None: self.method = to_native_string(self.method.upper()) def prepare_url(self, url, params): """Prepares the given HTTP URL.""" #: Accept objects that have string representations. #: We're unable to blindy call unicode/str functions #: as this will include the bytestring indicator (b'') #: on python 3.x. #: https://github.com/kennethreitz/requests/pull/2238 if isinstance(url, bytes): url = url.decode('utf8') else: url = unicode(url) if is_py2 else str(url) # Don't do any URL preparation for non-HTTP schemes like `mailto`, # `data` etc to work around exceptions from `url_parse`, which # handles RFC 3986 only. if ':' in url and not url.lower().startswith('http'): self.url = url return # Support for unicode domain names and paths. try: scheme, auth, host, port, path, query, fragment = parse_url(url) except LocationParseError as e: raise InvalidURL(*e.args) if not scheme: error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?") error = error.format(to_native_string(url, 'utf8')) raise MissingSchema(error) if not host: raise InvalidURL("Invalid URL %r: No host supplied" % url) # Only want to apply IDNA to the hostname try: host = host.encode('idna').decode('utf-8') except UnicodeError: raise InvalidURL('URL has an invalid label.') # Carefully reconstruct the network location netloc = auth or '' if netloc: netloc += '@' netloc += host if port: netloc += ':' + str(port) # Bare domains aren't valid URLs. 
if not path: path = '/' if is_py2: if isinstance(scheme, str): scheme = scheme.encode('utf-8') if isinstance(netloc, str): netloc = netloc.encode('utf-8') if isinstance(path, str): path = path.encode('utf-8') if isinstance(query, str): query = query.encode('utf-8') if isinstance(fragment, str): fragment = fragment.encode('utf-8') enc_params = self._encode_params(params) if enc_params: if query: query = '%s&%s' % (query, enc_params) else: query = enc_params url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment])) self.url = url def prepare_headers(self, headers): """Prepares the given HTTP headers.""" if headers: self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items()) else: self.headers = CaseInsensitiveDict() def prepare_body(self, data, files, json=None): """Prepares the given HTTP body data.""" # Check if file, fo, generator, iterator. # If not, run through normal process. # Nottin' on you. body = None content_type = None length = None if not data and json is not None: content_type = 'application/json' body = complexjson.dumps(json) is_stream = all([ hasattr(data, '__iter__'), not isinstance(data, (basestring, list, tuple, dict)) ]) try: length = super_len(data) except (TypeError, AttributeError, UnsupportedOperation): length = None if is_stream: body = data if files: raise NotImplementedError('Streamed bodies and files are mutually exclusive.') if length is not None: self.headers['Content-Length'] = builtin_str(length) else: self.headers['Transfer-Encoding'] = 'chunked' else: # Multi-part file uploads. if files: (body, content_type) = self._encode_files(files, data) else: if data: body = self._encode_params(data) if isinstance(data, basestring) or hasattr(data, 'read'): content_type = None else: content_type = 'application/x-www-form-urlencoded' self.prepare_content_length(body) # Add content-type if it wasn't explicitly provided. if content_type and ('content-type' not in self.headers): self.headers['Content-Type'] = content_type self.body = body def prepare_content_length(self, body): if hasattr(body, 'seek') and hasattr(body, 'tell'): body.seek(0, 2) self.headers['Content-Length'] = builtin_str(body.tell()) body.seek(0, 0) elif body is not None: l = super_len(body) if l: self.headers['Content-Length'] = builtin_str(l) elif (self.method not in ('GET', 'HEAD')) and (self.headers.get('Content-Length') is None): self.headers['Content-Length'] = '0' def prepare_auth(self, auth, url=''): """Prepares the given HTTP auth data.""" # If no Auth is explicitly provided, extract it from the URL first. if auth is None: url_auth = get_auth_from_url(self.url) auth = url_auth if any(url_auth) else None if auth: if isinstance(auth, tuple) and len(auth) == 2: # special-case basic HTTP auth auth = HTTPBasicAuth(*auth) # Allow auth to make its changes. r = auth(self) # Update self to reflect the auth changes. self.__dict__.update(r.__dict__) # Recompute Content-Length self.prepare_content_length(self.body) def prepare_cookies(self, cookies): """Prepares the given HTTP cookie data. This function eventually generates a ``Cookie`` header from the given cookies using cookielib. Due to cookielib's design, the header will not be regenerated if it already exists, meaning this function can only be called once for the life of the :class:`PreparedRequest <PreparedRequest>` object. 
Any subsequent calls to ``prepare_cookies`` will have no actual effect, unless the "Cookie" header is removed beforehand.""" if isinstance(cookies, cookielib.CookieJar): self._cookies = cookies else: self._cookies = cookiejar_from_dict(cookies) cookie_header = get_cookie_header(self._cookies, self) if cookie_header is not None: self.headers['Cookie'] = cookie_header def prepare_hooks(self, hooks): """Prepares the given hooks.""" # hooks can be passed as None to the prepare method and to this # method. To prevent iterating over None, simply use an empty list # if hooks is False-y hooks = hooks or [] for event in hooks: self.register_hook(event, hooks[event]) class Response(object): """The :class:`Response <Response>` object, which contains a server's response to an HTTP request. """ __attrs__ = [ '_content', 'status_code', 'headers', 'url', 'history', 'encoding', 'reason', 'cookies', 'elapsed', 'request' ] def __init__(self): super(Response, self).__init__() self._content = False self._content_consumed = False #: Integer Code of responded HTTP Status, e.g. 404 or 200. self.status_code = None #: Case-insensitive Dictionary of Response Headers. #: For example, ``headers['content-encoding']`` will return the #: value of a ``'Content-Encoding'`` response header. self.headers = CaseInsensitiveDict() #: File-like object representation of response (for advanced usage). #: Use of ``raw`` requires that ``stream=True`` be set on the request. # This requirement does not apply for use internally to Requests. self.raw = None #: Final URL location of Response. self.url = None #: Encoding to decode with when accessing r.text. self.encoding = None #: A list of :class:`Response <Response>` objects from #: the history of the Request. Any redirect responses will end #: up here. The list is sorted from the oldest to the most recent request. self.history = [] #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK". self.reason = None #: A CookieJar of Cookies the server sent back. self.cookies = cookiejar_from_dict({}) #: The amount of time elapsed between sending the request #: and the arrival of the response (as a timedelta). #: This property specifically measures the time taken between sending #: the first byte of the request and finishing parsing the headers. It #: is therefore unaffected by consuming the response content or the #: value of the ``stream`` keyword argument. self.elapsed = datetime.timedelta(0) #: The :class:`PreparedRequest <PreparedRequest>` object to which this #: is a response. self.request = None def __getstate__(self): # Consume everything; accessing the content attribute makes # sure the content has been fully read. 
if not self._content_consumed: self.content return dict( (attr, getattr(self, attr, None)) for attr in self.__attrs__ ) def __setstate__(self, state): for name, value in state.items(): setattr(self, name, value) # pickled objects do not have .raw setattr(self, '_content_consumed', True) setattr(self, 'raw', None) def __repr__(self): return '<Response [%s]>' % (self.status_code) def __bool__(self): """Returns true if :attr:`status_code` is 'OK'.""" return self.ok def __nonzero__(self): """Returns true if :attr:`status_code` is 'OK'.""" return self.ok def __iter__(self): """Allows you to use a response as an iterator.""" return self.iter_content(128) @property def ok(self): try: self.raise_for_status() except HTTPError: return False return True @property def is_redirect(self): """True if this Response is a well-formed HTTP redirect that could have been processed automatically (by :meth:`Session.resolve_redirects`). """ return ('location' in self.headers and self.status_code in REDIRECT_STATI) @property def is_permanent_redirect(self): """True if this Response one of the permanant versions of redirect""" return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect)) @property def apparent_encoding(self): """The apparent encoding, provided by the chardet library""" return chardet.detect(self.content)['encoding'] def iter_content(self, chunk_size=1, decode_unicode=False): """Iterates over the response data. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. The chunk size is the number of bytes it should read into memory. This is not necessarily the length of each item returned as decoding can take place. If decode_unicode is True, content will be decoded using the best available encoding based on the response. """ def generate(): # Special case for urllib3. if hasattr(self.raw, 'stream'): try: for chunk in self.raw.stream(chunk_size, decode_content=True): yield chunk except ProtocolError as e: raise ChunkedEncodingError(e) except DecodeError as e: raise ContentDecodingError(e) except ReadTimeoutError as e: raise ConnectionError(e) else: # Standard file-like object. while True: chunk = self.raw.read(chunk_size) if not chunk: break yield chunk self._content_consumed = True if self._content_consumed and isinstance(self._content, bool): raise StreamConsumedError() # simulate reading small chunks of the content reused_chunks = iter_slices(self._content, chunk_size) stream_chunks = generate() chunks = reused_chunks if self._content_consumed else stream_chunks if decode_unicode: chunks = stream_decode_response_unicode(chunks, self) return chunks def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None): """Iterates over the response data, one line at a time. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. .. note:: This method is not reentrant safe. """ pending = None for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode): if pending is not None: chunk = pending + chunk if delimiter: lines = chunk.split(delimiter) else: lines = chunk.splitlines() if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: pending = lines.pop() else: pending = None for line in lines: yield line if pending is not None: yield pending @property def content(self): """Content of the response, in bytes.""" if self._content is False: # Read the contents. 
try: if self._content_consumed: raise RuntimeError( 'The content for this response was already consumed') if self.status_code == 0: self._content = None else: self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes() except AttributeError: self._content = None self._content_consumed = True # don't need to release the connection; that's been handled by urllib3 # since we exhausted the data. return self._content @property def text(self): """Content of the response, in unicode. If Response.encoding is None, encoding will be guessed using ``chardet``. The encoding of the response content is determined based solely on HTTP headers, following RFC 2616 to the letter. If you can take advantage of non-HTTP knowledge to make a better guess at the encoding, you should set ``r.encoding`` appropriately before accessing this property. """ # Try charset from content-type content = None encoding = self.encoding if not self.content: return str('') # Fallback to auto-detected encoding. if self.encoding is None: encoding = self.apparent_encoding # Decode unicode from given encoding. try: content = str(self.content, encoding, errors='replace') except (LookupError, TypeError): # A LookupError is raised if the encoding was not found which could # indicate a misspelling or similar mistake. # # A TypeError can be raised if encoding is None # # So we try blindly encoding. content = str(self.content, errors='replace') return content def json(self, **kwargs): """Returns the json-encoded content of a response, if any. :param \*\*kwargs: Optional arguments that ``json.loads`` takes. """ if not self.encoding and len(self.content) > 3: # No encoding set. JSON RFC 4627 section 3 states we should expect # UTF-8, -16 or -32. Detect which one to use; If the detection or # decoding fails, fall back to `self.text` (using chardet to make # a best guess). encoding = guess_json_utf(self.content) if encoding is not None: try: return complexjson.loads( self.content.decode(encoding), **kwargs ) except UnicodeDecodeError: # Wrong UTF codec detected; usually because it's not UTF-8 # but some other 8-bit codec. This is an RFC violation, # and the server didn't bother to tell us what codec *was* # used. pass return complexjson.loads(self.text, **kwargs) @property def links(self): """Returns the parsed header links of the response, if any.""" header = self.headers.get('link') # l = MultiDict() l = {} if header: links = parse_header_links(header) for link in links: key = link.get('rel') or link.get('url') l[key] = link return l def raise_for_status(self): """Raises stored :class:`HTTPError`, if one occurred.""" http_error_msg = '' if 400 <= self.status_code < 500: http_error_msg = '%s Client Error: %s for url: %s' % (self.status_code, self.reason, self.url) elif 500 <= self.status_code < 600: http_error_msg = '%s Server Error: %s for url: %s' % (self.status_code, self.reason, self.url) if http_error_msg: raise HTTPError(http_error_msg, response=self) def close(self): """Releases the connection back to the pool. Once this method has been called the underlying ``raw`` object must not be accessed again. *Note: Should not normally need to be called explicitly.* """ if not self._content_consumed: return self.raw.close() return self.raw.release_conn()
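
# --- Illustrative usage (appended sketch, not part of requests itself) -------
# Preparing a Request by hand shows how the prepare_* steps above fit together:
# the method is upper-cased, params are merged into the URL, the dict body is
# form-encoded, and Content-Type/Content-Length are filled in. The URL is the
# same placeholder used in the docstrings above.
def _prepare_example():
    req = Request(
        method='post',
        url='http://httpbin.org/post',
        params={'q': 'search'},
        data={'field': 'value'},
        headers={'X-Demo': '1'},
    )
    prepared = req.prepare()
    assert prepared.method == 'POST'
    assert prepared.url.endswith('?q=search')
    assert prepared.headers['Content-Type'] == 'application/x-www-form-urlencoded'
    assert prepared.body == 'field=value'
    return prepared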
""" <Program Name> test_sig.py <Author> Geremy Condra Vladimir Diaz <[email protected]> <Started> February 28, 2012. Based on a previous version of this module. <Copyright> See LICENSE for licensing information. <Purpose> Test cases for for sig.py. """ import unittest import tuf.keydb import tuf.roledb import tuf.rsa_key import tuf.sig # Setup the keys to use in our test cases. KEYS = [] for _ in range(3): KEYS.append(tuf.rsa_key.generate(2048)) class Test(unittest.TestCase): def setUp(self): pass def test_get_signature_status_no_role(self): signable = {'signed' : 'test', 'signatures' : []} # Should verify we are not adding a duplicate signature # when doing the following action. Here we know 'signable' # has only one signature so it's okay. signable['signatures'].append(tuf.sig.generate_rsa_signature( signable['signed'], KEYS[0])) tuf.keydb.add_rsakey(KEYS[0]) # No specific role we're considering. sig_status = tuf.sig.get_signature_status(signable, None) self.assertEqual(0, sig_status['threshold']) self.assertEqual([KEYS[0]['keyid']], sig_status['good_sigs']) self.assertEqual([], sig_status['bad_sigs']) self.assertEqual([], sig_status['unknown_sigs']) self.assertEqual([], sig_status['untrusted_sigs']) self.assertEqual([], sig_status['unknown_method_sigs']) # Not allowed to call verify() without having specified a role. args = (signable, None) self.assertRaises(tuf.Error, tuf.sig.verify, *args) # Done. Let's remove the added key(s) from the key database. tuf.keydb.remove_key(KEYS[0]['keyid']) def test_get_signature_status_bad_sig(self): signable = {'signed' : 'test', 'signatures' : []} signable['signatures'].append(tuf.sig.generate_rsa_signature( signable['signed'], KEYS[0])) signable['signed'] += 'signature no longer matches signed data' tuf.keydb.add_rsakey(KEYS[0]) threshold = 1 roleinfo = tuf.formats.make_role_metadata( [KEYS[0]['keyid']], threshold) tuf.roledb.add_role('Root', roleinfo) sig_status = tuf.sig.get_signature_status(signable, 'Root') self.assertEqual(1, sig_status['threshold']) self.assertEqual([], sig_status['good_sigs']) self.assertEqual([KEYS[0]['keyid']], sig_status['bad_sigs']) self.assertEqual([], sig_status['unknown_sigs']) self.assertEqual([], sig_status['untrusted_sigs']) self.assertEqual([], sig_status['unknown_method_sigs']) self.assertFalse(tuf.sig.verify(signable, 'Root')) # Done. Let's remove the added key(s) from the key database. tuf.keydb.remove_key(KEYS[0]['keyid']) # Remove the role. tuf.roledb.remove_role('Root') def test_get_signature_status_unknown_method(self): signable = {'signed' : 'test', 'signatures' : []} signable['signatures'].append(tuf.sig.generate_rsa_signature( signable['signed'], KEYS[0])) signable['signatures'][0]['method'] = 'fake-sig-method' tuf.keydb.add_rsakey(KEYS[0]) threshold = 1 roleinfo = tuf.formats.make_role_metadata( [KEYS[0]['keyid']], threshold) tuf.roledb.add_role('Root', roleinfo) sig_status = tuf.sig.get_signature_status(signable, 'Root') self.assertEqual(1, sig_status['threshold']) self.assertEqual([], sig_status['good_sigs']) self.assertEqual([], sig_status['bad_sigs']) self.assertEqual([], sig_status['unknown_sigs']) self.assertEqual([], sig_status['untrusted_sigs']) self.assertEqual([KEYS[0]['keyid']], sig_status['unknown_method_sigs']) self.assertFalse(tuf.sig.verify(signable, 'Root')) # Done. Let's remove the added key(s) from the key database. tuf.keydb.remove_key(KEYS[0]['keyid']) # Remove the role. 
tuf.roledb.remove_role('Root') def test_get_signature_status_single_key(self): signable = {'signed' : 'test', 'signatures' : []} signable['signatures'].append(tuf.sig.generate_rsa_signature( signable['signed'], KEYS[0])) tuf.keydb.add_rsakey(KEYS[0]) threshold = 1 roleinfo = tuf.formats.make_role_metadata( [KEYS[0]['keyid']], threshold) tuf.roledb.add_role('Root', roleinfo) sig_status = tuf.sig.get_signature_status(signable, 'Root') self.assertEqual(1, sig_status['threshold']) self.assertEqual([KEYS[0]['keyid']], sig_status['good_sigs']) self.assertEqual([], sig_status['bad_sigs']) self.assertEqual([], sig_status['unknown_sigs']) self.assertEqual([], sig_status['untrusted_sigs']) self.assertEqual([], sig_status['unknown_method_sigs']) self.assertTrue(tuf.sig.verify(signable, 'Root')) # Done. Let's remove the added key(s) from the key database. tuf.keydb.remove_key(KEYS[0]['keyid']) # Remove the role. tuf.roledb.remove_role('Root') def test_get_signature_status_below_threshold(self): signable = {'signed' : 'test', 'signatures' : []} signable['signatures'].append(tuf.sig.generate_rsa_signature( signable['signed'], KEYS[0])) tuf.keydb.add_rsakey(KEYS[0]) threshold = 2 roleinfo = tuf.formats.make_role_metadata( [KEYS[0]['keyid'], KEYS[2]['keyid']], threshold) tuf.roledb.add_role('Root', roleinfo) sig_status = tuf.sig.get_signature_status(signable, 'Root') self.assertEqual(2, sig_status['threshold']) self.assertEqual([KEYS[0]['keyid']], sig_status['good_sigs']) self.assertEqual([], sig_status['bad_sigs']) self.assertEqual([], sig_status['unknown_sigs']) self.assertEqual([], sig_status['untrusted_sigs']) self.assertEqual([], sig_status['unknown_method_sigs']) self.assertFalse(tuf.sig.verify(signable, 'Root')) # Done. Let's remove the added key(s) from the key database. tuf.keydb.remove_key(KEYS[0]['keyid']) # Remove the role. tuf.roledb.remove_role('Root') def test_get_signature_status_below_threshold_unrecognized_sigs(self): signable = {'signed' : 'test', 'signatures' : []} # Two keys sign it, but only one of them will be trusted. signable['signatures'].append(tuf.sig.generate_rsa_signature( signable['signed'], KEYS[0])) signable['signatures'].append(tuf.sig.generate_rsa_signature( signable['signed'], KEYS[2])) tuf.keydb.add_rsakey(KEYS[0]) tuf.keydb.add_rsakey(KEYS[1]) threshold = 2 roleinfo = tuf.formats.make_role_metadata( [KEYS[0]['keyid'], KEYS[1]['keyid']], threshold) tuf.roledb.add_role('Root', roleinfo) sig_status = tuf.sig.get_signature_status(signable, 'Root') self.assertEqual(2, sig_status['threshold']) self.assertEqual([KEYS[0]['keyid']], sig_status['good_sigs']) self.assertEqual([], sig_status['bad_sigs']) self.assertEqual([KEYS[2]['keyid']], sig_status['unknown_sigs']) self.assertEqual([], sig_status['untrusted_sigs']) self.assertEqual([], sig_status['unknown_method_sigs']) self.assertFalse(tuf.sig.verify(signable, 'Root')) # Done. Let's remove the added key(s) from the key database. tuf.keydb.remove_key(KEYS[0]['keyid']) tuf.keydb.remove_key(KEYS[1]['keyid']) # Remove the role. tuf.roledb.remove_role('Root') def test_get_signature_status_below_threshold_unauthorized_sigs(self): signable = {'signed' : 'test', 'signatures' : []} # Two keys sign it, but one of them is only trusted for a different # role. 
signable['signatures'].append(tuf.sig.generate_rsa_signature( signable['signed'], KEYS[0])) signable['signatures'].append(tuf.sig.generate_rsa_signature( signable['signed'], KEYS[1])) tuf.keydb.add_rsakey(KEYS[0]) tuf.keydb.add_rsakey(KEYS[1]) threshold = 2 roleinfo = tuf.formats.make_role_metadata( [KEYS[0]['keyid'], KEYS[2]['keyid']], threshold) tuf.roledb.add_role('Root', roleinfo) roleinfo = tuf.formats.make_role_metadata( [KEYS[1]['keyid'], KEYS[2]['keyid']], threshold) tuf.roledb.add_role('Release', roleinfo) sig_status = tuf.sig.get_signature_status(signable, 'Root') self.assertEqual(2, sig_status['threshold']) self.assertEqual([KEYS[0]['keyid']], sig_status['good_sigs']) self.assertEqual([], sig_status['bad_sigs']) self.assertEqual([], sig_status['unknown_sigs']) self.assertEqual([KEYS[1]['keyid']], sig_status['untrusted_sigs']) self.assertEqual([], sig_status['unknown_method_sigs']) self.assertFalse(tuf.sig.verify(signable, 'Root')) # Done. Let's remove the added key(s) from the key database. tuf.keydb.remove_key(KEYS[0]['keyid']) tuf.keydb.remove_key(KEYS[1]['keyid']) # Remove the roles. tuf.roledb.remove_role('Root') tuf.roledb.remove_role('Release') def test_check_signatures_no_role(self): signable = {'signed' : 'test', 'signatures' : []} signable['signatures'].append(tuf.sig.generate_rsa_signature( signable['signed'], KEYS[0])) tuf.keydb.add_rsakey(KEYS[0]) # No specific role we're considering. It's invalid to use the # function tuf.sig.verify() without a role specified because # tuf.sig.verify() is checking trust, as well. args = (signable, None) self.assertRaises(tuf.Error, tuf.sig.verify, *args) # Done. Let's remove the added key(s) from the key database. tuf.keydb.remove_key(KEYS[0]['keyid']) def test_verify_single_key(self): signable = {'signed' : 'test', 'signatures' : []} signable['signatures'].append(tuf.sig.generate_rsa_signature( signable['signed'], KEYS[0])) tuf.keydb.add_rsakey(KEYS[0]) threshold = 1 roleinfo = tuf.formats.make_role_metadata( [KEYS[0]['keyid']], threshold) tuf.roledb.add_role('Root', roleinfo) # This will call verify() and return True if 'signable' is valid, # False otherwise. self.assertTrue(tuf.sig.verify(signable, 'Root')) # Done. Let's remove the added key(s) from the key database. tuf.keydb.remove_key(KEYS[0]['keyid']) # Remove the roles. tuf.roledb.remove_role('Root') def test_verify_unrecognized_sig(self): signable = {'signed' : 'test', 'signatures' : []} # Two keys sign it, but only one of them will be trusted. signable['signatures'].append(tuf.sig.generate_rsa_signature( signable['signed'], KEYS[0])) signable['signatures'].append(tuf.sig.generate_rsa_signature( signable['signed'], KEYS[2])) tuf.keydb.add_rsakey(KEYS[0]) tuf.keydb.add_rsakey(KEYS[1]) threshold = 2 roleinfo = tuf.formats.make_role_metadata( [KEYS[0]['keyid'], KEYS[1]['keyid']], threshold) tuf.roledb.add_role('Root', roleinfo) self.assertFalse(tuf.sig.verify(signable, 'Root')) # Done. Let's remove the added key(s) from the key database. tuf.keydb.remove_key(KEYS[0]['keyid']) tuf.keydb.remove_key(KEYS[1]['keyid']) # Remove the roles. 
tuf.roledb.remove_role('Root') def test_generate_rsa_signature(self): signable = {'signed' : 'test', 'signatures' : []} signable['signatures'].append(tuf.sig.generate_rsa_signature( signable['signed'], KEYS[0])) self.assertEqual(1, len(signable['signatures'])) signature = signable['signatures'][0] self.assertEqual(KEYS[0]['keyid'], signature['keyid']) signable['signatures'].append(tuf.sig.generate_rsa_signature( signable['signed'], KEYS[1])) self.assertEqual(2, len(signable['signatures'])) signature = signable['signatures'][1] self.assertEqual(KEYS[1]['keyid'], signature['keyid']) def test_may_need_new_keys(self): # One untrusted key in 'signable'. signable = {'signed' : 'test', 'signatures' : []} signable['signatures'].append(tuf.sig.generate_rsa_signature( signable['signed'], KEYS[0])) tuf.keydb.add_rsakey(KEYS[1]) threshold = 1 roleinfo = tuf.formats.make_role_metadata( [KEYS[1]['keyid']], threshold) tuf.roledb.add_role('Root', roleinfo) sig_status = tuf.sig.get_signature_status(signable, 'Root') self.assertTrue(tuf.sig.may_need_new_keys(sig_status)) # Done. Let's remove the added key(s) from the key database. tuf.keydb.remove_key(KEYS[1]['keyid']) # Remove the roles. tuf.roledb.remove_role('Root') def test_signable_has_invalid_format(self): # get_signature_status() and verify() validate 'signable' before continuing. # 'signable' must be of the form: {'signed': , 'signatures': [{}]}. # Object types are checked as well. signable = {'not_signed' : 'test', 'signatures' : []} args = (signable['not_signed'], KEYS[0]) self.assertRaises(tuf.FormatError, tuf.sig.get_signature_status, *args) # 'signatures' value must be a list. Let's try a dict. signable = {'signed' : 'test', 'signatures' : {}} args = (signable['signed'], KEYS[0]) self.assertRaises(tuf.FormatError, tuf.sig.get_signature_status, *args) if __name__ == "__main__": unittest.main()
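# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original test module.
# Condenses the signing/verification flow exercised by the tests above, using
# only APIs that already appear in this file. The 2048-bit key size and the
# 'Root' role name mirror the fixtures used throughout.
def _example_sign_and_verify():
  import tuf.formats
  import tuf.keydb
  import tuf.roledb
  import tuf.rsa_key
  import tuf.sig

  key = tuf.rsa_key.generate(2048)
  signable = {'signed': 'test', 'signatures': []}
  signable['signatures'].append(
      tuf.sig.generate_rsa_signature(signable['signed'], key))

  # Trust the key and require a threshold of one signature for 'Root'.
  tuf.keydb.add_rsakey(key)
  roleinfo = tuf.formats.make_role_metadata([key['keyid']], 1)
  tuf.roledb.add_role('Root', roleinfo)

  # True once at least `threshold` good signatures from trusted keys exist.
  return tuf.sig.verify(signable, 'Root')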
######################################################################## # Copyright (c) 2001-2006 Ciranova, Inc. All Rights Reserved. # # # # Permission is hereby granted, free of charge, to any person # # obtaining a copy of this software and associated documentation # # ("Ciranova Open Code"), to use the Ciranova Open Code without # # restriction, including without limitation the right to use, copy, # # modify, merge, publish, distribute, sublicense, and sell copies of # # the Ciranova Open Code, and to permit persons to whom the Ciranova # # Open Code is furnished to do so, subject to the following # # conditions: # # # # The above copyright notice and this permission notice must be # # included in all copies and all distribution, redistribution, and # # sublicensing of the Ciranova Open Code. THE CIRANOVA OPEN CODE IS # # PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND, EXPRESS, IMPLIED # # OR STATUTORY INCLUDING WITHOUT LIMITATION ANY WARRANTY OF # # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND # # NONINFRINGEMENT. IN NO EVENT SHALL CIRANOVA, INC. BE LIABLE FOR ANY # # INDIRECT, PUNITIVE, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES # # ARISING FROM, OUT OF OR IN CONNECTION WITH THE CIRANOVA OPEN CODE # # OR ANY USE OF THE CIRANOVA OPEN CODE, OR BE LIABLE FOR ANY CLAIM, # # DAMAGES OR OTHER LIABILITY, HOWEVER IT ARISES AND ON ANY THEORY OF # # LIABILITY, WHETHER IN AN ACTION FOR CONTRACT, STRICT LIABILITY OR # # TORT (INCLUDING NEGLIGENCE), OR OTHERWISE, ARISING FROM, OUT OF OR # # IN CONNECTION WITH THE CIRANOVA OPEN CODE OR ANY USE OF THE # # CIRANOVA OPEN CODE. The Ciranova Open Code is subject to U.S. # # export control laws and may be subject to export or import # # regulations in other countries, and all use of the Ciranova Open # # Code must be in compliance with such laws and regulations. If any # # license for the Ciranova Open Code is obtained pursuant to a # # government contract, all use, distribution and/or copying by the # # U.S. government shall be subject to this permission notice and any # # applicable FAR provisions. # ######################################################################## ######################################################################## # # Mosfet.py # ######################################################################## """Module: Mosfet This module implements a MosfetTemplate class for creating MOS transistor PyCells. MosfetTemplate provides the following capabilities: - (float ) transistor width - (float ) transistor length - (integer) fingers, number of transistors - (boolean) left diffusion contact - (float ) left diffusion contact coverage - (boolean) left transistor gate contact - (float ) left transistor gate contact coverage - (boolean) center diffusion contacts - (float ) center diffusion contact coverage - (boolean) center transistor gates contact - (float ) center transistor gates contact coverage - (boolean) right diffusion contact - (float ) right diffusion contact coverage - (boolean) right transistor gate contact - (float ) right transistor gate contact coverage - Stretch handles for contacts - Stretch handles for gate w & l - Auto-abutment - Electrical connectivity, i.e. nets, pins, terminals. 
Class variables: - (string ) poly, Layer name - (string ) diffusion, Layer name - (string ) well, Layer name - (string ) implant, Layer name - (string ) contact, Layer name - (string ) metal1, Layer name Technology file requirements: - (minEnclosure poly diffusion) - (minEnclosure diffusion poly ) - (minSpacing contact poly ) - (minSpacing poly ) - (minWidth contact ) Additional requirements exist in Via module. Module dependencies: - cni.dlo, CiraNova PyCell APIs. - Via, Contact PyCells Exceptions: - ValueError, for missing DRC rules in technology file. EDA tool integration: Stretch handles are specific features of layout editors. Standard OpenAccess semantics do not exist. To support stretch handles, we define a standard protocol, and create customized interpreters for each layout editor. This enables us to support stretch handles in multiple layout editors without changes to the Python API or the PyCell implementation. Other notes: [1] Dogbone configurations aren't implemented in this code. For current processes, 90nm and below, the transistor endcap to L-shaped source/drain diffusion spacing is typically bigger. This type of conditional rule is better modeled in upcoming API functions; hence, we defer the implementation. [2] Only creates pins for leftmost diffusion, rightmost diffusion, and leftmost gate. Unclear what to do about the center gates and diffusions, since this could be either a series or a parallel structure. """ __revision__ = "$Id: Mosfet_thkox.py 134 2008-03-21 21:33:04Z [email protected] $" __author__ = "Lyndon C. Lim" from cni.dlo import ( Box, Direction, DloGen, FailAction, Grouping, Instance, Layer, Location, ParamArray, ParamSpecArray, Pin, Point, RangeConstraint, Rect, Term, TermType, Text, ) from cni.integ.common import ( stretchHandle, autoAbutment, ) import traceback from Via import ( ViaInstance, ) class Dictionary: pass #### Layer rules in Santana.tech must be kept up-to-date for this to run correctly! class MosfetTemplate( DloGen): """Defines a MosfetTemplate class. """ poly = "poly" diffusion = "active" well = "nwell or pwell" implant = "pimplant" contact = "contact" metal1 = "metal1" @classmethod def defineParamSpecs(cls, specs): """Define the PyCell parameters. The order of invocation of specs() becomes the order on the form. Arguments: specs - (ParamSpecArray) PyCell parameters """ oxide = "thick" tranType = {"pimplant":"pmos_thkox", "nimplant":"nmos_thkox"}[cls.implant] l = specs.tech.getMosfetParams( tranType, oxide, "minLength") # No dogbone allowed. 
w = specs.tech.getPhysicalRule( "minWidth", specs.tech.getLayer(cls.contact)) + \ 2.0 * specs.tech.getPhysicalRule( "minEnclosure", specs.tech.getLayer(cls.diffusion), specs.tech.getLayer(cls.contact)) w = max( w, specs.tech.getMosfetParams( tranType, oxide, "minWidth")) specs( "w", w, constraint = RangeConstraint( w, 1000*w, FailAction.USE_DEFAULT)) specs( "l", l, constraint = RangeConstraint( l, 1000*l, FailAction.USE_DEFAULT)) specs( "fingers", 1), parameters = ( ("diffContactLeft", True ), ("diffContactLeftCov", 1.0 ), ("gateContactLeft", False ), ("gateContactLeftCov", 1.0 ), ("diffContactCenter", False ), ("diffContactCenterCov", 1.0 ), ("gateContactCenter", False ), ("gateContactCenterCov", 1.0 ), ("diffContactRight", True ), ("diffContactRightCov", 1.0 ), ("gateContactRight", False ), ("gateContactRightCov", 1.0 ), ) rangeConstraint = RangeConstraint(0.0, 1.0, FailAction.USE_DEFAULT) for parameter in parameters: if isinstance( parameter[1], float): specs( parameter[0], parameter[1], constraint=rangeConstraint) else: specs( parameter[0], parameter[1]) def setupParams( self, params): """Process PyCell parameters, prior to geometric construction. Decisions about process rules and PyCell-specific behaviors should be confined to this method. Create most useful format for variables to be used in later methods. Arguments: params - (ParamArray) PyCell parameters """ for key in params: self.__dict__[key] = params[ key] for key in ( "diffContactLeftCov", "gateContactLeftCov", "diffContactCenterCov", "gateContactCenterCov", "diffContactRightCov", "gateContactRightCov" ): # Contact coverage parameters are 0.0 - 1.0 self.__dict__[key] = min( max( self.__dict__[key], 0), 1.0) # Convert to process layer names if self.implant == "pimplant": self.encLayers = [ self.tech.getLayer( "nwell")] self.well = self.tech.getLayer( "nwell") else: self.encLayers = [ self.tech.getLayer( "pwell")] self.well = self.tech.getLayer( "pwell") self.alt = self.tech.getLayer( "thkox") self.poly = self.tech.getLayer( self.poly ) self.diffusion = self.tech.getLayer( self.diffusion ) self.implant = self.tech.getLayer( self.implant ) self.contact = self.tech.getLayer( self.contact ) self.metal1 = self.tech.getLayer( self.metal1 ) # Implant not an enclosing layer in our kit # self.encLayers.append( self.implant) self.instance = 0 # counter for instance names # Get process design rule information self.Endcap = self.tech.getPhysicalRule( "minEnclosure", self.poly, self.diffusion) self.ContSpacePoly = self.tech.getPhysicalRule( "minSpacing", self.contact, self.poly) self.DiffSpace = self.tech.getPhysicalRule( "minSpacing", self.diffusion) self.GateSpace = self.tech.getPhysicalRule( "minSpacing", self.poly) self.ContWidth = self.tech.getPhysicalRule( "minWidth", self.contact) self.grid = self.tech.getGridResolution() self.gridX2 = self.grid * 2.0 self.gridd2 = self.grid / 2.0 self.w = round(self.w / self.gridX2) * self.gridX2 self.l = round(self.l / self.gridX2) * self.gridX2 self.lDiv2 = self.l / 2.0 self.GatePitch = self.GateSpace + self.l self.GatePitchDiv2 = (self.GateSpace + self.l) / 2.0 self.GateSpaceDiv2 = self.GateSpace / 2.0 self.ContGatePitch = self.ContSpacePoly + self.lDiv2 + (self.ContWidth / 2.0) def genTopology( self): """Define topology (connectivity) for multi-device circuit PyCells. """ pass def sizeDevices( self): """Define device sizes within multi-device circuit PyCells. """ pass def createGate( self, x=0, y=0, terminal=False): """Create the poly rectangle which represents the MOS transistor gate. 
Override this method to create custom gates. Arguments: x - (integer) x coordinate of gate center y - (integer) y coordinate of lower diffusion edge """ left = x - self.lDiv2 right = x + self.lDiv2 # Create transistor gate gateRect = Rect( self.poly, Box( left, (y - self.Endcap), right, (y + self.w + self.Endcap), ) ) # Stretch handles for w & l stretchHandle( shape = gateRect, name = ("stretch%d" % self.instance), parameter = "w", location = Location.UPPER_CENTER, direction = Direction.NORTH_SOUTH, display = ("w = %.2f" % self.w), stretchType = "relative", userScale = "1.0", userSnap = "0.0025", ) stretchHandle( shape = gateRect, name = ("stretch%d" % self.instance), parameter = "l", location = Location.CENTER_RIGHT, direction = Direction.EAST_WEST, display = ("l = %.2f" % self.l), stretchType = "relative", userScale = "1.0", userSnap = "0.0025", ) # Create weakly-connected pins if terminal: # Bottom gate pin Pin( "%sS%d" % (terminal, self.instance), terminal, Rect( self.poly, Box( left, (y - self.Endcap), right, y, ) ) ) # Top gate pin Pin( "%sN%d" % (terminal, self.instance), terminal, Rect( self.poly, Box( left, (y + self.w), right, (y + self.w + self.Endcap), ) ) ) self.instance += 1 return( gateRect) def createGateCont( self, gateRect=False, coverage=1.0, stretch=False, terminal=False): """Create a gate contact by instantiating a poly contact PyCell. Arguments: gateRect - (PhysicalComponent) Gate rectangle for alignment. coverage - (float ) Percentage of poly width to be covered by contact stretch - (string ) Name of stretch handle property for gate contact """ gateCont = ViaInstance( "pcont", ParamArray(), None, "I%d" % self.instance, ) self.place( gateCont, Direction.SOUTH, gateRect, 0) width = self.l * coverage gateCont.resize( width = width, via = self.contact, metalLayer = self.poly, ) # Create overlapping poly rectangle for stretch handle polyRect = gateCont.promoteMetal( self.poly) bbox = polyRect.getBBox() width = max( width, bbox.getWidth()) / 2 center = bbox.getCenterX() bbox.setLeft( center - width) bbox.setRight( center + width) polyRect.setBBox( bbox) # Stretch handle for gate contact coverage stretchHandle( shape = polyRect, name = ("stretch%d" % self.instance), parameter = stretch, location = Location.CENTER_RIGHT, direction = Direction.EAST_WEST, stretchType = "relative", userScale = "1.0", userSnap = "0.0025", minVal = 0.0, maxVal = 1.0, ) # Create weakly-connected pins if terminal: Pin( ("%sC%d" % (terminal, self.instance)), terminal, Rect( self.poly, bbox) ) self.instance += 1 return( gateCont) def createSourceDrain( self, diffusionType="full", withContact=True, x=0, y=0, coverage=1.0, stretch=False, terminal=False): """Create a source or drain diffusion. Option to create diffusion contact instance. Option to create matching diffusion terminal. Option to create a stretch handle property. Override this method to create custom contacts. 
Arguments: diffusionType - (string) "full", "left", "right" withContact - (boolean) Create contact x - (float ) x coordinate for center of contact y - (float ) y coordinate for lower diffusion edge coverage - (float ) Percentage of source/drain diffusion to be covered by contact stretch - (string ) Name of stretch handle property """ # Create source/drain contact if withContact: diffCont = ViaInstance( "dcont", ParamArray( origin="lowerCenter"), None, "I%d" % self.instance, ) diffCont.setOrigin( Point(x, y-0.02)) height = self.w * coverage diffCont.resize( height = height, via = self.contact, metalLayer = self.diffusion, ) # Create overlapping diffusion rectangle for stretch handle diffRect = diffCont.promoteMetal( self.diffusion) bbox = diffRect.getBBox() height = max( height, bbox.getHeight()) bbox.setTop( bbox.getBottom() + height) diffRect.setBBox( bbox) # Stretch handle for diffusion contact coverage stretchHandle( shape = diffRect, name = ("stretch%d" % self.instance), parameter = stretch, location = Location.UPPER_CENTER, direction = Direction.NORTH_SOUTH, stretchType = "relative", userScale = "1.0", userSnap = "0.0025", minVal = 0.0, maxVal = 1.0, ) self.instance += 1 # Create source/drain diffusion if withContact: bbox = Box( bbox.getLeft(), y, bbox.getRight(), (y + self.w), ) else: if (diffusionType == "left"): bbox = Box( x, y, (x + self.GateSpaceDiv2), (y + self.w), ) elif (diffusionType == "right"): bbox = Box( (x - self.GateSpaceDiv2), y, x, (y + self.w), ) elif (diffusionType == "full"): bbox = Box( (x - self.GateSpaceDiv2), y, (x + self.GateSpaceDiv2), (y + self.w), ) else: raise ValueError, "Unknown: diffusionType=%s" % diffusionType if terminal: p0 = Pin( terminal, terminal, Rect( self.diffusion, bbox) ) pinRect = p0.getShapes()[0] autoAbutment( pinRect, self.w, [ Direction.WEST], "cniMOS", abut2PinEqual = [ { "spacing":0.0}, { "diffLeftStyle":"DiffHalf" }, { "diffLeftStyle":"DiffHalf" } ], abut2PinBigger = [ { "spacing":0.0}, { "diffLeftStyle":"DiffEdgeAbut" }, { "diffLeftStyle":"DiffEdgeAbut" } ], abut3PinBigger = [ { "spacing":0.0}, { "diffLeftStyle":"ContactEdgeAbut2"}, { "diffLeftStyle":"ContactEdgeAbut2"} ], abut3PinEqual = [ { "spacing":0.0}, { "diffLeftStyle":"DiffAbut" }, { "diffLeftStyle":"ContactEdgeAbut2"} ], abut2PinSmaller = [ { "spacing":0.0}, { "diffLeftStyle":"DiffEdgeAbut" }, { "diffLeftStyle":"DiffEdgeAbut" } ], abut3PinSmaller = [ { "spacing":0.0}, { "diffLeftStyle":"DiffEdgeAbut" }, { "diffLeftStyle":"DiffEdgeAbut" } ], noAbut = [ { "spacing":0.4}], function = "cniAbut", #shape = pinRect, #abutDirection = diffusionType, #abutClass = "cniMOS", #abutFunction = "cniAbut", #spacingRule = self.DiffSpace, ) else: pinRect = Rect( self.diffusion, bbox) return( pinRect) def genLayout( self): """Main body of geometric construction code. Create the leftmost contact and transistor gate. Loop to create center contacts and gates. Create the rightmost gate and contact. Avoid modifying or overriding this method. PyCell-specific behaviors and calculations should be kept out of this method. """ # obj is used to track the rightmost object, to calculate # the diffusion coordinates. # dbox is the bounding box of the underlying diffusion. dbox = Dictionary() dbox.bottom = 0 dbox.top = self.w origin = Dictionary() xCoord = 0 origin.y = 0 objectPitch = { True:self.ContGatePitch, False:self.GatePitchDiv2, } # Mark PyCell as containing stretch handles self.props["cniStretch"] = "CiraNova" # For integration with layout editors, save parameter # settings in the submaster. 
They are not saved on the # instance in the default case. # For auto-abutment self.props["diffContactLeft"] = self.diffContactLeft self.props["diffContactRight"] = self.diffContactRight # For stretch handles self.props["w"] = self.w self.props["l"] = self.l # Create electrical terminals needed for pins Term("G", TermType.INPUT) Term("S", TermType.INPUT_OUTPUT) Term("D", TermType.INPUT_OUTPUT) # Create leftmost diffusion contact obj = self.createSourceDrain( diffusionType = "left", withContact = self.diffContactLeft, coverage = self.diffContactLeftCov, stretch = "diffContactLeftCov", terminal = "S", x = xCoord, ) dbox.left = obj.getBBox( self.diffusion).getLeft() # Create leftmost gate w/optional gate contact xCoord += objectPitch[self.diffContactLeft]# + 0.0025 obj = self.createGate( x=xCoord, terminal="G") origin.x = obj.getBBox().left if self.gateContactLeft: self.createGateCont( gateRect = obj, coverage = self.gateContactLeftCov, stretch = "gateContactLeftCov", terminal = "G", ) # Loop to create center gates and contacts for i in range( self.fingers - 2): # Create diffusion contact on left of gate xCoord += objectPitch[self.diffContactCenter] + 0.0025 self.createSourceDrain( diffusionType = "full", withContact = self.diffContactCenter, coverage = self.diffContactCenterCov, stretch = "diffContactCenterCov", x = xCoord, ) # Create gate w/optional gate contact if self.diffContactCenter: xCoord += objectPitch[self.diffContactCenter] + 0.0025 else: xCoord += objectPitch[self.diffContactCenter] - 0.0025 obj = self.createGate( x=xCoord, terminal="G") if self.gateContactCenter: self.createGateCont( gateRect = obj, coverage = self.gateContactCenterCov, stretch = "gateContactCenterCov", terminal = "G", ) # Create rightmost gate w/optional gate contact if self.fingers > 1: if self.diffContactCenter: xCoord += objectPitch[self.diffContactCenter] + 0.0025 else: xCoord += objectPitch[self.diffContactCenter] - 0.0025 self.createSourceDrain( diffusionType = "full", withContact = self.diffContactCenter, coverage = self.diffContactCenterCov, stretch = "diffContactCenterCov", x = xCoord, ) xCoord += objectPitch[self.diffContactCenter] + 0.0025 obj = self.createGate( x=xCoord, terminal="G") if self.gateContactRight: self.createGateCont( gateRect = obj, coverage = self.gateContactRightCov, stretch = "gateContactRightCov", terminal = "G", ) # Create rightmost diffusion contact xCoord += objectPitch[self.diffContactRight]# + 0.0025 obj = self.createSourceDrain( diffusionType = "right", withContact = self.diffContactRight, coverage = self.diffContactRightCov, stretch = "diffContactRightCov", x = xCoord, terminal = "D", ) dbox.right = obj.getBBox(self.diffusion).getRight() # Create overall diffusion box Rect( self.diffusion, Box( dbox.left, dbox.bottom, dbox.right, dbox.top) ) # Create implant box, to overlap diffusion rather than whole cell Rect( self.implant, Box( dbox.left, dbox.bottom, dbox.right, dbox.top) ) Rect( self.well, Box( dbox.left - 0.055, dbox.bottom - 0.055, dbox.right + 0.055, dbox.top + 0.055 ) ) Rect( self.alt, Box( dbox.left - 0.055, dbox.bottom - 0.055, dbox.right + 0.055, dbox.top + 0.055 ) ) # Create other outline layers all = Grouping( "all", self.getComps()) # all.add( self.fgAddEnclosingRects( all, self.encLayers)) This wasn't working, replaced with above rectangles # Setting the origin is important. # Avoid shifting of instance locations during auto-abutment. # Correctly track mouse motion during stretching. 
all.moveBy( -origin.x, -origin.y) @classmethod def unitTest( cls, paramsMaker, lib, cell, view, ignoreError=True): """Test single instance or specific method of the PyCell. """ # Note: Pass in paramMaker so parameters are constructed in # the correct tech context (within the current DloGen). def unitTestMethod( self): """Define how to build the unit test. """ # Get default parameters from specs, then update # with explicitly supplied specs for unitTest. specs = ParamSpecArray() self.defineParamSpecs( specs) params = ParamArray( specs) params.update( paramsMaker()) print print( "Creating design: %s" % repr(self)) print( " using technology: %s" % self.tech.id()) print( " by %s.generate(%r)" % (self.__class__.__name__, params)) specs.verify( params) self.generate( params) self.save() try: cls.withNewDlo( unitTestMethod, lib, cell, view) except: if ignoreError: # Error messages go to debug log print print( "Exception caught.") traceback.print_exc() else: raise class Nmos_thkox( MosfetTemplate): """Define Nmos class to implement NMOS MOS transistors. """ implant = "nimplant" class Pmos_thkox( MosfetTemplate): """Define Nmos class to implement PMOS MOS transistors. """ implant = "pimplant" ######################################################################## # # End # ######################################################################## ############################################################################### # # Define self-tests # ############################################################################### if __name__ == "__main__": def smalltest( self): """Create layout instances for quick development debugging. """ i = 0 x = 0 y = 0 param = ParamArray( w = 0.6, l = 0.18, fingers = 1, diffContactLeft = True, diffContactLeftCov = 0.7, gateContactLeft = False, gateContactLeftCov = 0.7, diffContactCenter = False, diffContactCenterCov = 0.5, gateContactCenter = False, gateContactCenterCov = 0.5, diffContactRight = False, diffContactRightCov = 1.0, gateContactRight = True, gateContactRightCov = 1.0, ) for master in [ "nmos_thkox", "pmos_thkox"]: inst = Instance(("%s" % master), param, None, ("I%d" % i)) inst.setOrigin( Point( x,y)) i += 1 if (i % 4): x += 10 else: x = 0 y += 10 param = ParamArray( w = 2.0, l = 1.5, fingers = 1, diffContactLeft = True, diffContactLeftCov = 0.3, gateContactLeft = True, gateContactLeftCov = 0.3, diffContactCenter = True, diffContactCenterCov = 0.5, gateContactCenter = True, gateContactCenterCov = 0.5, diffContactRight = True, diffContactRightCov = 0.7, gateContactRight = True, gateContactRightCov = 0.7, ) for master in [ "nmos_thkox", "pmos_thkox"]: inst = Instance(("%s" % master), param, None, ("I%d" % i)) inst.setOrigin( Point( x,y)) i += 1 if (i % 4): x += 10 else: x = 0 y += 10 param = ParamArray( w = 2.0, l = 1.5, fingers = 2, diffContactLeft = True, diffContactLeftCov = 0.3, gateContactLeft = True, gateContactLeftCov = 0.3, diffContactCenter = True, diffContactCenterCov = 0.5, gateContactCenter = True, gateContactCenterCov = 0.5, diffContactRight = True, diffContactRightCov = 1.0, gateContactRight = True, gateContactRightCov = 1.0, ) for master in [ "nmos_thkox", "pmos_thkox"]: inst = Instance(("%s" % master), param, None, ("I%d" % i)) inst.setOrigin( Point( x,y)) i += 1 if (i % 4): x += 10 else: x = 0 y += 10 param = ParamArray( w = 2.0, l = 1.5, fingers = 2, diffContactLeft = False, diffContactLeftCov = 1.0, gateContactLeft = True, gateContactLeftCov = 1.0, diffContactCenter = False, diffContactCenterCov = 0.5, gateContactCenter = True, 
gateContactCenterCov = 0.6, diffContactRight = True, diffContactRightCov = 0.4, gateContactRight = False, gateContactRightCov = 0.4, ) for master in [ "nmos_thkox", "pmos_thkox"]: inst = Instance(("%s" % master), param, None, ("I%d" % i)) inst.setOrigin( Point( x,y)) i += 1 if (i % 4): x += 10 else: x = 0 y += 20 self.save() def bigtest( self): """Create layout instances for comprehensive testing, such as DRC or regression testing. """ i = 0 x = 0 y = 0 for w in [ 0.09, 2.0]: for l in [ 0.05, 1.0]: for fingers in [ 1, 2]: for diffContactLeftCov in [ 0.0, 0.33, 1.0]: for gateContactLeftCov in [ 0.0, 0.33, 1.0]: for diffContactCenterCov in [ 0.0, 0.33, 1.0]: for gateContactCenterCov in [ 0.0, 0.33, 1.0]: for diffContactRightCov in [ 0.0, 0.33, 1.0]: for gateContactRightCov in [ 0.0, 0.33, 1.0]: param = ParamArray( w = w, l = l, fingers = fingers, diffContactLeft = (not diffContactLeftCov), diffContactLeftCov = diffContactLeftCov, gateContactLeft = (not gateContactLeftCov), gateContactLeftCov = gateContactLeftCov, diffContactCenter = (not diffContactCenterCov), diffContactCenterCov = diffContactCenterCov, gateContactCenter = (not gateContactCenterCov), gateContactCenterCov = gateContactCenterCov, diffContactRight = (not diffContactRightCov), diffContactRightCov = diffContactRightCov, gateContactRight = (not gateContactRightCov), gateContactRightCov = gateContactRightCov, ) for master in [ "nmos_thkox", "pmos_thkox"]: inst = Instance(("%s" % master), param, None, ("I%d" % i)) inst.setOrigin( Point( x,y)) i += 1 if (i % 100): x += 20 else: x = 0 y += 20 print("Total number of instances created: %d" % i) self.save() # TEST is defined externally from this file. # For building the test cases, invoke like this: # cnpy -c "TEST='SMALL';execfile('Mosfet.py')" if "TEST" in vars(): if vars()["TEST"] == "SMALL": MosfetTemplate.unitTest(lambda: ParamArray(), "MyPyCellLib", "UNITTEST_Mosfet", "layout") DloGen.withNewDlo( smalltest, "MyPyCellLib", "SMALLTEST_Mosfet", "layout") elif vars()["TEST"] == "BIG": DloGen.withNewDlo( bigtest, "MyPyCellLib", "BIGTEST_Mosfet", "layout") else: DloGen.withNewDlo( smalltest, "MyPyCellLib", "SMALLTEST_Mosfet", "layout") # end
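########################################################################
#
# Editor's note: illustrative sketch, not part of the original PyCell
# file. It mirrors the TEST == "SMALL" branch above but passes explicit
# parameter values; parameters left out here take the defaults declared
# in defineParamSpecs(), as unitTest() builds its ParamArray from the
# specs before applying the maker's values. Library and cell names are
# placeholders.
#
########################################################################
def exampletest():
    MosfetTemplate.unitTest(
        lambda: ParamArray( w=0.6, l=0.18, fingers=2),
        "MyPyCellLib", "EXAMPLE_Mosfet", "layout",
    )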
# coding=utf-8 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import absolute_import, division, print_function, unicode_literals import os from builtins import str from pants.base.build_environment import get_buildroot from pants.base.exceptions import TaskError from pants.build_graph.files import Files from pants.cache.cache_setup import CacheSetup from pants.option.arg_splitter import GLOBAL_SCOPE from pants.subsystem.subsystem import Subsystem from pants.subsystem.subsystem_client_mixin import SubsystemDependency from pants.task.task import Task from pants.util.dirutil import safe_rmtree from pants_test.task_test_base import TaskTestBase class DummyTask(Task): """A task that appends the content of a Files's sources into its results_dir.""" _implementation_version = 0 _force_fail = False @property def incremental(self): return self._incremental @property def cache_target_dirs(self): return True @classmethod def implementation_version_str(cls): # NB: Intentionally ignoring `super` and returning a simplified version. return str(cls._implementation_version) # Enforces a single VT and returns a tuple of (vt, was_valid). def execute(self): with self.invalidated(self.context.targets()) as invalidation: assert len(invalidation.all_vts) == 1 vt = invalidation.all_vts[0] was_valid = vt.valid if not was_valid: if vt.is_incremental: assert os.path.isdir(vt.previous_results_dir) for source in vt.target.sources_relative_to_buildroot(): with open(os.path.join(get_buildroot(), source), 'r') as infile: outfile_name = os.path.join(vt.results_dir, source) with open(outfile_name, 'a') as outfile: outfile.write(infile.read()) if self._force_fail: raise TaskError('Task forced to fail before updating vt state.') vt.update() return vt, was_valid class FakeTask(Task): _impls = [] @classmethod def implementation_version(cls): return super(FakeTask, cls).implementation_version() + cls._impls @classmethod def supports_passthru_args(cls): return True options_scope = 'fake-task' _deps = () @classmethod def subsystem_dependencies(cls): return super(FakeTask, cls).subsystem_dependencies() + cls._deps def execute(self): pass class OtherFakeTask(FakeTask): _other_impls = [] @classmethod def supports_passthru_args(cls): return False @classmethod def implementation_version(cls): return super(OtherFakeTask, cls).implementation_version() + cls._other_impls options_scope = 'other-fake-task' class FakeSubsystem(Subsystem): options_scope = 'fake-subsystem' @classmethod def register_options(cls, register): super(FakeSubsystem, cls).register_options(register) register('--fake-option', type=bool) class AnotherFakeTask(Task): options_scope = 'another-fake-task' @classmethod def supports_passthru_args(cls): return True @classmethod def subsystem_dependencies(cls): return super(AnotherFakeTask, cls).subsystem_dependencies() + (FakeSubsystem.scoped(cls),) def execute(self): pass class YetAnotherFakeTask(AnotherFakeTask): options_scope = 'yet-another-fake-task' @classmethod def supports_passthru_args(cls): return False class TaskTest(TaskTestBase): _filename = 'f' _file_contents = 'results_string\n' _cachedir = 'local_artifact_cache' @classmethod def task_type(cls): return DummyTask def assertContent(self, vt, content): with open(os.path.join(vt.current_results_dir, self._filename), 'r') as f: self.assertEquals(f.read(), content) def _toggle_cache(self, enable_artifact_cache): cache_dir = self.create_dir(self._cachedir) 
self.set_options_for_scope( CacheSetup.options_scope, write_to=[cache_dir], read_from=[cache_dir], write=enable_artifact_cache, read=enable_artifact_cache, ) def _fixture(self, incremental, options=None): target = self.make_target( ':t', target_type=Files, sources=[self._filename], make_missing_sources=False, ) context = self.context(options=options, target_roots=[target]) task = self.create_task(context) task._incremental = incremental return task, target def _run_fixture(self, content=None, incremental=False, artifact_cache=False, options=None): content = content or self._file_contents self._toggle_cache(artifact_cache) task, target = self._fixture(incremental=incremental, options=options) self._create_clean_file(target, content) vtA, was_valid = task.execute() return task, vtA, was_valid def _create_clean_file(self, target, content): self.create_file(self._filename, content) target.mark_invalidation_hash_dirty() def _cache_ignore_options(self, globally=False): return { 'cache' + ('' if globally else '.' + self.options_scope): { 'ignore': True } } def _synthesize_subtype(self, name='A', scope=None, cls=FakeTask, **kwargs): """Generate a synthesized subtype of `cls`.""" if scope is None: scope = cls.options_scope subclass_name = b'test_{0}_{1}_{2}'.format(cls.__name__, scope, name) kwargs['options_scope'] = scope return type(subclass_name, (cls,), kwargs) def _instantiate_synthesized_type(self, task_type, **kwargs): """Generate a new instance of the synthesized type `task_type`.""" ctx = super(TaskTestBase, self).context(for_task_types=[task_type], **kwargs) return task_type(ctx, self.test_workdir) def _task_type_to_fp(self, task_type, **kwargs): """Instantiate the `task_type` and return its fingerprint.""" task_object = self._instantiate_synthesized_type(task_type, **kwargs) return task_object.fingerprint def _synth_fp(self, scope=None, cls=FakeTask, options_fingerprintable=None, **kwargs): """Synthesize a subtype of `cls`, instantiate it, and take its fingerprint. `options_fingerprintable` describes the registered options in their respective scopes which can contribute to the task fingerprint.""" task_type = self._synthesize_subtype(scope=scope, cls=cls, **kwargs) return self._task_type_to_fp( task_type, options_fingerprintable=options_fingerprintable) def test_revert_after_failure(self): # Regression test to catch the following scenario: # # 1) In state A: Task suceeds and writes some output. Key is recorded by the invalidator. # 2) In state B: Task fails, but writes some output. Key is not recorded. # 3) After reverting back to state A: The current key is the same as the one recorded at the # end of step 1), so it looks like no work needs to be done, but actually the task # must re-run, to overwrite the output written in step 2. good_content = "good_content" bad_content = "bad_content" task, target = self._fixture(incremental=False) # Clean run succeeds. self._create_clean_file(target, good_content) vt, was_valid = task.execute() self.assertFalse(was_valid) self.assertContent(vt, good_content) # Change causes the task to fail. self._create_clean_file(target, bad_content) task._force_fail = True self.assertRaises(TaskError, task.execute) task._force_fail = False # Reverting to the previous content should invalidate, so the task # can reset any state created by the failed run. 
self._create_clean_file(target, good_content) vt, was_valid = task.execute() self.assertFalse(was_valid) self.assertContent(vt, good_content) def test_incremental(self): """Run three times with two unique fingerprints.""" one = '1\n' two = '2\n' three = '3\n' task, target = self._fixture(incremental=True) # Clean - this is the first run so the VT is invalid. self._create_clean_file(target, one) vtA, was_A_valid = task.execute() self.assertFalse(was_A_valid) self.assertContent(vtA, one) # Changed the source file, so it copies the results from vtA. self._create_clean_file(target, two) vtB, was_B_valid = task.execute() self.assertFalse(was_B_valid) self.assertEqual(vtB.previous_cache_key, vtA.cache_key) self.assertContent(vtB, one + two) self.assertTrue(vtB.has_previous_results_dir) # Another changed source means a new cache_key. The previous_results_dir is copied. self._create_clean_file(target, three) vtC, was_C_valid = task.execute() self.assertFalse(was_C_valid) self.assertTrue(vtC.has_previous_results_dir) self.assertEqual(vtC.previous_results_dir, vtB.current_results_dir) self.assertContent(vtC, one + two + three) # Again create a clean file but this time reuse an old cache key - in this case vtB. self._create_clean_file(target, two) # This VT will be invalid, since there is no cache hit and it doesn't match the immediately # previous run. It will wipe the invalid vtB.current_results_dir and followup by copying in the # most recent results_dir, from vtC. vtD, was_D_valid = task.execute() self.assertFalse(was_D_valid) self.assertTrue(vtD.has_previous_results_dir) self.assertEqual(vtD.previous_results_dir, vtC.current_results_dir) self.assertContent(vtD, one + two + three + two) # And that the results_dir was stable throughout. self.assertEqual(vtA.results_dir, vtB.results_dir) self.assertEqual(vtB.results_dir, vtD.results_dir) def test_non_incremental(self): """Non-incremental should be completely unassociated.""" one = '1\n' two = '2\n' task, target = self._fixture(incremental=False) # Run twice. self._create_clean_file(target, one) vtA, _ = task.execute() self.assertContent(vtA, one) self._create_clean_file(target, two) vtB, _ = task.execute() # Confirm two unassociated current directories with a stable results_dir. self.assertContent(vtA, one) self.assertContent(vtB, two) self.assertNotEqual(vtA.current_results_dir, vtB.current_results_dir) self.assertEqual(vtA.results_dir, vtB.results_dir) def test_implementation_version(self): """When the implementation version changes, previous artifacts are not available.""" one = '1\n' two = '2\n' task, target = self._fixture(incremental=True) # Run twice, with a different implementation version the second time. DummyTask._implementation_version = 0 self._create_clean_file(target, one) vtA, _ = task.execute() self.assertContent(vtA, one) DummyTask._implementation_version = 1 self._create_clean_file(target, two) vtB, _ = task.execute() # No incrementalism. self.assertFalse(vtA.is_incremental) self.assertFalse(vtB.is_incremental) # Confirm two unassociated current directories, and unassociated stable directories. self.assertContent(vtA, one) self.assertContent(vtB, two) self.assertNotEqual(vtA.current_results_dir, vtB.current_results_dir) self.assertNotEqual(vtA.results_dir, vtB.results_dir) def test_execute_cleans_invalid_result_dirs(self): # Regression test to protect task.execute() from returning invalid dirs. 
task, vt, _ = self._run_fixture() self.assertNotEqual(os.listdir(vt.results_dir), []) self.assertTrue(os.path.islink(vt.results_dir)) # Mimic the failure case, where an invalid task is run twice, due to failed download or # something. vt.force_invalidate() # But if this VT is invalid for a second run, the next invalidation deletes and recreates. self.assertTrue(os.path.islink(vt.results_dir)) self.assertTrue(os.path.isdir(vt.current_results_dir)) def test_cache_hit_short_circuits_incremental_copy(self): # Tasks should only copy over previous results if there is no cache hit, otherwise the copy is # wasted. first_contents = 'staid country photo' second_contents = 'shocking tabloid secret' self.assertFalse(self.buildroot_files(self._cachedir)) # Initial run will have been invalid no cache hit, and with no previous_results_dir. task, vtA, was_A_valid = self._run_fixture(content=first_contents, incremental=True, artifact_cache=True) self.assertTrue(self.buildroot_files(self._cachedir)) self.assertTrue(task.incremental) self.assertFalse(was_A_valid) # Invalidate and then execute with the same cache key. # Should be valid due to the artifact cache hit. No previous_results_dir will have been copied. vtA.force_invalidate() vtB, was_B_valid = task.execute() self.assertEqual(vtA.cache_key.hash, vtB.cache_key.hash) self.assertTrue(was_B_valid) self.assertFalse(vtB.has_previous_results_dir) # Change the cache_key and disable the cache_reads. # This results in an invalid vt, with no cache hit. It will then copy the vtB.previous_results # into vtC.results_dir. self._toggle_cache(False) self._create_clean_file(vtB.target, second_contents) vtC, was_C_valid = task.execute() self.assertNotEqual(vtB.cache_key.hash, vtC.cache_key.hash) self.assertEqual(vtC.previous_cache_key, vtB.cache_key) self.assertFalse(was_C_valid) self.assertTrue(vtC.has_previous_results_dir) self.assertEqual(vtB.current_results_dir, vtC.previous_results_dir) # Verify the content. The task was invalid twice - the initial run and the run with the changed # source file. Only vtC (previous sucessful runs + cache miss) resulted in copying the # previous_results. self.assertContent(vtC, first_contents + second_contents) # live_dirs() is in cache_manager, but like all of these tests, only makes sense to test as a # sequence of task runs. def test_live_dirs(self): task, vtA, _ = self._run_fixture(incremental=True) vtA_live = list(vtA.live_dirs()) self.assertIn(vtA.results_dir, vtA_live) self.assertIn(vtA.current_results_dir, vtA_live) self.assertEqual(len(vtA_live), 2) self._create_clean_file(vtA.target, 'bar') vtB, _ = task.execute() vtB_live = list(vtB.live_dirs()) # This time it contains the previous_results_dir. self.assertIn(vtB.results_dir, vtB_live) self.assertIn(vtB.current_results_dir, vtB_live) self.assertIn(vtA.current_results_dir, vtB_live) self.assertEqual(len(vtB_live), 3) # Delete vtB results_dir. live_dirs() should only return existing dirs, even if it knows the # previous_cache_key. 
safe_rmtree(vtB.current_results_dir) self._create_clean_file(vtB.target, 'baz') vtC, _ = task.execute() vtC_live = list(vtC.live_dirs()) self.assertNotIn(vtB.current_results_dir, vtC_live) self.assertEqual(len(vtC_live), 2) def test_ignore_global(self): _, vtA, was_valid = self._run_fixture() self.assertFalse(was_valid) self.assertTrue(vtA.cacheable) self.reset_build_graph() _, vtA, was_valid = self._run_fixture() self.assertTrue(was_valid) self.assertTrue(vtA.cacheable) self.reset_build_graph() _, vtA, was_valid = self._run_fixture(options=self._cache_ignore_options(globally=True)) self.assertFalse(was_valid) self.assertFalse(vtA.cacheable) def test_ignore(self): _, vtA, was_valid = self._run_fixture() self.assertFalse(was_valid) self.assertTrue(vtA.cacheable) self.reset_build_graph() _, vtA, was_valid = self._run_fixture() self.assertTrue(was_valid) self.assertTrue(vtA.cacheable) self.reset_build_graph() _, vtA, was_valid = self._run_fixture(options=self._cache_ignore_options()) self.assertFalse(was_valid) self.assertFalse(vtA.cacheable) def test_fingerprint_identity(self): """Tasks formed with the same parameters should have the same fingerprint (smoke test).""" x = self._synth_fp() y = self._synth_fp() self.assertEqual(y, x) def test_fingerprint_implementation_version_single(self): """Tasks with a different implementation_version() should have different fingerprints.""" empty_impls = self._synth_fp(_impls=[]) zero_version = self._synth_fp(_impls=[('asdf', 0)]) self.assertNotEqual(zero_version, empty_impls) one_version = self._synth_fp(_impls=[('asdf', 1)]) self.assertNotEqual(one_version, empty_impls) alt_name_version = self._synth_fp(_impls=[('xxx', 0)]) self.assertNotEqual(alt_name_version, zero_version) zero_one_version = self._synth_fp(_impls=[('asdf', 0), ('asdf', 1)]) self.assertNotEqual(zero_one_version, zero_version) self.assertNotEqual(zero_one_version, one_version) def test_fingerprint_implementation_version_inheritance(self): """The implementation_version() of superclasses of the task should affect the task fingerprint.""" versioned_fake = self._synth_fp(_impls=[('asdf', 0)]) base_version_other_fake = self._synth_fp( cls=OtherFakeTask, _impls=[('asdf', 0)], _other_impls=[], ) self.assertNotEqual(base_version_other_fake, versioned_fake) extended_version_other_fake = self._synth_fp( cls=OtherFakeTask, _impls=[('asdf', 0)], _other_impls=[('xxx', 0)], ) self.assertNotEqual(extended_version_other_fake, base_version_other_fake) extended_version_copy = self._synth_fp( cls=OtherFakeTask, _impls=[('asdf', 1)], _other_impls=[('xxx', 0)], ) self.assertNotEqual(extended_version_copy, extended_version_other_fake) def test_stable_name(self): """The stable_name() should be used to form the task fingerprint.""" a_fingerprint = self._synth_fp(name='some_name', _stable_name='xxx') b_fingerprint = self._synth_fp(name='some_name', _stable_name='yyy') self.assertNotEqual(b_fingerprint, a_fingerprint) def test_fingerprint_changing_options_scope(self): """The options_scope of the task and any of its subsystem_dependencies should affect the task fingerprint.""" task_fp = self._synth_fp(scope='xxx') other_task_fp = self._synth_fp(scope='yyy') self.assertNotEqual(other_task_fp, task_fp) subsystem_deps_fp = self._synth_fp(scope='xxx', _deps=(SubsystemDependency(FakeSubsystem, GLOBAL_SCOPE),)) self.assertNotEqual(subsystem_deps_fp, task_fp) scoped_subsystems_fp = self._synth_fp(scope='xxx', _deps=(SubsystemDependency(FakeSubsystem, 'xxx'),)) self.assertNotEqual(scoped_subsystems_fp, subsystem_deps_fp) 
def test_fingerprint_options_on_registered_scopes_only(self): """Changing or setting an option value should only affect the task fingerprint if it is registered as a fingerprintable option.""" default_fp = self._synth_fp(cls=AnotherFakeTask, options_fingerprintable={}) self.set_options_for_scope( AnotherFakeTask.options_scope, **{'fake-option': False}) unregistered_option_fp = self._synth_fp(cls=AnotherFakeTask, options_fingerprintable={}) self.assertEqual(unregistered_option_fp, default_fp) registered_option_fp = self._synth_fp(cls=AnotherFakeTask, options_fingerprintable={ AnotherFakeTask.options_scope: {'fake-option': bool}, }) self.assertNotEqual(registered_option_fp, default_fp) def test_fingerprint_changing_option_value(self): """Changing an option value in some scope should affect the task fingerprint.""" cur_option_spec = { AnotherFakeTask.options_scope: {'fake-option': bool}, } self.set_options_for_scope( AnotherFakeTask.options_scope, **{'fake-option': False}) task_opt_false_fp = self._synth_fp(cls=AnotherFakeTask, options_fingerprintable=cur_option_spec) self.set_options_for_scope( AnotherFakeTask.options_scope, **{'fake-option': True}) task_opt_true_fp = self._synth_fp(cls=AnotherFakeTask, options_fingerprintable=cur_option_spec) self.assertNotEqual(task_opt_true_fp, task_opt_false_fp) def test_fingerprint_passthru_args(self): """Passthrough arguments should affect fingerprints iff the task supports passthrough args.""" task_type_base = self._synthesize_subtype(cls=AnotherFakeTask) empty_passthru_args_fp = self._task_type_to_fp( task_type_base, passthru_args=[], ) non_empty_passthru_args_fp = self._task_type_to_fp( task_type_base, passthru_args=['something'], ) self.assertNotEqual(non_empty_passthru_args_fp, empty_passthru_args_fp) # YetAnotherFakeTask.supports_passthru_args() returns False task_type_derived_ignore_passthru = self._synthesize_subtype(cls=YetAnotherFakeTask) different_task_with_same_opts_fp = self._task_type_to_fp( task_type_derived_ignore_passthru, passthru_args=[], ) different_task_with_passthru_fp = self._task_type_to_fp( task_type_derived_ignore_passthru, passthru_args=['asdf'], ) self.assertEqual(different_task_with_passthru_fp, different_task_with_same_opts_fp)
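# Editor's note: illustrative sketch, not part of the original test module.
# It condenses the common fixture flow used above: _run_fixture() writes
# self._file_contents into a Files target, runs DummyTask once, and returns
# the task, the single VersionedTarget, and whether it was already valid.
# In practice a check like this would be added as a method on TaskTest
# itself; subclassing (as done here for clarity) would also re-collect every
# inherited test.
class ExampleTaskRunTest(TaskTest):
  def test_first_run_is_invalid_and_writes_content(self):
    task, vt, was_valid = self._run_fixture()
    self.assertFalse(was_valid)                   # fresh target: no cache hit
    self.assertContent(vt, self._file_contents)   # sources copied into results_dir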
from ..Qt import QtGui, QtCore import os, weakref, re from ..pgcollections import OrderedDict from ..python2_3 import asUnicode, basestring from .ParameterItem import ParameterItem PARAM_TYPES = {} PARAM_NAMES = {} def registerParameterType(name, cls, override=False): global PARAM_TYPES if name in PARAM_TYPES and not override: raise Exception("Parameter type '%s' already exists (use override=True to replace)" % name) PARAM_TYPES[name] = cls PARAM_NAMES[cls] = name def __reload__(old): PARAM_TYPES.update(old.get('PARAM_TYPES', {})) PARAM_NAMES.update(old.get('PARAM_NAMES', {})) class Parameter(QtCore.QObject): """ A Parameter is the basic unit of data in a parameter tree. Each parameter has a name, a type, a value, and several other properties that modify the behavior of the Parameter. Parameters may have parent / child / sibling relationships to construct organized hierarchies. Parameters generally do not have any inherent GUI or visual interpretation; instead they manage ParameterItem instances which take care of display and user interaction. Note: It is fairly uncommon to use the Parameter class directly; mostly you will use subclasses which provide specialized type and data handling. The static pethod Parameter.create(...) is an easy way to generate instances of these subclasses. For more Parameter types, see ParameterTree.parameterTypes module. =================================== ========================================================= **Signals:** sigStateChanged(self, change, info) Emitted when anything changes about this parameter at all. The second argument is a string indicating what changed ('value', 'childAdded', etc..) The third argument can be any extra information about the change sigTreeStateChanged(self, changes) Emitted when any child in the tree changes state (but only if monitorChildren() is called) the format of *changes* is [(param, change, info), ...] sigValueChanged(self, value) Emitted when value is finished changing sigValueChanging(self, value) Emitted immediately for all value changes, including during editing. sigChildAdded(self, child, index) Emitted when a child is added sigChildRemoved(self, child) Emitted when a child is removed sigRemoved(self) Emitted when this parameter is removed sigParentChanged(self, parent) Emitted when this parameter's parent has changed sigLimitsChanged(self, limits) Emitted when this parameter's limits have changed sigDefaultChanged(self, default) Emitted when this parameter's default value has changed sigNameChanged(self, name) Emitted when this parameter's name has changed sigOptionsChanged(self, opts) Emitted when any of this parameter's options have changed =================================== ========================================================= """ ## name, type, limits, etc. ## can also carry UI hints (slider vs spinbox, etc.) 
sigValueChanged = QtCore.Signal(object, object) ## self, value emitted when value is finished being edited sigValueChanging = QtCore.Signal(object, object) ## self, value emitted as value is being edited sigChildAdded = QtCore.Signal(object, object, object) ## self, child, index sigChildRemoved = QtCore.Signal(object, object) ## self, child sigRemoved = QtCore.Signal(object) ## self sigParentChanged = QtCore.Signal(object, object) ## self, parent sigLimitsChanged = QtCore.Signal(object, object) ## self, limits sigDefaultChanged = QtCore.Signal(object, object) ## self, default sigNameChanged = QtCore.Signal(object, object) ## self, name sigOptionsChanged = QtCore.Signal(object, object) ## self, {opt:val, ...} ## Emitted when anything changes about this parameter at all. ## The second argument is a string indicating what changed ('value', 'childAdded', etc..) ## The third argument can be any extra information about the change sigStateChanged = QtCore.Signal(object, object, object) ## self, change, info ## emitted when any child in the tree changes state ## (but only if monitorChildren() is called) sigTreeStateChanged = QtCore.Signal(object, object) # self, changes # changes = [(param, change, info), ...] # bad planning. #def __new__(cls, *args, **opts): #try: #cls = PARAM_TYPES[opts['type']] #except KeyError: #pass #return QtCore.QObject.__new__(cls, *args, **opts) @staticmethod def create(**opts): """ Static method that creates a new Parameter (or subclass) instance using opts['type'] to select the appropriate class. All options are passed directly to the new Parameter's __init__ method. Use registerParameterType() to add new class types. """ typ = opts.get('type', None) if typ is None: cls = Parameter else: cls = PARAM_TYPES[opts['type']] return cls(**opts) def __init__(self, **opts): """ Initialize a Parameter object. Although it is rare to directly create a Parameter instance, the options available to this method are also allowed by most Parameter subclasses. ======================= ========================================================= **Keyword Arguments:** name The name to give this Parameter. This is the name that will appear in the left-most column of a ParameterTree for this Parameter. value The value to initially assign to this Parameter. default The default value for this Parameter (most Parameters provide an option to 'reset to default'). children A list of children for this Parameter. Children may be given either as a Parameter instance or as a dictionary to pass to Parameter.create(). In this way, it is possible to specify complex hierarchies of Parameters from a single nested data structure. readonly If True, the user will not be allowed to edit this Parameter. (default=False) enabled If False, any widget(s) for this parameter will appear disabled. (default=True) visible If False, the Parameter will not appear when displayed in a ParameterTree. (default=True) renamable If True, the user may rename this Parameter. (default=False) removable If True, the user may remove this Parameter. (default=False) expanded If True, the Parameter will appear expanded when displayed in a ParameterTree (its children will be visible). (default=True) title (str or None) If specified, then the parameter will be displayed to the user using this string as its name. However, the parameter will still be referred to internally using the *name* specified above. Note that this option is not compatible with renamable=True. 
(default=None; added in version 0.9.9) ======================= ========================================================= """ QtCore.QObject.__init__(self) self.opts = { 'type': None, 'readonly': False, 'visible': True, 'enabled': True, 'renamable': False, 'removable': False, 'strictNaming': False, # forces name to be usable as a python variable 'expanded': True, 'title': None, #'limits': None, ## This is a bad plan--each parameter type may have a different data type for limits. } value = opts.get('value', None) name = opts.get('name', None) self.opts.update(opts) self.opts['value'] = None # will be set later. self.opts['name'] = None self.childs = [] self.names = {} ## map name:child self.items = weakref.WeakKeyDictionary() ## keeps track of tree items representing this parameter self._parent = None self.treeStateChanges = [] ## cache of tree state changes to be delivered on next emit self.blockTreeChangeEmit = 0 #self.monitoringChildren = False ## prevent calling monitorChildren more than once if not isinstance(name, basestring): raise Exception("Parameter must have a string name specified in opts.") self.setName(name) self.addChildren(self.opts.get('children', [])) self.opts['value'] = None if value is not None: self.setValue(value) if 'default' not in self.opts: self.opts['default'] = None self.setDefault(self.opts['value']) ## Connect all state changed signals to the general sigStateChanged self.sigValueChanged.connect(lambda param, data: self.emitStateChanged('value', data)) self.sigChildAdded.connect(lambda param, *data: self.emitStateChanged('childAdded', data)) self.sigChildRemoved.connect(lambda param, data: self.emitStateChanged('childRemoved', data)) self.sigParentChanged.connect(lambda param, data: self.emitStateChanged('parent', data)) self.sigLimitsChanged.connect(lambda param, data: self.emitStateChanged('limits', data)) self.sigDefaultChanged.connect(lambda param, data: self.emitStateChanged('default', data)) self.sigNameChanged.connect(lambda param, data: self.emitStateChanged('name', data)) self.sigOptionsChanged.connect(lambda param, data: self.emitStateChanged('options', data)) #self.watchParam(self) ## emit treechange signals if our own state changes def name(self): """Return the name of this Parameter.""" return self.opts['name'] def setName(self, name): """Attempt to change the name of this parameter; return the actual name. (The parameter may reject the name change or automatically pick a different name)""" if self.opts['strictNaming']: if len(name) < 1 or re.search(r'\W', name) or re.match(r'\d', name[0]): raise Exception("Parameter name '%s' is invalid. (Must contain only alphanumeric and underscore characters and may not start with a number)" % name) parent = self.parent() if parent is not None: name = parent._renameChild(self, name) ## first ask parent if it's ok to rename if self.opts['name'] != name: self.opts['name'] = name self.sigNameChanged.emit(self, name) return name def type(self): """Return the type string for this Parameter.""" return self.opts['type'] def isType(self, typ): """ Return True if this parameter type matches the name *typ*. This can occur either of two ways: - If self.type() == *typ* - If this parameter's class is registered with the name *typ* """ if self.type() == typ: return True global PARAM_TYPES cls = PARAM_TYPES.get(typ, None) if cls is None: raise Exception("Type name '%s' is not registered." % str(typ)) return self.__class__ is cls def childPath(self, child): """ Return the path of parameter names from self to child. 
If child is not a (grand)child of self, return None. """ path = [] while child is not self: path.insert(0, child.name()) child = child.parent() if child is None: return None return path def setValue(self, value, blockSignal=None): """ Set the value of this Parameter; return the actual value that was set. (this may be different from the value that was requested) """ try: if blockSignal is not None: self.sigValueChanged.disconnect(blockSignal) value = self._interpretValue(value) if self.opts['value'] == value: return value self.opts['value'] = value self.sigValueChanged.emit(self, value) finally: if blockSignal is not None: self.sigValueChanged.connect(blockSignal) return value def _interpretValue(self, v): return v def value(self): """ Return the value of this Parameter. """ return self.opts['value'] def getValues(self): """Return a tree of all values that are children of this parameter""" vals = OrderedDict() for ch in self: vals[ch.name()] = (ch.value(), ch.getValues()) return vals def saveState(self, filter=None): """ Return a structure representing the entire state of the parameter tree. The tree state may be restored from this structure using restoreState(). If *filter* is set to 'user', then only user-settable data will be included in the returned state. """ if filter is None: state = self.opts.copy() if state['type'] is None: global PARAM_NAMES state['type'] = PARAM_NAMES.get(type(self), None) elif filter == 'user': state = {'value': self.value()} else: raise ValueError("Unrecognized filter argument: '%s'" % filter) ch = OrderedDict([(ch.name(), ch.saveState(filter=filter)) for ch in self]) if len(ch) > 0: state['children'] = ch return state def restoreState(self, state, recursive=True, addChildren=True, removeChildren=True, blockSignals=True): """ Restore the state of this parameter and its children from a structure generated using saveState() If recursive is True, then attempt to restore the state of child parameters as well. If addChildren is True, then any children which are referenced in the state object will be created if they do not already exist. If removeChildren is True, then any children which are not referenced in the state object will be removed. If blockSignals is True, no signals will be emitted until the tree has been completely restored. This prevents signal handlers from responding to a partially-rebuilt network. """ state = state.copy() childState = state.pop('children', []) ## list of children may be stored either as list or dict. 
if isinstance(childState, dict): cs = [] for k,v in childState.items(): cs.append(v.copy()) cs[-1].setdefault('name', k) childState = cs if blockSignals: self.blockTreeChangeSignal() try: self.setOpts(**state) if not recursive: return ptr = 0 ## pointer to first child that has not been restored yet foundChilds = set() #print "==============", self.name() for ch in childState: name = ch['name'] #typ = ch.get('type', None) #print('child: %s, %s' % (self.name()+'.'+name, typ)) ## First, see if there is already a child with this name gotChild = False for i, ch2 in enumerate(self.childs[ptr:]): #print " ", ch2.name(), ch2.type() if ch2.name() != name: # or not ch2.isType(typ): continue gotChild = True #print " found it" if i != 0: ## move parameter to next position #self.removeChild(ch2) self.insertChild(ptr, ch2) #print " moved to position", ptr ch2.restoreState(ch, recursive=recursive, addChildren=addChildren, removeChildren=removeChildren) foundChilds.add(ch2) break if not gotChild: if not addChildren: #print " ignored child" continue #print " created new" ch2 = Parameter.create(**ch) self.insertChild(ptr, ch2) foundChilds.add(ch2) ptr += 1 if removeChildren: for ch in self.childs[:]: if ch not in foundChilds: #print " remove:", ch self.removeChild(ch) finally: if blockSignals: self.unblockTreeChangeSignal() def defaultValue(self): """Return the default value for this parameter.""" return self.opts['default'] def setDefault(self, val): """Set the default value for this parameter.""" if self.opts['default'] == val: return self.opts['default'] = val self.sigDefaultChanged.emit(self, val) def setToDefault(self): """Set this parameter's value to the default.""" if self.hasDefault(): self.setValue(self.defaultValue()) def hasDefault(self): """Returns True if this parameter has a default value.""" return 'default' in self.opts def valueIsDefault(self): """Returns True if this parameter's value is equal to the default value.""" return self.value() == self.defaultValue() def setLimits(self, limits): """Set limits on the acceptable values for this parameter. The format of limits depends on the type of the parameter and some parameters do not make use of limits at all.""" if 'limits' in self.opts and self.opts['limits'] == limits: return self.opts['limits'] = limits self.sigLimitsChanged.emit(self, limits) return limits def writable(self): """ Returns True if this parameter's value can be changed by the user. Note that the value of the parameter can *always* be changed by calling setValue(). """ return not self.readonly() def setWritable(self, writable=True): """Set whether this Parameter should be editable by the user. (This is exactly the opposite of setReadonly).""" self.setOpts(readonly=not writable) def readonly(self): """ Return True if this parameter is read-only. (this is the opposite of writable()) """ return self.opts.get('readonly', False) def setReadonly(self, readonly=True): """Set whether this Parameter's value may be edited by the user (this is the opposite of setWritable()).""" self.setOpts(readonly=readonly) def setOpts(self, **opts): """ Set any arbitrary options on this parameter. The exact behavior of this function will depend on the parameter type, but most parameters will accept a common set of options: value, name, limits, default, readonly, removable, renamable, visible, enabled, and expanded. See :func:`Parameter.__init__ <pyqtgraph.parametertree.Parameter.__init__>` for more information on default options. 
""" changed = OrderedDict() for k in opts: if k == 'value': self.setValue(opts[k]) elif k == 'name': self.setName(opts[k]) elif k == 'limits': self.setLimits(opts[k]) elif k == 'default': self.setDefault(opts[k]) elif k not in self.opts or self.opts[k] != opts[k]: self.opts[k] = opts[k] changed[k] = opts[k] if len(changed) > 0: self.sigOptionsChanged.emit(self, changed) def emitStateChanged(self, changeDesc, data): ## Emits stateChanged signal and ## requests emission of new treeStateChanged signal self.sigStateChanged.emit(self, changeDesc, data) #self.treeStateChanged(self, changeDesc, data) self.treeStateChanges.append((self, changeDesc, data)) self.emitTreeChanges() def makeTreeItem(self, depth): """ Return a TreeWidgetItem suitable for displaying/controlling the content of this parameter. This is called automatically when a ParameterTree attempts to display this Parameter. Most subclasses will want to override this function. """ if hasattr(self, 'itemClass'): #print "Param:", self, "Make item from itemClass:", self.itemClass return self.itemClass(self, depth) else: return ParameterItem(self, depth=depth) def addChild(self, child, autoIncrementName=None): """ Add another parameter to the end of this parameter's child list. See insertChild() for a description of the *autoIncrementName* argument. """ return self.insertChild(len(self.childs), child, autoIncrementName=autoIncrementName) def addChildren(self, children): """ Add a list or dict of children to this parameter. This method calls addChild once for each value in *children*. """ ## If children was specified as dict, then assume keys are the names. if isinstance(children, dict): ch2 = [] for name, opts in children.items(): if isinstance(opts, dict) and 'name' not in opts: opts = opts.copy() opts['name'] = name ch2.append(opts) children = ch2 for chOpts in children: #print self, "Add child:", type(chOpts), id(chOpts) self.addChild(chOpts) def insertChild(self, pos, child, autoIncrementName=None): """ Insert a new child at pos. If pos is a Parameter, then insert at the position of that Parameter. If child is a dict, then a parameter is constructed using :func:`Parameter.create <pyqtgraph.parametertree.Parameter.create>`. By default, the child's 'autoIncrementName' option determines whether the name will be adjusted to avoid prior name collisions. This behavior may be overridden by specifying the *autoIncrementName* argument. This argument was added in version 0.9.9. """ if isinstance(child, dict): child = Parameter.create(**child) name = child.name() if name in self.names and child is not self.names[name]: if autoIncrementName is True or (autoIncrementName is None and child.opts.get('autoIncrementName', False)): name = self.incrementName(name) child.setName(name) else: raise Exception("Already have child named %s" % str(name)) if isinstance(pos, Parameter): pos = self.childs.index(pos) with self.treeChangeBlocker(): if child.parent() is not None: child.remove() self.names[name] = child self.childs.insert(pos, child) child.parentChanged(self) self.sigChildAdded.emit(self, child, pos) child.sigTreeStateChanged.connect(self.treeStateChanged) return child def removeChild(self, child): """Remove a child parameter.""" name = child.name() if name not in self.names or self.names[name] is not child: raise Exception("Parameter %s is not my child; can't remove." 
% str(child)) del self.names[name] self.childs.pop(self.childs.index(child)) child.parentChanged(None) self.sigChildRemoved.emit(self, child) try: child.sigTreeStateChanged.disconnect(self.treeStateChanged) except (TypeError, RuntimeError): ## already disconnected pass def clearChildren(self): """Remove all child parameters.""" for ch in self.childs[:]: self.removeChild(ch) def children(self): """Return a list of this parameter's children. Warning: this overrides QObject.children """ return self.childs[:] def hasChildren(self): """Return True if this Parameter has children.""" return len(self.childs) > 0 def parentChanged(self, parent): """This method is called when the parameter's parent has changed. It may be useful to extend this method in subclasses.""" self._parent = parent self.sigParentChanged.emit(self, parent) def parent(self): """Return the parent of this parameter.""" return self._parent def remove(self): """Remove this parameter from its parent's child list""" parent = self.parent() if parent is None: raise Exception("Cannot remove; no parent.") parent.removeChild(self) self.sigRemoved.emit(self) def incrementName(self, name): ## return an unused name by adding a number to the name given base, num = re.match('(.*)(\d*)', name).groups() numLen = len(num) if numLen == 0: num = 2 numLen = 1 else: num = int(num) while True: newName = base + ("%%0%dd"%numLen) % num if newName not in self.names: return newName num += 1 def __iter__(self): for ch in self.childs: yield ch def __getitem__(self, names): """Get the value of a child parameter. The name may also be a tuple giving the path to a sub-parameter:: value = param[('child', 'grandchild')] """ if not isinstance(names, tuple): names = (names,) return self.param(*names).value() def __setitem__(self, names, value): """Set the value of a child parameter. The name may also be a tuple giving the path to a sub-parameter:: param[('child', 'grandchild')] = value """ if isinstance(names, basestring): names = (names,) return self.param(*names).setValue(value) def child(self, *names): """Return a child parameter. Accepts the name of the child or a tuple (path, to, child) Added in version 0.9.9. Earlier versions used the 'param' method, which is still implemented for backward compatibility. """ try: param = self.names[names[0]] except KeyError: raise KeyError("Parameter %s has no child named %s" % (self.name(), names[0])) if len(names) > 1: return param.child(*names[1:]) else: return param def param(self, *names): # for backward compatibility. return self.child(*names) def __repr__(self): return asUnicode("<%s '%s' at 0x%x>") % (self.__class__.__name__, self.name(), id(self)) def __getattr__(self, attr): ## Leaving this undocumented because I might like to remove it in the future.. #print type(self), attr if 'names' not in self.__dict__: raise AttributeError(attr) if attr in self.names: import traceback traceback.print_stack() print("Warning: Use of Parameter.subParam is deprecated. Use Parameter.param(name) instead.") return self.param(attr) else: raise AttributeError(attr) def _renameChild(self, child, name): ## Only to be called from Parameter.rename if name in self.names: return child.name() self.names[name] = child del self.names[child.name()] return name def registerItem(self, item): self.items[item] = None def hide(self): """Hide this parameter. It and its children will no longer be visible in any ParameterTree widgets it is connected to.""" self.show(False) def show(self, s=True): """Show this parameter. 
""" self.opts['visible'] = s self.sigOptionsChanged.emit(self, {'visible': s}) def treeChangeBlocker(self): """ Return an object that can be used to temporarily block and accumulate sigTreeStateChanged signals. This is meant to be used when numerous changes are about to be made to the tree and only one change signal should be emitted at the end. Example:: with param.treeChangeBlocker(): param.addChild(...) param.removeChild(...) param.setValue(...) """ return SignalBlocker(self.blockTreeChangeSignal, self.unblockTreeChangeSignal) def blockTreeChangeSignal(self): """ Used to temporarily block and accumulate tree change signals. *You must remember to unblock*, so it is advisable to use treeChangeBlocker() instead. """ self.blockTreeChangeEmit += 1 def unblockTreeChangeSignal(self): """Unblocks enission of sigTreeStateChanged and flushes the changes out through a single signal.""" self.blockTreeChangeEmit -= 1 self.emitTreeChanges() def treeStateChanged(self, param, changes): """ Called when the state of any sub-parameter has changed. ============== ================================================================ **Arguments:** param The immediate child whose tree state has changed. note that the change may have originated from a grandchild. changes List of tuples describing all changes that have been made in this event: (param, changeDescr, data) ============== ================================================================ This function can be extended to react to tree state changes. """ self.treeStateChanges.extend(changes) self.emitTreeChanges() def emitTreeChanges(self): if self.blockTreeChangeEmit == 0: changes = self.treeStateChanges self.treeStateChanges = [] if len(changes) > 0: self.sigTreeStateChanged.emit(self, changes) class SignalBlocker(object): def __init__(self, enterFn, exitFn): self.enterFn = enterFn self.exitFn = exitFn def __enter__(self): self.enterFn() def __exit__(self, exc_type, exc_value, tb): self.exitFn()
#--------------------------------------------------------------------------- # Copyright 2012 The Open Source Electronic Health Record Agent # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #--------------------------------------------------------------------------- ## @package OSEHRAHelper ## OSEHRA test helper ''' OSEHRAHelper provides classes that establish connections to VistA and interaction methods such as write() and wait() @copyright The Open Source Electronic Health Record Agent @license http://www.apache.org/licenses/LICENSE-2.0 ''' import sys import os import telnetlib import TestHelper import time import re import logging import csv filedir = os.path.dirname(os.path.abspath(__file__)) pexpectdir = os.path.normpath(os.path.join(filedir, "../Pexpect")) import socket paramikoedir = os.path.normpath(os.path.join(filedir, "../")) sys.path.append(pexpectdir) sys.path.append(paramikoedir) try: import pexpect no_pexpect = None except ImportError, no_pexpect: pass try: import paramiko no_paramiko = None except ImportError, no_paramiko: pass #--------------------------------------------------------------------------- # Initial Global Variables to use over the course of connecting # connection=False # log =False #--------------------------------------------------------------------------- class PROMPT(object): """Wait for a VISTA> prompt in current namespace.""" class ConnectMUMPS(object): def exitToPrompt(self): self.write("Quit") while True: try: index2 = self.multiwait(["to continue","Option:",self.prompt, "want to halt","[0-9]+d[0-9]+"]) except TIMEOUT: continue if index2 == 1: self.write("Continue") self.wait("Do you want to halt") self.write("Y") self.wait(self.prompt) break if index2 == 2: break if index2 == 3: self.write("Y") if index2 == 4: self.write("Q") self.write("^") self.MenuLocation=[] def ZN(self, namespace): self.wait('>') self.write('ZN "' + namespace + '"') self.namespace = namespace self.prompt = self.namespace + '>' def login(self, username, password): self.wait('Username:') self.write(username) self.wait('Password') self.write(password) def getenv(self, volume): self.write('D GETENV^%ZOSV W Y') if sys.platform == 'win32': match = self.wait_re(volume + ':[0-9A-Za-z-]+', None) test = match[1].span() VistAboxvol = '' for i in range(test[0], test[1]): VistAboxvol = VistAboxvol + match[2][i] self.boxvol = VistAboxvol else: self.wait_re(volume + ':[0-9A-Za-z-]+', None) self.boxvol = self.connection.after def IEN(self, file, objectname): self.write('S DUZ=1 D Q^DI') self.wait('OPTION') self.write('5') self.wait_re('FILE:') self.write(file) self.wait(file + ' NAME') self.write(objectname + '\r') self.wait_re('CAPTIONED OUTPUT?') self.write('N') self.wait_re('PRINT FIELD') self.write('NUMBER\r') self.wait('Heading') self.write('') self.wait('DEVICE') if sys.platform == 'win32': self.write('\r') match = self.wait_re('\r\n[0-9]+') test = match[1].span() number = '' for i in range(test[0], test[1]): number = number + match[2][i] number = number.lstrip('\r\n') self.IENumber = number else: 
self.write('') self.wait_re('\n[0-9]+') number = self.connection.after number = number.lstrip('\r\n') self.IENumber = number self.write('') class ConnectWinCache(ConnectMUMPS): def __init__(self, logfile, instance, namespace, location='127.0.0.1'): super(ConnectMUMPS, self).__init__() self.connection = telnetlib.Telnet(location, 23) if len(namespace) == 0: namespace = 'VISTA' self.namespace = namespace self.prompt = self.namespace + '>' self.log = file(logfile, 'w') self.type = 'cache' path,filename = os.path.split(logfile) self.MenuLocation=[] self.lastconnection="" self.optionParentDict = [] self.optionMenuTextDict = [] def write(self, command): self.connection.write(command + '\r') logging.debug('connection.write:' + command) self.log.flush() def wait(self, command, tout=15): logging.debug('connection.expect: ' + str(command)) if command is PROMPT: command = self.namespace + '>' rbuf = self.connection.read_until(command, tout) if rbuf.find(command) == -1: self.log.write('ERROR: expected: ' + command + 'actual: ' + rbuf) logging.debug('ERROR: expected: ' + command + 'actual: ' + rbuf) raise TestHelper.TestError('ERROR: expected: ' + command + 'actual: ' + rbuf) else: self.log.write(rbuf) logging.debug(rbuf) self.lastconnection=rbuf return 1 def wait_re(self, command, timeout=30): logging.debug('connection.expect: ' + str(command)) if command is PROMPT: command = self.prompt compCommand = re.compile(command,re.I) output = self.connection.expect([compCommand], timeout) self.match = output[1] self.before = output[2] if output[0] == -1 and output[1] == None: raise Exception("Timed out") if output[2]: self.log.write(output[2]) self.log.flush() self.lastconnection=output[2] return output def multiwait(self, options, tout=15): logging.debug('connection.expect: ' + str(options)) if isinstance(options, list): index = self.connection.expect(options, tout) if index == -1: logging.debug('ERROR: expected: ' + str(options)) raise TestHelper.TestError('ERROR: expected: ' + str(options)) self.log.write(index[2]) self.lastconnection=index[2] return index[0] else: raise IndexError('Input to multiwait function is not a list') def startCoverage(self, routines=['*']): self.write('D ^%SYS.MONLBL') rval = self.multiwait(['Stop Monitor', 'Start Monitor']) if rval == 0: self.write('1') self.wait('Start Monitor') self.write('1') elif rval == 1: self.write('1') else: raise TestHelper.TestError('ERROR starting monitor, rbuf: ' + rval) for routine in routines: self.wait('Routine Name') self.write(routine) self.wait('Routine Name', tout=120) self.write('') self.wait('choice') self.write('2') self.wait('choice') self.write('1') self.wait('continue') self.write('\r') def stopCoverage(self, path, humanreadable='OFF'): newpath, filename = os.path.split(path) self.write('D ^%SYS.MONLBL') self.wait('choice') if humanreadable == 'ON': self.write('5') self.wait('summary') self.write('Y') else: self.write('6') self.wait('Routine number') self.write('*') self.wait('FileName') self.write(newpath + '/Coverage/' + filename.replace('.log', '.cmcov').replace('.txt', '.cmcov')) self.wait('continue') self.write('') self.wait('choice') self.write('1\r') class ConnectLinuxCache(ConnectMUMPS): def __init__(self, logfile, instance, namespace, location='127.0.0.1'): super(ConnectMUMPS, self).__init__() self.connection = pexpect.spawn('ccontrol session ' + instance + ' -U ' + namespace, timeout=None) if len(namespace) == 0: namespace = 'VISTA' self.namespace = namespace self.prompt = self.namespace + '>' self.connection.logfile_read = 
file(logfile, 'w') self.type = 'cache' path,filename = os.path.split(logfile) self.MenuLocation=[] self.lastconnection="" self.optionParentDict = [] self.optionMenuTextDict = [] def write(self, command): self.connection.send(command + '\r') logging.debug('connection.write:' + command) def wait(self, command, tout=15): logging.debug('connection.expect: ' + str(command)) if command is PROMPT: command = self.namespace + '>' rbuf = self.connection.expect_exact(command, tout) if rbuf == -1: logging.debug('ERROR: expected: ' + command) raise TestHelper.TestError('ERROR: expected: ' + command) else: self.lastconnection=self.connection.before return 1 def wait_re(self, command, timeout=15): logging.debug('connection.expect: ' + str(command)) if not timeout: timeout = -1 compCommand = re.compile(command,re.I) self.connection.expect(compCommand, timeout) self.lastconnection=self.connection.before def multiwait(self, options, tout=15): logging.debug('connection.expect: ' + str(options)) if isinstance(options, list): index = self.connection.expect(options, tout) if index == -1: logging.debug('ERROR: expected: ' + options) raise TestHelper.TestError('ERROR: expected: ' + options) self.connection.logfile_read.write(options[index]) self.lastconnection=self.connection.before return index else: raise IndexError('Input to multiwait function is not a list') def startCoverage(self, routines=['*']): self.write('D ^%SYS.MONLBL') rval = self.multiwait(['Stop Monitor', 'Start Monitor']) if rval == 0: self.write('1') self.wait('Start Monitor') self.write('1') elif rval == 1: self.write('1') else: raise TestHelper.TestError('ERROR starting monitor, rbuf: ' + rval) for routine in routines: self.wait('Routine Name') self.write(routine) self.wait('Routine Name', tout=120) self.write('') self.wait('choice') self.write('2') self.wait('choice') self.write('1') self.wait('continue') self.write('\r') def stopCoverage(self, path, humanreadable='OFF'): newpath, filename = os.path.split(path) self.write('D ^%SYS.MONLBL') self.wait('choice') if humanreadable == 'ON': self.write('5') self.wait('summary') self.write('Y') else: self.write('6') self.wait('Routine number') self.write('*') self.wait('FileName') self.write(newpath + '/Coverage/' + filename.replace('.log', '.cmcov').replace('.txt', '.cmcov')) self.wait('continue') self.write('') self.wait('choice') self.write('1\r') class ConnectLinuxGTM(ConnectMUMPS): def __init__(self, logfile, instance, namespace, location='127.0.0.1'): super(ConnectMUMPS, self).__init__() gtm_command = os.getenv('gtm_dist')+'/mumps -dir' self.connection = pexpect.spawn(gtm_command, timeout=None) if len(namespace) == 0: self.prompt = os.getenv("gtm_prompt") if self.prompt == None: self.prompt = "GTM>" self.connection.logfile_read = file(logfile, 'w') self.type = 'GTM' path,filename = os.path.split(logfile) self.MenuLocation=[] self.lastconnection="" self.optionParentDict = [] self.optionMenuTextDict = [] def write(self, command): self.connection.send(command + '\r') logging.debug('connection.write: ' + command) def wait(self, command, tout=15): logging.debug('connection.expect: ' + str(command)) if command is PROMPT: command = self.prompt rbuf = self.connection.expect_exact(command, tout) logging.debug('RECEIVED: ' + command) if rbuf == -1: logging.debug('ERROR: expected: ' + command) raise TestHelper.TestError('ERROR: expected: ' + command) else: self.lastconnection=self.connection.before return 1 def wait_re(self, command, timeout=None): logging.debug('connection.expect: ' + str(command)) if not 
timeout: timeout = -1 compCommand = re.compile(command,re.I) self.connection.expect(compCommand, timeout) self.lastconnection=self.connection.before def multiwait(self, options, tout=15): logging.debug('connection.expect: ' + str(options)) if isinstance(options, list): index = self.connection.expect(options, tout) if index == -1: logging.debug('ERROR: expected: ' + str(options)) raise TestHelper.TestError('ERROR: expected: ' + str(options)) self.connection.logfile_read.write(options[index]) self.lastconnection=self.connection.before return index else: raise IndexError('Input to multiwait function is not a list') def startCoverage(self, routines=['*']): self.write('K ^ZZCOVERAGE VIEW "TRACE":1:"^ZZCOVERAGE"') def stopCoverage(self, path, humanreadable='OFF'): path, filename = os.path.split(path) self.write('VIEW "TRACE":0:"^ZZCOVERAGE"') self.wait(PROMPT) self.write('D ^%GO') self.wait('Global') self.write('ZZCOVERAGE') self.wait('Global') self.write('') self.wait('Label:') self.write('') self.wait('Format') self.write('ZWR') self.wait('device') self.write(path + '/Coverage/' + filename.replace('.log', '.mcov').replace('.txt', '.mcov')) class ConnectRemoteSSH(ConnectMUMPS): """ This will provide a connection to VistA via SSH. This class handles any remote system (ie: currently there are not multiple versions of it for each remote OS). """ def __init__(self, logfile, instance, namespace, location, remote_conn_details): super(ConnectMUMPS, self).__init__() self.type = str.lower(instance) self.namespace = str.upper(namespace) self.prompt = self.namespace + '>' # Create a new SSH client object client = paramiko.SSHClient() # Set SSH key parameters to auto accept unknown hosts client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # Connect to the host client.connect(hostname=remote_conn_details.remote_address, port=remote_conn_details.remote_port, username=remote_conn_details.username, password=remote_conn_details.password) # Create a client interaction class which will interact with the host from paramikoe import SSHClientInteraction interact = SSHClientInteraction(client, timeout=10, display=False) self.connection = interact self.connection.logfile_read = file(logfile, 'w') self.client = client # apparently there is a deconstructor which disconnects (probably sends a FYN packet) when client is gone def write(self, command): time.sleep(.01) self.connection.send(command + '\r') logging.debug('connection.send:' + command) def wait(self, command, tout=15): time.sleep(.01) logging.debug('connection.expect: ' + str(command)) if command is PROMPT: command = self.namespace + '>' else: command = self.escapeSpecialChars(command) if command == '': command = '.*' # fix for paramiko expect, it does not work with wait('') try: rbuf = self.connection.expect(command, tout) except socket.timeout: rbuf = -1 if rbuf == -1: logging.debug('ERROR: expected: ' + command) print 'ERROR: expected: ' + command raise TestHelper.TestError('ERROR: expected: ' + command) else: return 1 #paramikoe already accept regular expressions as input by default def wait_re(self, command, timeout=30): self.wait(command, timeout) def multiwait(self, options, tout=15): logging.debug('connection.expect: ' + str(options)) temp_options = [] for command in options: temp_options.append(self.escapeSpecialChars(command)) options = temp_options time.sleep(.01) if isinstance(options, list): index = self.connection.expect(options, timeout=tout) if index == -1: logging.debug('ERROR: expected: ' + str(options)) 
raise TestHelper.TestError('ERROR: expected: ' + str(options)) return index else: raise IndexError('Input to multiwait function is not a list') def startCoverage(self, routines=['*']): if self.type == 'cache': self.write('D ^%SYS.MONLBL') rval = self.multiwait(['Stop Monitor', 'Start Monitor']) if rval == 0: self.write('1') self.wait('Start Monitor') self.write('1') elif rval == 1: self.write('1') else: raise TestHelper.TestError('ERROR starting monitor, rbuf: ' + rval) for routine in routines: self.wait('Routine Name') self.write(routine) self.wait('Routine Name', tout=120) self.write('') self.wait('choice') self.write('2') self.wait('choice') self.write('1') self.wait('continue') self.write('\r') else: self.write('K ^ZZCOVERAGE VIEW "TRACE":1:"^ZZCOVERAGE"') def stopCoverage(self, path): if self.type == 'cache': newpath, filename = os.path.split(path) self.write('D ^%SYS.MONLBL') self.wait('choice') self.write('5') self.wait('summary') self.write('Y') self.wait('FileName') self.write(newpath + '/' + filename.replace('.log', '.cmcov')) self.wait('continue') self.write('') self.wait('choice') self.write('1\r') else: path, filename = os.path.split(path) self.write('VIEW "TRACE":0:"^ZZCOVERAGE"') self.wait(PROMPT) self.write('D ^%GO') self.wait('Global') self.write('ZZCOVERAGE') self.wait('Global') self.write('') self.wait('Label:') self.write('') self.wait('Format') self.write('ZWR') self.wait('device') self.write(path + '/' + filename.replace('.log', '.mcov')) """ Added to convert regex's into regular string matching. It replaces special characters such as '?' into '\?' """ def escapeSpecialChars(self, string): re_chars = '?*.+-|^$\()[]{}' escaped_str = '' for c in string: if c in re_chars: escaped_str = escaped_str + '\\' escaped_str += c return escaped_str def ConnectToMUMPS(logfile, instance='CACHE', namespace='VISTA', location='127.0.0.1', remote_conn_details=None): # self.namespace = namespace # self.location = location # print "You are using " + sys.platform # remote connections if remote_conn_details is not None: if no_paramiko: raise no_paramiko return ConnectRemoteSSH(logfile, instance, namespace, location, remote_conn_details) # local connections if sys.platform == 'win32': return ConnectWinCache(logfile, instance, namespace, location) elif sys.platform == 'linux2': if no_pexpect: raise no_pexpect if os.getenv('gtm_dist'): try: return ConnectLinuxGTM(logfile, instance, namespace, location) except pexpect.ExceptionPexpect, no_gtm: if (no_gtm): raise "Cannot find a MUMPS instance" else: try: return ConnectLinuxCache(logfile, instance, namespace, location) except pexpect.ExceptionPexpect, no_cache: if (no_cache): raise "Cannot find a MUMPS instance"
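
# Usage sketch for the connection helpers above (assumed local Cache instance and
# placeholder paths; the exact sign-on dialog and prompts are site-specific, so the
# wait()/write() exchange below is illustrative only). ConnectToMUMPS() picks a
# concrete Connect* class based on the platform, and startCoverage()/stopCoverage()
# bracket a test run.
#
# conn = ConnectToMUMPS('/tmp/vista_smoke.log', instance='CACHE', namespace='VISTA')
# conn.startCoverage(routines=['XU*'])
# conn.wait(PROMPT)                  # wait for the VISTA> prompt
# conn.write('W "HELLO",!')          # trivial write to prove the connection is live
# conn.wait(PROMPT)
# conn.stopCoverage('/tmp/vista_smoke.log')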
""" Perceptual decision-making with postdecision wagering, based on Representation of confidence associated with a decision by neurons in the parietal cortex. R. Kiani & M. N. Shadlen, Science 2009. http://dx.doi.org/10.1126/science.1169405 """ from __future__ import division import numpy as np from pyrl import tasktools # Inputs inputs = tasktools.to_map('FIXATION', 'LEFT', 'RIGHT', 'SURE') # Actions actions = tasktools.to_map('FIXATE', 'CHOOSE-LEFT', 'CHOOSE-RIGHT', 'CHOOSE-SURE') # Trial conditions wagers = [True, False] left_rights = [-1, 1] cohs = [0, 3.2, 6.4, 12.8, 25.6, 51.2] n_conditions = len(wagers) * len(left_rights) * len(cohs) # Training n_gradient = n_conditions n_validation = 50*n_conditions # Input noise sigma = np.sqrt(2*100*0.01) N = 200 # Separate inputs N = 100 Wins = [] for i in xrange(3): Win = np.zeros((len(inputs), N)) Win[inputs['FIXATION']] = 1 Win[inputs['LEFT'], :N//2] = 1 Win[inputs['RIGHT'],:N//2] = 1 Win[inputs['SURE'], N//2:] = 1 Wins.append(Win) Win = np.concatenate(Wins, axis=1) # Durations fixation = 750 stimulus_min = 100 stimulus_mean = 180 stimulus_max = 800 delay_min = 1200 delay_mean = 1350 delay_max = 1800 sure_min = 500 sure_mean = 575 sure_max = 750 decision = 500 tmax = fixation + stimulus_min + stimulus_max + delay_max + decision # Rewards R_ABORTED = -1 R_CORRECT = +1 R_SURE = 0.7*R_CORRECT # Input scaling def scale(coh): return (1 + coh/100)/2 def get_condition(rng, dt, context={}): #------------------------------------------------------------------------------------- # Wager or no wager? #------------------------------------------------------------------------------------- wager = context.get('wager') if wager is None: wager = rng.choice(wagers) #------------------------------------------------------------------------------------- # Epochs #------------------------------------------------------------------------------------- stimulus = context.get('stimulus') if stimulus is None: stimulus = stimulus_min + tasktools.truncated_exponential(rng, dt, stimulus_mean, xmax=stimulus_max) delay = context.get('delay') if delay is None: delay = tasktools.truncated_exponential(rng, dt, delay_mean, xmin=delay_min, xmax=delay_max) if wager: sure_onset = context.get('sure_onset') if sure_onset is None: sure_onset = tasktools.truncated_exponential(rng, dt, sure_mean, xmin=sure_min, xmax=sure_max) durations = { 'fixation': (0, fixation), 'stimulus': (fixation, fixation + stimulus), 'delay': (fixation + stimulus, fixation + stimulus + delay), 'decision': (fixation + stimulus + delay, tmax), 'tmax': tmax } if wager: durations['sure'] = (fixation + stimulus + sure_onset, tmax) time, epochs = tasktools.get_epochs_idx(dt, durations) #------------------------------------------------------------------------------------- # Trial #------------------------------------------------------------------------------------- left_right = context.get('left_right') if left_right is None: left_right = rng.choice(left_rights) coh = context.get('coh') if coh is None: coh = rng.choice(cohs) return { 'durations': durations, 'time': time, 'epochs': epochs, 'wager': wager, 'left_right': left_right, 'coh': coh } def get_step(rng, dt, trial, t, a): #------------------------------------------------------------------------------------- # Reward #------------------------------------------------------------------------------------- epochs = trial['epochs'] status = {'continue': True} reward = 0 if t-1 not in epochs['decision']: if a != actions['FIXATE']: status['continue'] = False reward = 
R_ABORTED elif t-1 in epochs['decision']: if a == actions['CHOOSE-LEFT']: status['continue'] = False status['choice'] = 'L' status['t_choice'] = t-1 status['correct'] = (trial['left_right'] < 0) if status['correct']: reward = R_CORRECT elif a == actions['CHOOSE-RIGHT']: status['continue'] = False status['choice'] = 'R' status['t_choice'] = t-1 status['correct'] = (trial['left_right'] > 0) if status['correct']: reward = R_CORRECT elif a == actions['CHOOSE-SURE']: status['continue'] = False if trial['wager']: status['choice'] = 'S' status['t_choice'] = t-1 reward = R_SURE else: reward = R_ABORTED #------------------------------------------------------------------------------------- # Inputs #------------------------------------------------------------------------------------- if trial['left_right'] < 0: high = inputs['LEFT'] low = inputs['RIGHT'] else: high = inputs['RIGHT'] low = inputs['LEFT'] u = np.zeros(len(inputs)) if t in epochs['fixation'] or t in epochs['stimulus'] or t in epochs['delay']: u[inputs['FIXATION']] = 1 if t in epochs['stimulus']: u[high] = scale(+trial['coh']) + rng.normal(scale=sigma)/np.sqrt(dt) u[low] = scale(-trial['coh']) + rng.normal(scale=sigma)/np.sqrt(dt) if trial['wager'] and t in epochs['sure']: u[inputs['SURE']] = 1 #------------------------------------------------------------------------------------- return u, reward, status from pyrl.performance import PerformancePostdecisionWager as Performance def terminate(perf): p_answer = perf.n_answer/perf.n_trials p_correct = tasktools.divide(perf.n_correct, perf.n_decision) p_sure = tasktools.divide(perf.n_sure, perf.n_sure_decision) return p_answer >= 0.99 and p_correct >= 0.79 and 0.4 < p_sure <= 0.5
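
# Minimal driver sketch (an assumption; pyrl's real training loop lives in the
# library, not in this task file): sample one trial condition, then step it with a
# hard-coded policy that fixates until the decision epoch and then chooses LEFT.
# `dt` (ms per step) and the RandomState seed are illustrative.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    dt  = 10
    trial  = get_condition(rng, dt)
    status = {'continue': True}
    t = 0
    while status.get('continue'):
        t += 1
        if (t - 1) in trial['epochs']['decision']:
            a = actions['CHOOSE-LEFT']
        else:
            a = actions['FIXATE']
        u, reward, status = get_step(rng, dt, trial, t, a)
    print('choice: {}, reward: {}'.format(status.get('choice'), reward))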
import cPickle as pickle from hashlib import md5 from datetime import datetime from functools import partial from django.db import models, router from django.db.models.sql import RawQuery from django.db.models import signals as model_signals from django.core.paginator import EmptyPage, InvalidPage from django.contrib.auth.models import User from django.core.cache import cache as dcache from forkit.models import ForkableModel from forkit import signals from avocado.conf import settings from avocado.models import Field from avocado.store.fields import JSONField from avocado.modeltree import DEFAULT_MODELTREE_ALIAS, trees from avocado.fields import logictree from avocado.columns.cache import cache as column_cache from avocado.columns import utils, format from avocado.utils.paginator import BufferedPaginator from avocado.store import receivers __all__ = ('Scope', 'Perspective', 'Report', 'ObjectSet', 'ObjectSetJoinThrough') PAGE = 1 PAGINATE_BY = 10 CACHE_CHUNK_SIZE = 500 DEFAULT_COLUMNS = getattr(settings, 'COLUMNS', ()) DEFAULT_ORDERING = getattr(settings, 'COLUMN_ORDERING', ()) class Descriptor(ForkableModel): user = models.ForeignKey(User, null=True) name = models.CharField(max_length=100, null=True) description = models.TextField(null=True) keywords = models.CharField(max_length=100, null=True) created = models.DateTimeField(default=datetime.now) modified = models.DateTimeField(default=datetime.now) # explicitly denotes an instance for use in a session session = models.BooleanField(default=False) class Meta(object): abstract = True app_label = 'avocado' def save(self, *args, **kwargs): # since we want to mimic the reference object, we don't want to # update the modified date on save, otherwise they would not be # in a consistent state. this condition will never be true for # object's without a reference. if self.has_changed(): self.modified = datetime.now() super(Descriptor, self).save(*args, **kwargs) def __unicode__(self): if self.session: return u'{0} (session)'.format(self.name or self.pk) return u'{0}'.format(self.name or self.pk) def get_reference_pk(self): if self.reference: return self.reference.pk def references(self, pk): "Compares the reference primary key to the one passed." if self.reference: return self.reference.pk == int(pk) def deference(self, delete=False): if self.reference and delete: self.reference.delete() self.__class__().reset(self) self.reference = None self.save() def diff(self, instance=None, **kwargs): "Override diff to default to ``reference`` if no instance is sepcified." if not instance: if not self.reference: return None instance = self.reference return super(Descriptor, self).diff(instance, **kwargs) def push(self): "Pushes changes from this object to the reference, if one exists." self.reset(self.reference) def has_changed(self): return bool(self.diff()) class Context(Descriptor): """A generic interface for storing an arbitrary context around the data model. The object defining the context must be serializable. """ store = JSONField(null=True) timestamp = models.DateTimeField(editable=False, auto_now=True, default=datetime.now) class Meta(object): abstract = True app_label = 'avocado' def _get_obj(self, obj=None): if obj is None: return self.store or {} return obj def _get_contents(self, obj): """A ``Context`` is driven by the abstraction layer of the ``Field``, ``Criterion`` and ``Column`` classes. Each ``obj`` will be empty or contain data (like primary keys) referring to objects of the former mentioned classes. Returns a list of ``Field`` primary keys. 
""" pass def _parse_contents(self, obj, *args, **kwargs): """Encapsulates any processing that must be performed on ``obj`` and returns a function that takes a queryset and returns a queryset. """ pass def cache_is_valid(self, timestamp=None): if timestamp and timestamp > self.timestamp: return True return False def is_valid(self, obj): """Takes an object and determines if the data structure is valid for this particular context. """ if isinstance(obj, dict): return True return False def read(self): return self._get_obj() def write(self, obj=None, *args, **kwargs): obj = self._get_obj(obj) self.store = obj self.timestamp = datetime.now() def has_permission(self, obj=None, user=None): obj = self._get_obj(obj) field_ids = set([int(i) for i in self._get_contents(obj)]) # if not requesting to see anything, early exit if not field_ids: return True fields = Field.objects.public(user) # filter down to requested fields ids = set(fields.values('id').filter(id__in=field_ids).values_list('id', flat=True)) if len(ids) != len(field_ids) or not all([i in field_ids for i in ids]): return False return True def get_queryset(self, obj=None, queryset=None, using=DEFAULT_MODELTREE_ALIAS, *args, **kwargs): obj = self._get_obj(obj) if queryset is None: queryset = trees[using].get_queryset() func = self._parse_contents(obj, using=using, *args, **kwargs) queryset = func(queryset, *args, **kwargs) return queryset class Scope(Context): "Stores information needed to provide scope to data." count = models.IntegerField(null=True, editable=False, db_column='cnt') # used for book keeping. if a reference exists, this implies this instance # has represents another context. reference = models.ForeignKey('self', null=True) def _get_contents(self, obj): self._node = logictree.transform(obj) return self._node.get_field_ids() def _parse_contents(self, obj, *args, **kwargs): self._node = logictree.transform(obj) return self._node.apply def is_valid(self, obj): if hasattr(self, '_node'): del self._node return super(Scope, self).is_valid(obj) def save(self, *args, **kwargs): self.count = self.get_queryset().distinct().count() super(Scope, self).save(*args, **kwargs) @property def conditions(self): return logictree.transform(self._get_obj()).text class Perspective(Context): # used for book keeping. if a reference exists, this implies this instance # has represents another context. 
reference = models.ForeignKey('self', null=True) def _get_obj(self, obj=None): obj = obj or {} if self.store is not None: copy = self.store.copy() else: copy = {} copy.update(obj) # supply default values if not copy.has_key('columns'): copy['columns'] = list(DEFAULT_COLUMNS) if not copy.has_key('ordering'): copy['ordering'] = list(DEFAULT_ORDERING) copy['columns'] = [int(x) for x in copy['columns']] copy['ordering'] = [(int(x), y) for x, y in copy['ordering']] # ordering of a column cannot exist when the column is not present for i, (x, y) in enumerate(iter(copy['ordering'])): if x not in copy['columns']: copy['ordering'].pop(i) return copy def _get_contents(self, obj): ids = obj['columns'] + [x for x,y in obj['ordering']] # saves a query if not ids: return ids # get all field ids associated with requested columns return Field.objects.filter(column__id__in=set(ids)).values_list('id', flat=True) def _parse_contents(self, obj, *args, **kwargs): def func(queryset, columns=[], ordering=[], *args, **kwargs): queryset = utils.add_columns(queryset, columns, *args, **kwargs) queryset = utils.add_ordering(queryset, ordering, *args, **kwargs) return queryset return partial(func, columns=obj['columns'], ordering=obj['ordering']) def header(self): store = self.read() header = [] for x in store['columns']: c = column_cache.get(x) if c is None: continue o = {'id': x, 'name': c.name, 'direction': ''} for y, z in store['ordering']: if x == y: o['direction'] = z break header.append(o) return header def get_columns_as_fields(self): store = self.read() header = [] for x in store['columns']: c = column_cache.get(x) cfields = c.conceptfields.select_related('field').order_by('order') if len(cfields) == 1: header.append(c.name) else: header.extend([x.name or x.field.name for x in cfields]) return header def format(self, iterable, format_type): store = self.read() rules = utils.column_format_rules(store['columns'], format_type) return format.library.format(iterable, rules, format_type) class Report(Descriptor): "Represents a combination ``scope`` and ``perspective``." REPORT_CACHE_KEY = 'reportcache' scope = models.OneToOneField(Scope) perspective = models.OneToOneField(Perspective) # used for book keeping. if a reference exists, this implies this instance # has represents another context. reference = models.ForeignKey('self', null=True) count = models.IntegerField(null=True, editable=False, db_column='cnt') def _center_cache_offset(self, count, offset, buf_size=CACHE_CHUNK_SIZE): """The ``offset`` will be relative to the next requested row. To ensure a true 'sliding window' of data, the offset must be adjusted to be:: offset - (buf_size / 2) The edge cases will be relative to the min (0) and max number of rows that exist. """ mid = int(buf_size / 2.0) # lower bound if (offset - mid) < 0: offset = 0 # upper bound elif (offset + mid) > count: offset = count - buf_size # in the middle else: offset = offset - mid return offset def _set_queryset_offset_limit(self, queryset, offset, limit): lower = offset upper = offset + limit return queryset[lower:upper] def _execute_raw_query(self, queryset): """Take a ``QuerySet`` object and executes it. No customization or processing of the query should take place here. 
""" using = router.db_for_read(queryset.model) sql, params = queryset.query.get_compiler(using).as_sql() raw = RawQuery(sql, using, params) raw._execute_query() return raw.cursor.fetchall() def deference(self, delete=False): self.scope.deference() self.perspective.deference() if self.reference and delete: # don't pass `delete' param since a signal receiver cleans # up the database self.reference.delete() self.__class__().reset(self) self.reference = None self.save() def paginator_and_page(self, cache, buf_size=CACHE_CHUNK_SIZE): paginator = BufferedPaginator(count=cache['count'], offset=cache['offset'], buf_size=buf_size, per_page=cache['per_page']) try: page = paginator.page(cache['page_num']) except (EmptyPage, InvalidPage): page = paginator.page(paginator.num_pages) return paginator, page def get_datakey(self, request): return md5(request.session._session_key + 'data').hexdigest() def cache_is_valid(self, timestamp=None): if self.scope.cache_is_valid(timestamp) and \ self.perspective.cache_is_valid(timestamp): return True return False # in it's current implementation, this will try to get the requested # page from cache, or re-execute the query and store off the new cache def get_page_from_cache(self, cache, buf_size=CACHE_CHUNK_SIZE): paginator, page = self.paginator_and_page(cache, buf_size) # now we can fetch the data if page.in_cache(): data = dcache.get(cache['datakey']) if data is not None: return page.get_list(pickle.loads(data)) def refresh_cache(self, cache, queryset, adjust_offset=True, buf_size=CACHE_CHUNK_SIZE): """Does not utilize existing cache if it exists. This is an implied cache invalidation mechanism. """ paginator, page = self.paginator_and_page(cache, buf_size) queryset = self._set_queryset_offset_limit(queryset, cache['offset'], buf_size) # since the page is not in cache new data must be requested, therefore # the offset should be re-centered relative to the page offset if adjust_offset: cache['offset'] = self._center_cache_offset(cache['count'], page.offset(), buf_size) data = self._execute_raw_query(queryset) dcache.set(cache['datakey'], pickle.dumps(data)) paginator.offset = cache['offset'] paginator.object_list = data try: page = paginator.page(cache['page_num']) except (EmptyPage, InvalidPage): page = paginator.page(paginator.num_pages) assert page.in_cache() return page.get_list() def update_cache(self, cache, queryset, buf_size=CACHE_CHUNK_SIZE): """Tries to use cache if it exists, this implies that the cache is still valid and a page that is not in cache has been requested. """ paginator, page = self.paginator_and_page(cache, buf_size) # since the page is not in cache new data must be requested, therefore # the offset should be re-centered relative to the page offset cache['offset'] = self._center_cache_offset(cache['count'], page.offset(), buf_size) # determine any overlap between what we have with ``cached_rows`` and # what the ``page`` requires. 
has_overlap, start_term, end_term = paginator.get_overlap(cache['offset']) # we can run a partial query and use some of the existing rows for our # updated cache if has_overlap is False: queryset = self._set_queryset_offset_limit(queryset, *start_term) data = self._execute_raw_query(queryset) else: rdata = dcache.get(cache['datakey']) if rdata is None: return self.refresh_cache(cache, queryset, adjust_offset=False, buf_size=buf_size) data = pickle.loads(rdata) # check to see if there is partial data to be prepended if start_term[0] is not None: tmp = self._set_queryset_offset_limit(queryset, *start_term) partial_data = self._execute_raw_query(tmp) data = partial_data + data[:-start_term[1]] # check to see if there is partial data to be appended if end_term[0] is not None: tmp = self._set_queryset_offset_limit(queryset, *end_term) partial_data = self._execute_raw_query(tmp) data = data[end_term[1]:] + partial_data dcache.set(cache['datakey'], pickle.dumps(data)) paginator.offset = cache['offset'] paginator.object_list = data page = paginator.page(cache['page_num']) assert page.in_cache() return page.get_list() def _get_count(self, queryset): tmp = queryset.all() tmp.query.clear_ordering(True) return tmp.count() def get_queryset(self, timestamp=None, using=DEFAULT_MODELTREE_ALIAS, **context): """Returns a ``QuerySet`` object that is generated from the ``scope`` and ``perspective`` objects bound to this report. This should not be used directly when requesting data since it does not utlize the cache layer. """ unique = count = None queryset = trees[using].get_queryset() queryset = queryset.values(queryset.model._meta.pk.name).distinct() # first argument is ``None`` since we want to use the session objects queryset = self.scope.get_queryset(None, queryset, using=using, **context) unique = self._get_count(queryset) queryset = self.perspective.get_queryset(None, queryset, using=using) count = self._get_count(queryset) if self.count != count: self.count = count self.save() return queryset, unique, count def has_permission(self, user): # ensure the requesting user has permission to view the contents of # both the ``scope`` and ``perspective`` objects # TODO add per-user caching for report objects if self.scope.has_permission(user=user) and self.perspective.has_permission(user=user): return True return False def has_changed(self): return self.scope.has_changed() or self.perspective.has_changed() class ObjectSet(Descriptor): """ Provides a means of saving off a set of objects. `criteria' is persisted so the original can be rebuilt. `removed_ids' is persisted to know which objects have been excluded explicitly from the set. This could be useful when testing for if there are new objects available when additional data has been loaded, while still excluding the removed objects. `ObjectSet' must be subclassed to add the many-to-many relationship to the "object" of interest. """ scope = models.OneToOneField(Scope, editable=False) count = models.PositiveIntegerField(null=True, editable=False, db_column='cnt') class Meta(object): abstract = True def save(self, *args, **kwargs): if not self.created: self.created = datetime.now() self.modified = datetime.now() super(ObjectSet, self).save(*args, **kwargs) class ObjectSetJoinThrough(models.Model): """ Adds additional information about the objects that have been ``added`` and ``removed`` from the original set. 
    For instance, additional objects that are added but do not match the
    conditions currently associated with the ``ObjectSet`` should be flagged
    as ``added``. If in the future they match the conditions, the flag can be
    removed. Any objects that are removed from the set should be marked as
    ``removed`` even if they were added at one time. This is to keep track of
    the objects that have been explicitly removed from the set.
    """
    removed = models.BooleanField(default=False)
    added = models.BooleanField(default=False)

    class Meta(object):
        abstract = True


signals.pre_diff.connect(receivers.descriptor_pre_diff, sender=Scope)
signals.pre_reset.connect(receivers.descriptor_pre_reset, sender=Scope)
signals.pre_fork.connect(receivers.descriptor_pre_fork, sender=Scope)
signals.post_fork.connect(receivers.descriptor_post_fork, sender=Scope)
signals.pre_commit.connect(receivers.descriptor_pre_commit, sender=Scope)
signals.post_commit.connect(receivers.descriptor_post_commit, sender=Scope)

signals.pre_diff.connect(receivers.descriptor_pre_diff, sender=Perspective)
signals.pre_reset.connect(receivers.descriptor_pre_reset, sender=Perspective)
signals.pre_fork.connect(receivers.descriptor_pre_fork, sender=Perspective)
signals.post_fork.connect(receivers.descriptor_post_fork, sender=Perspective)
signals.pre_commit.connect(receivers.descriptor_pre_commit, sender=Perspective)
signals.post_commit.connect(receivers.descriptor_post_commit, sender=Perspective)

signals.pre_diff.connect(receivers.report_pre_diff, sender=Report)
signals.pre_reset.connect(receivers.report_pre_reset, sender=Report)
signals.pre_fork.connect(receivers.report_pre_fork, sender=Report)
signals.post_fork.connect(receivers.descriptor_post_fork, sender=Report)
signals.pre_commit.connect(receivers.descriptor_pre_commit, sender=Report)
signals.post_commit.connect(receivers.descriptor_post_commit, sender=Report)

model_signals.post_delete.connect(receivers.report_post_delete, sender=Report)
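

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the models above). The caching methods
# above keep a buffer of ``CACHE_CHUNK_SIZE`` rows re-centered on the
# requested page so that neighbouring page requests can be served from cache.
# The helper below is a hypothetical, self-contained version of that
# centering step; the real ``_center_cache_offset`` may differ in detail.

def center_cache_offset(total_count, page_offset, buf_size, per_page=10):
    """Return the row offset of a ``buf_size`` window roughly centered on
    the page starting at ``page_offset``, clamped to the valid range."""
    # start about half a buffer before the page, but never before row 0
    offset = max(page_offset + per_page // 2 - buf_size // 2, 0)
    # never start so late that the window would run past the last row
    offset = min(offset, max(total_count - buf_size, 0))
    return offset

# Example: with 1000 rows, a 500-row buffer and a page starting at row 700,
# center_cache_offset(1000, 700, 500) returns 455, so the buffer covers rows
# 455-954 and the requested page sits near the middle of the buffer.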
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time import shutil import itertools import unittest import mock import random import sqlite3 from swift.common import db_replicator from swift.container import replicator, backend, server, sync_store from swift.container.reconciler import ( MISPLACED_OBJECTS_ACCOUNT, get_reconciler_container_name) from swift.common.utils import Timestamp, encode_timestamps from swift.common.storage_policy import POLICIES from test.unit.common import test_db_replicator from test.unit import patch_policies, make_timestamp_iter, mock_check_drive from contextlib import contextmanager @patch_policies class TestReplicatorSync(test_db_replicator.TestReplicatorSync): backend = backend.ContainerBroker datadir = server.DATADIR replicator_daemon = replicator.ContainerReplicator replicator_rpc = replicator.ContainerReplicatorRpc def test_report_up_to_date(self): broker = self._get_broker('a', 'c', node_index=0) broker.initialize(Timestamp(1).internal, int(POLICIES.default)) info = broker.get_info() broker.reported(info['put_timestamp'], info['delete_timestamp'], info['object_count'], info['bytes_used']) full_info = broker.get_replication_info() expected_info = {'put_timestamp': Timestamp(1).internal, 'delete_timestamp': '0', 'count': 0, 'bytes_used': 0, 'reported_put_timestamp': Timestamp(1).internal, 'reported_delete_timestamp': '0', 'reported_object_count': 0, 'reported_bytes_used': 0} for key, value in expected_info.items(): msg = 'expected value for %r, %r != %r' % ( key, full_info[key], value) self.assertEqual(full_info[key], value, msg) repl = replicator.ContainerReplicator({}) self.assertTrue(repl.report_up_to_date(full_info)) full_info['delete_timestamp'] = Timestamp(2).internal self.assertFalse(repl.report_up_to_date(full_info)) full_info['reported_delete_timestamp'] = Timestamp(2).internal self.assertTrue(repl.report_up_to_date(full_info)) full_info['count'] = 1 self.assertFalse(repl.report_up_to_date(full_info)) full_info['reported_object_count'] = 1 self.assertTrue(repl.report_up_to_date(full_info)) full_info['bytes_used'] = 1 self.assertFalse(repl.report_up_to_date(full_info)) full_info['reported_bytes_used'] = 1 self.assertTrue(repl.report_up_to_date(full_info)) full_info['put_timestamp'] = Timestamp(3).internal self.assertFalse(repl.report_up_to_date(full_info)) full_info['reported_put_timestamp'] = Timestamp(3).internal self.assertTrue(repl.report_up_to_date(full_info)) def test_sync_remote_in_sync(self): # setup a local container broker = self._get_broker('a', 'c', node_index=0) put_timestamp = time.time() broker.initialize(put_timestamp, POLICIES.default.idx) # "replicate" to same database node = {'device': 'sdb', 'replication_ip': '127.0.0.1'} daemon = replicator.ContainerReplicator({}) # replicate part, node = self._get_broker_part_node(broker) info = broker.get_replication_info() success = daemon._repl_to_node(node, broker, part, info) # nothing to do self.assertTrue(success) self.assertEqual(1, 
daemon.stats['no_change']) def test_sync_remote_with_timings(self): ts_iter = make_timestamp_iter() # setup a local container broker = self._get_broker('a', 'c', node_index=0) put_timestamp = next(ts_iter) broker.initialize(put_timestamp.internal, POLICIES.default.idx) broker.update_metadata( {'x-container-meta-test': ('foo', put_timestamp.internal)}) # setup remote container remote_broker = self._get_broker('a', 'c', node_index=1) remote_broker.initialize(next(ts_iter).internal, POLICIES.default.idx) timestamp = next(ts_iter) for db in (broker, remote_broker): db.put_object( '/a/c/o', timestamp.internal, 0, 'content-type', 'etag', storage_policy_index=db.storage_policy_index) # replicate daemon = replicator.ContainerReplicator({}) part, node = self._get_broker_part_node(remote_broker) info = broker.get_replication_info() with mock.patch.object(db_replicator, 'DEBUG_TIMINGS_THRESHOLD', -1): success = daemon._repl_to_node(node, broker, part, info) # nothing to do self.assertTrue(success) self.assertEqual(1, daemon.stats['no_change']) expected_timings = ('info', 'update_metadata', 'merge_timestamps', 'get_sync', 'merge_syncs') debug_lines = self.rpc.logger.logger.get_lines_for_level('debug') self.assertEqual(len(expected_timings), len(debug_lines), 'Expected %s debug lines but only got %s: %s' % (len(expected_timings), len(debug_lines), debug_lines)) for metric in expected_timings: expected = 'replicator-rpc-sync time for %s:' % metric self.assertTrue(any(expected in line for line in debug_lines), 'debug timing %r was not in %r' % ( expected, debug_lines)) def test_sync_remote_missing(self): broker = self._get_broker('a', 'c', node_index=0) put_timestamp = time.time() broker.initialize(put_timestamp, POLICIES.default.idx) # "replicate" part, node = self._get_broker_part_node(broker) daemon = self._run_once(node) # complete rsync to all other nodes self.assertEqual(2, daemon.stats['rsync']) for i in range(1, 3): remote_broker = self._get_broker('a', 'c', node_index=i) self.assertTrue(os.path.exists(remote_broker.db_file)) remote_info = remote_broker.get_info() local_info = self._get_broker( 'a', 'c', node_index=0).get_info() for k, v in local_info.items(): if k == 'id': continue self.assertEqual(remote_info[k], v, "mismatch remote %s %r != %r" % ( k, remote_info[k], v)) def test_rsync_failure(self): broker = self._get_broker('a', 'c', node_index=0) put_timestamp = time.time() broker.initialize(put_timestamp, POLICIES.default.idx) # "replicate" to different device daemon = replicator.ContainerReplicator({}) def _rsync_file(*args, **kwargs): return False daemon._rsync_file = _rsync_file # replicate part, local_node = self._get_broker_part_node(broker) node = random.choice([n for n in self._ring.devs if n['id'] != local_node['id']]) info = broker.get_replication_info() with mock_check_drive(ismount=True): success = daemon._repl_to_node(node, broker, part, info) self.assertFalse(success) def test_sync_remote_missing_most_rows(self): put_timestamp = time.time() # create "local" broker broker = self._get_broker('a', 'c', node_index=0) broker.initialize(put_timestamp, POLICIES.default.idx) # create "remote" broker remote_broker = self._get_broker('a', 'c', node_index=1) remote_broker.initialize(put_timestamp, POLICIES.default.idx) # add a row to "local" db broker.put_object('/a/c/o', time.time(), 0, 'content-type', 'etag', storage_policy_index=broker.storage_policy_index) # replicate node = {'device': 'sdc', 'replication_ip': '127.0.0.1'} daemon = replicator.ContainerReplicator({'per_diff': 1}) def 
_rsync_file(db_file, remote_file, **kwargs): remote_server, remote_path = remote_file.split('/', 1) dest_path = os.path.join(self.root, remote_path) shutil.copy(db_file, dest_path) return True daemon._rsync_file = _rsync_file part, node = self._get_broker_part_node(remote_broker) info = broker.get_replication_info() success = daemon._repl_to_node(node, broker, part, info) self.assertTrue(success) # row merge self.assertEqual(1, daemon.stats['remote_merge']) local_info = self._get_broker( 'a', 'c', node_index=0).get_info() remote_info = self._get_broker( 'a', 'c', node_index=1).get_info() for k, v in local_info.items(): if k == 'id': continue self.assertEqual(remote_info[k], v, "mismatch remote %s %r != %r" % ( k, remote_info[k], v)) def test_sync_remote_missing_one_rows(self): put_timestamp = time.time() # create "local" broker broker = self._get_broker('a', 'c', node_index=0) broker.initialize(put_timestamp, POLICIES.default.idx) # create "remote" broker remote_broker = self._get_broker('a', 'c', node_index=1) remote_broker.initialize(put_timestamp, POLICIES.default.idx) # add some rows to both db for i in range(10): put_timestamp = time.time() for db in (broker, remote_broker): path = '/a/c/o_%s' % i db.put_object(path, put_timestamp, 0, 'content-type', 'etag', storage_policy_index=db.storage_policy_index) # now a row to the "local" broker only broker.put_object('/a/c/o_missing', time.time(), 0, 'content-type', 'etag', storage_policy_index=broker.storage_policy_index) # replicate daemon = replicator.ContainerReplicator({}) part, node = self._get_broker_part_node(remote_broker) info = broker.get_replication_info() success = daemon._repl_to_node(node, broker, part, info) self.assertTrue(success) # row merge self.assertEqual(1, daemon.stats['diff']) local_info = self._get_broker( 'a', 'c', node_index=0).get_info() remote_info = self._get_broker( 'a', 'c', node_index=1).get_info() for k, v in local_info.items(): if k == 'id': continue self.assertEqual(remote_info[k], v, "mismatch remote %s %r != %r" % ( k, remote_info[k], v)) def test_sync_remote_can_not_keep_up(self): put_timestamp = time.time() # create "local" broker broker = self._get_broker('a', 'c', node_index=0) broker.initialize(put_timestamp, POLICIES.default.idx) # create "remote" broker remote_broker = self._get_broker('a', 'c', node_index=1) remote_broker.initialize(put_timestamp, POLICIES.default.idx) # add some rows to both db's for i in range(10): put_timestamp = time.time() for db in (broker, remote_broker): obj_name = 'o_%s' % i db.put_object(obj_name, put_timestamp, 0, 'content-type', 'etag', storage_policy_index=db.storage_policy_index) # setup REPLICATE callback to simulate adding rows during merge_items missing_counter = itertools.count() def put_more_objects(op, *args): if op != 'merge_items': return path = '/a/c/o_missing_%s' % next(missing_counter) broker.put_object(path, time.time(), 0, 'content-type', 'etag', storage_policy_index=db.storage_policy_index) test_db_replicator.FakeReplConnection = \ test_db_replicator.attach_fake_replication_rpc( self.rpc, replicate_hook=put_more_objects) db_replicator.ReplConnection = test_db_replicator.FakeReplConnection # and add one extra to local db to trigger merge_items put_more_objects('merge_items') # limit number of times we'll call merge_items daemon = replicator.ContainerReplicator({'max_diffs': 10}) # replicate part, node = self._get_broker_part_node(remote_broker) info = broker.get_replication_info() success = daemon._repl_to_node(node, broker, part, info) 
self.assertFalse(success) # back off on the PUTs during replication... FakeReplConnection = test_db_replicator.attach_fake_replication_rpc( self.rpc, replicate_hook=None) db_replicator.ReplConnection = FakeReplConnection # retry replication info = broker.get_replication_info() success = daemon._repl_to_node(node, broker, part, info) self.assertTrue(success) # row merge self.assertEqual(2, daemon.stats['diff']) self.assertEqual(1, daemon.stats['diff_capped']) local_info = self._get_broker( 'a', 'c', node_index=0).get_info() remote_info = self._get_broker( 'a', 'c', node_index=1).get_info() for k, v in local_info.items(): if k == 'id': continue self.assertEqual(remote_info[k], v, "mismatch remote %s %r != %r" % ( k, remote_info[k], v)) def test_diff_capped_sync(self): ts = (Timestamp(t).internal for t in itertools.count(int(time.time()))) put_timestamp = next(ts) # start off with with a local db that is way behind broker = self._get_broker('a', 'c', node_index=0) broker.initialize(put_timestamp, POLICIES.default.idx) for i in range(50): broker.put_object( 'o%s' % i, next(ts), 0, 'content-type-old', 'etag', storage_policy_index=broker.storage_policy_index) # remote primary db has all the new bits... remote_broker = self._get_broker('a', 'c', node_index=1) remote_broker.initialize(put_timestamp, POLICIES.default.idx) for i in range(100): remote_broker.put_object( 'o%s' % i, next(ts), 0, 'content-type-new', 'etag', storage_policy_index=remote_broker.storage_policy_index) # except there's *one* tiny thing in our local broker that's newer broker.put_object( 'o101', next(ts), 0, 'content-type-new', 'etag', storage_policy_index=broker.storage_policy_index) # setup daemon with smaller per_diff and max_diffs part, node = self._get_broker_part_node(broker) daemon = self._get_daemon(node, conf_updates={'per_diff': 10, 'max_diffs': 3}) self.assertEqual(daemon.per_diff, 10) self.assertEqual(daemon.max_diffs, 3) # run once and verify diff capped self._run_once(node, daemon=daemon) self.assertEqual(1, daemon.stats['diff']) self.assertEqual(1, daemon.stats['diff_capped']) # run again and verify fully synced self._run_once(node, daemon=daemon) self.assertEqual(1, daemon.stats['diff']) self.assertEqual(0, daemon.stats['diff_capped']) # now that we're synced the new item should be in remote db remote_names = set() for item in remote_broker.list_objects_iter(500, '', '', '', ''): name, ts, size, content_type, etag = item remote_names.add(name) self.assertEqual(content_type, 'content-type-new') self.assertTrue('o101' in remote_names) self.assertEqual(len(remote_names), 101) self.assertEqual(remote_broker.get_info()['object_count'], 101) def test_sync_status_change(self): # setup a local container broker = self._get_broker('a', 'c', node_index=0) put_timestamp = time.time() broker.initialize(put_timestamp, POLICIES.default.idx) # setup remote container remote_broker = self._get_broker('a', 'c', node_index=1) remote_broker.initialize(put_timestamp, POLICIES.default.idx) # delete local container broker.delete_db(time.time()) # replicate daemon = replicator.ContainerReplicator({}) part, node = self._get_broker_part_node(remote_broker) info = broker.get_replication_info() success = daemon._repl_to_node(node, broker, part, info) # nothing to do self.assertTrue(success) self.assertEqual(1, daemon.stats['no_change']) # status in sync self.assertTrue(remote_broker.is_deleted()) info = broker.get_info() remote_info = remote_broker.get_info() self.assertTrue(Timestamp(remote_info['status_changed_at']) > 
Timestamp(remote_info['put_timestamp']), 'remote status_changed_at (%s) is not ' 'greater than put_timestamp (%s)' % ( remote_info['status_changed_at'], remote_info['put_timestamp'])) self.assertTrue(Timestamp(remote_info['status_changed_at']) > Timestamp(info['status_changed_at']), 'remote status_changed_at (%s) is not ' 'greater than local status_changed_at (%s)' % ( remote_info['status_changed_at'], info['status_changed_at'])) @contextmanager def _wrap_merge_timestamps(self, broker, calls): def fake_merge_timestamps(*args, **kwargs): calls.append(args[0]) orig_merge_timestamps(*args, **kwargs) orig_merge_timestamps = broker.merge_timestamps broker.merge_timestamps = fake_merge_timestamps try: yield True finally: broker.merge_timestamps = orig_merge_timestamps def test_sync_merge_timestamps(self): ts = (Timestamp(t).internal for t in itertools.count(int(time.time()))) # setup a local container broker = self._get_broker('a', 'c', node_index=0) put_timestamp = next(ts) broker.initialize(put_timestamp, POLICIES.default.idx) # setup remote container remote_broker = self._get_broker('a', 'c', node_index=1) remote_put_timestamp = next(ts) remote_broker.initialize(remote_put_timestamp, POLICIES.default.idx) # replicate, expect call to merge_timestamps on remote and local daemon = replicator.ContainerReplicator({}) part, node = self._get_broker_part_node(remote_broker) info = broker.get_replication_info() local_calls = [] remote_calls = [] with self._wrap_merge_timestamps(broker, local_calls): with self._wrap_merge_timestamps(broker, remote_calls): success = daemon._repl_to_node(node, broker, part, info) self.assertTrue(success) self.assertEqual(1, len(remote_calls)) self.assertEqual(1, len(local_calls)) self.assertEqual(remote_put_timestamp, broker.get_info()['put_timestamp']) self.assertEqual(remote_put_timestamp, remote_broker.get_info()['put_timestamp']) # replicate again, no changes so expect no calls to merge_timestamps info = broker.get_replication_info() local_calls = [] remote_calls = [] with self._wrap_merge_timestamps(broker, local_calls): with self._wrap_merge_timestamps(broker, remote_calls): success = daemon._repl_to_node(node, broker, part, info) self.assertTrue(success) self.assertEqual(0, len(remote_calls)) self.assertEqual(0, len(local_calls)) self.assertEqual(remote_put_timestamp, broker.get_info()['put_timestamp']) self.assertEqual(remote_put_timestamp, remote_broker.get_info()['put_timestamp']) def test_sync_bogus_db_quarantines(self): ts = (Timestamp(t).internal for t in itertools.count(int(time.time()))) policy = random.choice(list(POLICIES)) # create "local" broker local_broker = self._get_broker('a', 'c', node_index=0) local_broker.initialize(next(ts), policy.idx) # create "remote" broker remote_broker = self._get_broker('a', 'c', node_index=1) remote_broker.initialize(next(ts), policy.idx) db_path = local_broker.db_file self.assertTrue(os.path.exists(db_path)) # sanity check old_inode = os.stat(db_path).st_ino _orig_get_info = backend.ContainerBroker.get_info def fail_like_bad_db(broker): if broker.db_file == local_broker.db_file: raise sqlite3.OperationalError("no such table: container_info") else: return _orig_get_info(broker) part, node = self._get_broker_part_node(remote_broker) with mock.patch('swift.container.backend.ContainerBroker.get_info', fail_like_bad_db): # Have the remote node replicate to local; local should see its # corrupt DB, quarantine it, and act like the DB wasn't ever there # in the first place. 
daemon = self._run_once(node) self.assertTrue(os.path.exists(db_path)) # Make sure we didn't just keep the old DB, but quarantined it and # made a fresh copy. new_inode = os.stat(db_path).st_ino self.assertNotEqual(old_inode, new_inode) self.assertEqual(daemon.stats['failure'], 0) def _replication_scenarios(self, *scenarios, **kwargs): remote_wins = kwargs.get('remote_wins', False) # these tests are duplicated because of the differences in replication # when row counts cause full rsync vs. merge scenarios = scenarios or ( 'no_row', 'local_row', 'remote_row', 'both_rows') for scenario_name in scenarios: ts = itertools.count(int(time.time())) policy = random.choice(list(POLICIES)) remote_policy = random.choice( [p for p in POLICIES if p is not policy]) broker = self._get_broker('a', 'c', node_index=0) remote_broker = self._get_broker('a', 'c', node_index=1) yield ts, policy, remote_policy, broker, remote_broker # variations on different replication scenarios variations = { 'no_row': (), 'local_row': (broker,), 'remote_row': (remote_broker,), 'both_rows': (broker, remote_broker), } dbs = variations[scenario_name] obj_ts = next(ts) for db in dbs: db.put_object('/a/c/o', obj_ts, 0, 'content-type', 'etag', storage_policy_index=db.storage_policy_index) # replicate part, node = self._get_broker_part_node(broker) daemon = self._run_once(node) self.assertEqual(0, daemon.stats['failure']) # in sync local_info = self._get_broker( 'a', 'c', node_index=0).get_info() remote_info = self._get_broker( 'a', 'c', node_index=1).get_info() if remote_wins: expected = remote_policy.idx err = 'local policy did not change to match remote ' \ 'for replication row scenario %s' % scenario_name else: expected = policy.idx err = 'local policy changed to match remote ' \ 'for replication row scenario %s' % scenario_name self.assertEqual(local_info['storage_policy_index'], expected, err) self.assertEqual(remote_info['storage_policy_index'], local_info['storage_policy_index']) test_db_replicator.TestReplicatorSync.tearDown(self) test_db_replicator.TestReplicatorSync.setUp(self) def test_sync_local_create_policy_over_newer_remote_create(self): for setup in self._replication_scenarios(): ts, policy, remote_policy, broker, remote_broker = setup # create "local" broker broker.initialize(next(ts), policy.idx) # create "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) def test_sync_local_create_policy_over_newer_remote_delete(self): for setup in self._replication_scenarios(): ts, policy, remote_policy, broker, remote_broker = setup # create older "local" broker broker.initialize(next(ts), policy.idx) # create "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) # delete "remote" broker remote_broker.delete_db(next(ts)) def test_sync_local_create_policy_over_older_remote_delete(self): # remote_row & both_rows cases are covered by # "test_sync_remote_half_delete_policy_over_newer_local_create" for setup in self._replication_scenarios( 'no_row', 'local_row'): ts, policy, remote_policy, broker, remote_broker = setup # create older "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) # delete older "remote" broker remote_broker.delete_db(next(ts)) # create "local" broker broker.initialize(next(ts), policy.idx) def test_sync_local_half_delete_policy_over_newer_remote_create(self): # no_row & remote_row cases are covered by # "test_sync_remote_create_policy_over_older_local_delete" for setup in self._replication_scenarios('local_row', 'both_rows'): ts, policy, remote_policy, broker, 
remote_broker = setup # create older "local" broker broker.initialize(next(ts), policy.idx) # half delete older "local" broker broker.delete_db(next(ts)) # create "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) def test_sync_local_recreate_policy_over_newer_remote_create(self): for setup in self._replication_scenarios(): ts, policy, remote_policy, broker, remote_broker = setup # create "local" broker broker.initialize(next(ts), policy.idx) # older recreate "local" broker broker.delete_db(next(ts)) recreate_timestamp = next(ts) broker.update_put_timestamp(recreate_timestamp) broker.update_status_changed_at(recreate_timestamp) # create "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) def test_sync_local_recreate_policy_over_older_remote_create(self): for setup in self._replication_scenarios(): ts, policy, remote_policy, broker, remote_broker = setup # create older "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) # create "local" broker broker.initialize(next(ts), policy.idx) # recreate "local" broker broker.delete_db(next(ts)) recreate_timestamp = next(ts) broker.update_put_timestamp(recreate_timestamp) broker.update_status_changed_at(recreate_timestamp) def test_sync_local_recreate_policy_over_newer_remote_delete(self): for setup in self._replication_scenarios(): ts, policy, remote_policy, broker, remote_broker = setup # create "local" broker broker.initialize(next(ts), policy.idx) # create "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) # recreate "local" broker broker.delete_db(next(ts)) recreate_timestamp = next(ts) broker.update_put_timestamp(recreate_timestamp) broker.update_status_changed_at(recreate_timestamp) # older delete "remote" broker remote_broker.delete_db(next(ts)) def test_sync_local_recreate_policy_over_older_remote_delete(self): for setup in self._replication_scenarios(): ts, policy, remote_policy, broker, remote_broker = setup # create "local" broker broker.initialize(next(ts), policy.idx) # create "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) # older delete "remote" broker remote_broker.delete_db(next(ts)) # recreate "local" broker broker.delete_db(next(ts)) recreate_timestamp = next(ts) broker.update_put_timestamp(recreate_timestamp) broker.update_status_changed_at(recreate_timestamp) def test_sync_local_recreate_policy_over_older_remote_recreate(self): for setup in self._replication_scenarios(): ts, policy, remote_policy, broker, remote_broker = setup # create "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) # create "local" broker broker.initialize(next(ts), policy.idx) # older recreate "remote" broker remote_broker.delete_db(next(ts)) remote_recreate_timestamp = next(ts) remote_broker.update_put_timestamp(remote_recreate_timestamp) remote_broker.update_status_changed_at(remote_recreate_timestamp) # recreate "local" broker broker.delete_db(next(ts)) local_recreate_timestamp = next(ts) broker.update_put_timestamp(local_recreate_timestamp) broker.update_status_changed_at(local_recreate_timestamp) def test_sync_remote_create_policy_over_newer_local_create(self): for setup in self._replication_scenarios(remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create older "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) # create "local" broker broker.initialize(next(ts), policy.idx) def test_sync_remote_create_policy_over_newer_local_delete(self): for setup in 
self._replication_scenarios(remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create older "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) # create "local" broker broker.initialize(next(ts), policy.idx) # delete "local" broker broker.delete_db(next(ts)) def test_sync_remote_create_policy_over_older_local_delete(self): # local_row & both_rows cases are covered by # "test_sync_local_half_delete_policy_over_newer_remote_create" for setup in self._replication_scenarios( 'no_row', 'remote_row', remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create older "local" broker broker.initialize(next(ts), policy.idx) # delete older "local" broker broker.delete_db(next(ts)) # create "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) def test_sync_remote_half_delete_policy_over_newer_local_create(self): # no_row & both_rows cases are covered by # "test_sync_local_create_policy_over_older_remote_delete" for setup in self._replication_scenarios('remote_row', 'both_rows', remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create older "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) # half delete older "remote" broker remote_broker.delete_db(next(ts)) # create "local" broker broker.initialize(next(ts), policy.idx) def test_sync_remote_recreate_policy_over_newer_local_create(self): for setup in self._replication_scenarios(remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) # older recreate "remote" broker remote_broker.delete_db(next(ts)) recreate_timestamp = next(ts) remote_broker.update_put_timestamp(recreate_timestamp) remote_broker.update_status_changed_at(recreate_timestamp) # create "local" broker broker.initialize(next(ts), policy.idx) def test_sync_remote_recreate_policy_over_older_local_create(self): for setup in self._replication_scenarios(remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create older "local" broker broker.initialize(next(ts), policy.idx) # create "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) # recreate "remote" broker remote_broker.delete_db(next(ts)) recreate_timestamp = next(ts) remote_broker.update_put_timestamp(recreate_timestamp) remote_broker.update_status_changed_at(recreate_timestamp) def test_sync_remote_recreate_policy_over_newer_local_delete(self): for setup in self._replication_scenarios(remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create "local" broker broker.initialize(next(ts), policy.idx) # create "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) # recreate "remote" broker remote_broker.delete_db(next(ts)) remote_recreate_timestamp = next(ts) remote_broker.update_put_timestamp(remote_recreate_timestamp) remote_broker.update_status_changed_at(remote_recreate_timestamp) # older delete "local" broker broker.delete_db(next(ts)) def test_sync_remote_recreate_policy_over_older_local_delete(self): for setup in self._replication_scenarios(remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create "local" broker broker.initialize(next(ts), policy.idx) # create "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) # older delete "local" broker broker.delete_db(next(ts)) # recreate "remote" broker remote_broker.delete_db(next(ts)) remote_recreate_timestamp = next(ts) 
remote_broker.update_put_timestamp(remote_recreate_timestamp) remote_broker.update_status_changed_at(remote_recreate_timestamp) def test_sync_remote_recreate_policy_over_older_local_recreate(self): for setup in self._replication_scenarios(remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create older "local" broker broker.initialize(next(ts), policy.idx) # create "remote" broker remote_broker.initialize(next(ts), remote_policy.idx) # older recreate "local" broker broker.delete_db(next(ts)) local_recreate_timestamp = next(ts) broker.update_put_timestamp(local_recreate_timestamp) broker.update_status_changed_at(local_recreate_timestamp) # recreate "remote" broker remote_broker.delete_db(next(ts)) remote_recreate_timestamp = next(ts) remote_broker.update_put_timestamp(remote_recreate_timestamp) remote_broker.update_status_changed_at(remote_recreate_timestamp) def test_sync_to_remote_with_misplaced(self): ts = (Timestamp(t).internal for t in itertools.count(int(time.time()))) # create "local" broker policy = random.choice(list(POLICIES)) broker = self._get_broker('a', 'c', node_index=0) broker.initialize(next(ts), policy.idx) # create "remote" broker remote_policy = random.choice([p for p in POLICIES if p is not policy]) remote_broker = self._get_broker('a', 'c', node_index=1) remote_broker.initialize(next(ts), remote_policy.idx) # add misplaced row to remote_broker remote_broker.put_object( '/a/c/o', next(ts), 0, 'content-type', 'etag', storage_policy_index=remote_broker.storage_policy_index) # since this row matches policy index or remote, it shows up in count self.assertEqual(remote_broker.get_info()['object_count'], 1) self.assertEqual([], remote_broker.get_misplaced_since(-1, 1)) # replicate part, node = self._get_broker_part_node(broker) daemon = self._run_once(node) # since our local broker has no rows to push it logs as no_change self.assertEqual(1, daemon.stats['no_change']) self.assertEqual(0, broker.get_info()['object_count']) # remote broker updates it's policy index; this makes the remote # broker's object count change info = remote_broker.get_info() expectations = { 'object_count': 0, 'storage_policy_index': policy.idx, } for key, value in expectations.items(): self.assertEqual(info[key], value) # but it also knows those objects are misplaced now misplaced = remote_broker.get_misplaced_since(-1, 100) self.assertEqual(len(misplaced), 1) # we also pushed out to node 3 with rsync self.assertEqual(1, daemon.stats['rsync']) third_broker = self._get_broker('a', 'c', node_index=2) info = third_broker.get_info() for key, value in expectations.items(): self.assertEqual(info[key], value) def test_misplaced_rows_replicate_and_enqueue(self): # force all timestamps to fall in same hour ts = (Timestamp(t) for t in itertools.count(int(time.time()) // 3600 * 3600)) policy = random.choice(list(POLICIES)) broker = self._get_broker('a', 'c', node_index=0) broker.initialize(next(ts).internal, policy.idx) remote_policy = random.choice([p for p in POLICIES if p is not policy]) remote_broker = self._get_broker('a', 'c', node_index=1) remote_broker.initialize(next(ts).internal, remote_policy.idx) # add a misplaced row to *local* broker obj_put_timestamp = next(ts).internal broker.put_object( 'o', obj_put_timestamp, 0, 'content-type', 'etag', storage_policy_index=remote_policy.idx) misplaced = broker.get_misplaced_since(-1, 10) self.assertEqual(len(misplaced), 1) # since this row is misplaced it doesn't show up in count self.assertEqual(broker.get_info()['object_count'], 0) # 
add another misplaced row to *local* broker with composite timestamp ts_data = next(ts) ts_ctype = next(ts) ts_meta = next(ts) broker.put_object( 'o2', ts_data.internal, 0, 'content-type', 'etag', storage_policy_index=remote_policy.idx, ctype_timestamp=ts_ctype.internal, meta_timestamp=ts_meta.internal) misplaced = broker.get_misplaced_since(-1, 10) self.assertEqual(len(misplaced), 2) # since this row is misplaced it doesn't show up in count self.assertEqual(broker.get_info()['object_count'], 0) # replicate part, node = self._get_broker_part_node(broker) daemon = self._run_once(node) # push to remote, and third node was missing (also maybe reconciler) self.assertTrue(2 < daemon.stats['rsync'] <= 3, daemon.stats['rsync']) # grab the rsynced instance of remote_broker remote_broker = self._get_broker('a', 'c', node_index=1) # remote has misplaced rows too now misplaced = remote_broker.get_misplaced_since(-1, 10) self.assertEqual(len(misplaced), 2) # and the correct policy_index and object_count info = remote_broker.get_info() expectations = { 'object_count': 0, 'storage_policy_index': policy.idx, } for key, value in expectations.items(): self.assertEqual(info[key], value) # and we should have also enqueued these rows in a single reconciler, # since we forced the object timestamps to be in the same hour. reconciler = daemon.get_reconciler_broker(misplaced[0]['created_at']) # but it may not be on the same node as us anymore though... reconciler = self._get_broker(reconciler.account, reconciler.container, node_index=0) self.assertEqual(reconciler.get_info()['object_count'], 2) objects = reconciler.list_objects_iter( 10, '', None, None, None, None, storage_policy_index=0) self.assertEqual(len(objects), 2) expected = ('%s:/a/c/o' % remote_policy.idx, obj_put_timestamp, 0, 'application/x-put', obj_put_timestamp) self.assertEqual(objects[0], expected) # the second object's listing has ts_meta as its last modified time # but its full composite timestamp is in the hash field. 
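        # (``encode_timestamps`` packs the data, content-type and metadata
        # timestamps into a single string, encoding the latter two as offsets
        # from the first, so all three survive the single listing column.)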
expected = ('%s:/a/c/o2' % remote_policy.idx, ts_meta.internal, 0, 'application/x-put', encode_timestamps(ts_data, ts_ctype, ts_meta)) self.assertEqual(objects[1], expected) # having safely enqueued to the reconciler we can advance # our sync pointer self.assertEqual(broker.get_reconciler_sync(), 2) def test_multiple_out_sync_reconciler_enqueue_normalize(self): ts = (Timestamp(t).internal for t in itertools.count(int(time.time()))) policy = random.choice(list(POLICIES)) broker = self._get_broker('a', 'c', node_index=0) broker.initialize(next(ts), policy.idx) remote_policy = random.choice([p for p in POLICIES if p is not policy]) remote_broker = self._get_broker('a', 'c', node_index=1) remote_broker.initialize(next(ts), remote_policy.idx) # add some rows to brokers for db in (broker, remote_broker): for p in (policy, remote_policy): db.put_object('o-%s' % p.name, next(ts), 0, 'content-type', 'etag', storage_policy_index=p.idx) db._commit_puts() expected_policy_stats = { policy.idx: {'object_count': 1, 'bytes_used': 0}, remote_policy.idx: {'object_count': 1, 'bytes_used': 0}, } for db in (broker, remote_broker): policy_stats = db.get_policy_stats() self.assertEqual(policy_stats, expected_policy_stats) # each db has 2 rows, 4 total all_items = set() for db in (broker, remote_broker): items = db.get_items_since(-1, 4) all_items.update( (item['name'], item['created_at']) for item in items) self.assertEqual(4, len(all_items)) # replicate both ways part, node = self._get_broker_part_node(broker) self._run_once(node) part, node = self._get_broker_part_node(remote_broker) self._run_once(node) # only the latest timestamps should survive most_recent_items = {} for name, timestamp in all_items: most_recent_items[name] = max( timestamp, most_recent_items.get(name, -1)) self.assertEqual(2, len(most_recent_items)) for db in (broker, remote_broker): items = db.get_items_since(-1, 4) self.assertEqual(len(items), len(most_recent_items)) for item in items: self.assertEqual(most_recent_items[item['name']], item['created_at']) # and the reconciler also collapses updates reconciler_containers = set() for item in all_items: _name, timestamp = item reconciler_containers.add( get_reconciler_container_name(timestamp)) reconciler_items = set() for reconciler_container in reconciler_containers: for node_index in range(3): reconciler = self._get_broker(MISPLACED_OBJECTS_ACCOUNT, reconciler_container, node_index=node_index) items = reconciler.get_items_since(-1, 4) reconciler_items.update( (item['name'], item['created_at']) for item in items) # they can't *both* be in the wrong policy ;) self.assertEqual(1, len(reconciler_items)) for reconciler_name, timestamp in reconciler_items: _policy_index, path = reconciler_name.split(':', 1) a, c, name = path.lstrip('/').split('/') self.assertEqual(most_recent_items[name], timestamp) @contextmanager def _wrap_update_reconciler_sync(self, broker, calls): def wrapper_function(*args, **kwargs): calls.append(args) orig_function(*args, **kwargs) orig_function = broker.update_reconciler_sync broker.update_reconciler_sync = wrapper_function try: yield True finally: broker.update_reconciler_sync = orig_function def test_post_replicate_hook(self): ts = (Timestamp(t).internal for t in itertools.count(int(time.time()))) broker = self._get_broker('a', 'c', node_index=0) broker.initialize(next(ts), 0) broker.put_object('foo', next(ts), 0, 'text/plain', 'xyz', deleted=0, storage_policy_index=0) info = broker.get_replication_info() self.assertEqual(1, info['max_row']) self.assertEqual(-1, 
broker.get_reconciler_sync()) daemon = replicator.ContainerReplicator({}) calls = [] with self._wrap_update_reconciler_sync(broker, calls): daemon._post_replicate_hook(broker, info, []) self.assertEqual(1, len(calls)) # repeated call to _post_replicate_hook with no change to info # should not call update_reconciler_sync calls = [] with self._wrap_update_reconciler_sync(broker, calls): daemon._post_replicate_hook(broker, info, []) self.assertEqual(0, len(calls)) def test_update_sync_store_exception(self): class FakeContainerSyncStore(object): def update_sync_store(self, broker): raise OSError(1, '1') daemon = replicator.ContainerReplicator({}, logger=self.logger) daemon.sync_store = FakeContainerSyncStore() ts_iter = make_timestamp_iter() broker = self._get_broker('a', 'c', node_index=0) timestamp = next(ts_iter) broker.initialize(timestamp.internal, POLICIES.default.idx) info = broker.get_replication_info() daemon._post_replicate_hook(broker, info, []) log_lines = self.logger.get_lines_for_level('error') self.assertEqual(1, len(log_lines)) self.assertIn('Failed to update sync_store', log_lines[0]) def test_update_sync_store(self): klass = 'swift.container.sync_store.ContainerSyncStore' daemon = replicator.ContainerReplicator({}) daemon.sync_store = sync_store.ContainerSyncStore( daemon.root, daemon.logger, daemon.mount_check) ts_iter = make_timestamp_iter() broker = self._get_broker('a', 'c', node_index=0) timestamp = next(ts_iter) broker.initialize(timestamp.internal, POLICIES.default.idx) info = broker.get_replication_info() with mock.patch(klass + '.remove_synced_container') as mock_remove: with mock.patch(klass + '.add_synced_container') as mock_add: daemon._post_replicate_hook(broker, info, []) self.assertEqual(0, mock_remove.call_count) self.assertEqual(0, mock_add.call_count) timestamp = next(ts_iter) # sync-to and sync-key empty - remove from store broker.update_metadata( {'X-Container-Sync-To': ('', timestamp.internal), 'X-Container-Sync-Key': ('', timestamp.internal)}) with mock.patch(klass + '.remove_synced_container') as mock_remove: with mock.patch(klass + '.add_synced_container') as mock_add: daemon._post_replicate_hook(broker, info, []) self.assertEqual(0, mock_add.call_count) mock_remove.assert_called_once_with(broker) timestamp = next(ts_iter) # sync-to is not empty sync-key is empty - remove from store broker.update_metadata( {'X-Container-Sync-To': ('a', timestamp.internal)}) with mock.patch(klass + '.remove_synced_container') as mock_remove: with mock.patch(klass + '.add_synced_container') as mock_add: daemon._post_replicate_hook(broker, info, []) self.assertEqual(0, mock_add.call_count) mock_remove.assert_called_once_with(broker) timestamp = next(ts_iter) # sync-to is empty sync-key is not empty - remove from store broker.update_metadata( {'X-Container-Sync-To': ('', timestamp.internal), 'X-Container-Sync-Key': ('secret', timestamp.internal)}) with mock.patch(klass + '.remove_synced_container') as mock_remove: with mock.patch(klass + '.add_synced_container') as mock_add: daemon._post_replicate_hook(broker, info, []) self.assertEqual(0, mock_add.call_count) mock_remove.assert_called_once_with(broker) timestamp = next(ts_iter) # sync-to, sync-key both not empty - add to store broker.update_metadata( {'X-Container-Sync-To': ('a', timestamp.internal), 'X-Container-Sync-Key': ('secret', timestamp.internal)}) with mock.patch(klass + '.remove_synced_container') as mock_remove: with mock.patch(klass + '.add_synced_container') as mock_add: daemon._post_replicate_hook(broker, 
info, []) mock_add.assert_called_once_with(broker) self.assertEqual(0, mock_remove.call_count) timestamp = next(ts_iter) # container is removed - need to remove from store broker.delete_db(timestamp.internal) broker.update_metadata( {'X-Container-Sync-To': ('a', timestamp.internal), 'X-Container-Sync-Key': ('secret', timestamp.internal)}) with mock.patch(klass + '.remove_synced_container') as mock_remove: with mock.patch(klass + '.add_synced_container') as mock_add: daemon._post_replicate_hook(broker, info, []) self.assertEqual(0, mock_add.call_count) mock_remove.assert_called_once_with(broker) def test_sync_triggers_sync_store_update(self): klass = 'swift.container.sync_store.ContainerSyncStore' ts_iter = make_timestamp_iter() # Create two containers as follows: # broker_1 which is not set for sync # broker_2 which is set for sync and then unset # test that while replicating both we see no activity # for broker_1, and the anticipated activity for broker_2 broker_1 = self._get_broker('a', 'c', node_index=0) broker_1.initialize(next(ts_iter).internal, POLICIES.default.idx) broker_2 = self._get_broker('b', 'd', node_index=0) broker_2.initialize(next(ts_iter).internal, POLICIES.default.idx) broker_2.update_metadata( {'X-Container-Sync-To': ('a', next(ts_iter).internal), 'X-Container-Sync-Key': ('secret', next(ts_iter).internal)}) # replicate once according to broker_1 # relying on the fact that FakeRing would place both # in the same partition. part, node = self._get_broker_part_node(broker_1) with mock.patch(klass + '.remove_synced_container') as mock_remove: with mock.patch(klass + '.add_synced_container') as mock_add: self._run_once(node) self.assertEqual(1, mock_add.call_count) self.assertEqual(broker_2.db_file, mock_add.call_args[0][0].db_file) self.assertEqual(0, mock_remove.call_count) broker_2.update_metadata( {'X-Container-Sync-To': ('', next(ts_iter).internal)}) # replicate once this time according to broker_2 # relying on the fact that FakeRing would place both # in the same partition. part, node = self._get_broker_part_node(broker_2) with mock.patch(klass + '.remove_synced_container') as mock_remove: with mock.patch(klass + '.add_synced_container') as mock_add: self._run_once(node) self.assertEqual(0, mock_add.call_count) self.assertEqual(1, mock_remove.call_count) self.assertEqual(broker_2.db_file, mock_remove.call_args[0][0].db_file) if __name__ == '__main__': unittest.main()
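

# ---------------------------------------------------------------------------
# Illustrative sketch (separate from the tests above). Several tests wrap a
# broker method in a contextmanager so calls can be counted and the original
# behaviour restored afterwards (see _wrap_merge_timestamps and
# _wrap_update_reconciler_sync). A minimal, generic version of that
# monkey-patching pattern is sketched below; the names are hypothetical.

from contextlib import contextmanager


@contextmanager
def record_calls(obj, method_name, calls):
    """Temporarily replace ``obj.<method_name>`` with a wrapper that appends
    its positional args to ``calls`` before delegating to the original."""
    original = getattr(obj, method_name)

    def wrapper(*args, **kwargs):
        calls.append(args)
        return original(*args, **kwargs)

    setattr(obj, method_name, wrapper)
    try:
        yield calls
    finally:
        # always restore the original method, even if the body raised
        setattr(obj, method_name, original)

# Hypothetical usage, mirroring the tests above:
#   calls = []
#   with record_calls(broker, 'merge_timestamps', calls):
#       daemon._repl_to_node(node, broker, part, info)
#   assert len(calls) == 1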
from pprint import pprint

from rootpy import asrootpy, log, collection
from rootpy.plotting import Hist2D
from rootpy.io import root_open

from data_extractors import \
    get_dNdeta_in_classifier_bin_interval,\
    get_identified_vs_mult,\
    get_correlation_histogram,\
    get_PNch_vs_estmult,\
    get_meanpt_vs_estmult,\
    get_pT_distribution,\
    get_mean_nMPI,\
    get_graphs_particle_ratios_vs_refmult
from utils import \
    gen_random_name,\
    get_est_dirs,\
    make_estimator_title,\
    remap_x_values,\
    remove_zero_value_points,\
    remove_points_with_equal_x,\
    remove_points_with_x_err_gt_1NchRef,\
    percentile_bin_to_binidx_bin
from .roofie import Figure, Styles

kPROTON = str(2212)
kANTIPROTON = str(-2212)
kLAMBDA = str(3122)
kANTILAMBDA = str(-3122)
kK0S = str(310)
kKPLUS = str(321)
kKMINUS = str(-321)
kPIPLUS = str(211)
kPIMINUS = str(-211)
kPI0 = str(111)
kXI = str(3312)
kANTIXI = str(-3312)
kOMEGAMINUS = str(3334)
kOMEGAPLUS = str(-3334)


class Plotting(object):
    def __init__(self, f_name, sums_dir_name, results_dir_name, percentile_bins, considered_ests):
        self.f_name = f_name
        self.sums_dir_name = sums_dir_name
        self.results_dir_name = results_dir_name
        # the last mult bin starts at a multiplicity x times larger than the
        # mean in this estimator
        # self.mean_mult_cutoff_factor = 4
        self.ref_ests = ['EtaLt05', ]
        self.considered_ests = considered_ests
        self.perc_bins = percentile_bins
        # figure out the nch edges corresponding to the percentile edges; depends on P(Nch)
        self.delete_results_dir()
        self.make_results_dir()
        self.plot_event_counters()  # needed for calculations of the edges
        self.nch_edges = self._find_nch_edges_from_percentile_edges()
        pprint(self.nch_edges)
        # set the default style for all figures created from here on forward:
        Figure.style = Styles.Presentation_half

    def _io_decorator(func):
        """
        Open and close the file before and after the execution of the
        decorated function. The purpose is to clean up memory this way and to
        force an update of the file before the next function call. The
        wrapper adds the file, sums and results_post to `self`.
""" def wrapper(self, **kwargs): with root_open(self.f_name, 'update') as self.f: self.sums = self.f.MultEstimators.__getattr__(self.sums_dir_name) try: self.results_post = self.f.MultEstimators.__getattr__(self.results_dir_name) except AttributeError: # results dir does not exists (yet) pass return_value = func(self, **kwargs) # Delete all TLists in sums since we own them and they would be left in memory otherwise for obj in self.sums: if isinstance(obj, collection.List): obj.Delete() self.sums.Delete() return return_value return wrapper @_io_decorator def _find_nch_edges_from_percentile_edges(self): nch_edges = {} estimators_to_be_removed = [] for est_dir in get_est_dirs(self.results_post, self.considered_ests): event_counter = est_dir.event_counter try: nch_edges[est_dir.GetName()] = [percentile_bin_to_binidx_bin(perc_bin, event_counter) for perc_bin in self.perc_bins[est_dir.GetName()]] except ValueError, e: print "Error occured for classifier " + est_dir.GetName() print e print self.perc_bins[est_dir.GetName()] print "You can change the percentile bins in the beginning of this script" print "For the following, this estimator is removed" estimators_to_be_removed.append(est_dir.GetName()) print "Bin edges for given percentile bins" print nch_edges for est in estimators_to_be_removed: del self.perc_bins[est] del self.considered_ests[self.considered_ests.index(est)] return nch_edges @_io_decorator def delete_results_dir(self): # delete old result directory self.f.rm('MultEstimators/' + self.results_dir_name) self.f.Write() @_io_decorator def make_results_dir(self): self.f.mkdir('MultEstimators/' + self.results_dir_name, recurse=True) for est_dir in get_est_dirs(self.sums, self.considered_ests): try: resdir = self.f.MultEstimators.__getattr__(self.results_dir_name).mkdir(est_dir.GetName()) resdir.Write() except: pass @_io_decorator def plot_particle_ratios_vs_estmult(self, pids1, pids2, scale=None, ytitle=''): ratio_vs_estmult_dir = (self.results_post.GetPath().split(":")[1] # file.root:/internal/root/path + '/pid_ratios_vs_estmult') fig = Figure() if not ytitle: fig.ytitle = ", ".join(pids1) + " / " + ", ".join(pids2) else: fig.ytitle = ytitle for est_dir in get_est_dirs(self.sums, self.considered_ests): h3d = asrootpy(est_dir.FindObject("fNch_pT_pid")) pids1hists = [get_identified_vs_mult(h3d, pdg) for pdg in pids1] pids2hists = [get_identified_vs_mult(h3d, pdg) for pdg in pids2] pids1_px = sum(pids1hists) pids2_px = sum(pids2hists) ratio1d = pids1_px / pids2_px fig.xtitle = "N_{ch}|_{" + make_estimator_title(est_dir.GetName()) + "}" if scale: ratio1d.Scale(scale) fig.add_plottable(ratio1d, legend_title=make_estimator_title(est_dir.GetName())) name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f, name, ratio_vs_estmult_dir) @_io_decorator def plot_event_counters(self): log.info("Creating event counters") for est_dir in get_est_dirs(self.sums, self.considered_ests): results_est_dir = self.results_post.__getattr__(est_dir.GetName()) # Nasty, but just use a reference estimator here... 
corr = get_correlation_histogram(self.sums, est_dir.GetName(), "EtaLt05") counter = asrootpy(corr.ProjectionX()) counter.name = "event_counter" path = results_est_dir.GetPath().split(":")[1] # file.root:/internal/root/path self.f.cd(path) results_est_dir.WriteTObject(counter) @_io_decorator def plot_dNdetas(self, ratio_to_mb): # Loop over all estimators in the Sums list: log.info("Creating dN/deta bin in multiplicity") figs = [] for est_dir in get_est_dirs(self.sums, self.considered_ests): # does this estimator have several multiplicity bins? # Q2, for example only works with pythia and makes no sense to plot # on Dipsy as it would only be the MB line if len(self.nch_edges[est_dir.GetName()]) == 1: continue results_est_dir = self.results_post.Get(est_dir.GetName()) event_counter = asrootpy(results_est_dir.Get("event_counter")) fig = Figure() fig.plot.palette = 'colorblind' fig.xtitle = '#eta' fig.ytitle = 'Ratio of dN_{ch}/d#eta over MB result' if ratio_to_mb else '1/N #times dN_{ch}/d#eta' fig.legend.title = make_estimator_title(est_dir.GetName()) fig.plot.ymin = 0 dNdeta_mb = get_dNdeta_in_classifier_bin_interval(est_dir, event_counter, [1, event_counter.GetXaxis().GetNbins()]) for cls_bin, perc_bin in zip(self.nch_edges[est_dir.GetName()], self.perc_bins[est_dir.GetName()]): title = "{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100) dNdeta_in_interval = get_dNdeta_in_classifier_bin_interval(est_dir, event_counter, cls_bin) if ratio_to_mb: fig.add_plottable(dNdeta_in_interval / dNdeta_mb, legend_title=title) else: fig.add_plottable(dNdeta_in_interval, legend_title=title) # add MB as well, if it is not the ratio plots we are making if not ratio_to_mb: title = "MB" fig.add_plottable(dNdeta_mb, legend_title=title) path = results_est_dir.GetPath().split(":")[1] # file.root:/internal/root/path if ratio_to_mb: fig.save_to_root_file(self.f, "dNdeta_MB_ratio_summary", path=path) else: fig.save_to_root_file(self.f, "dNdeta_summary", path=path) figs.append(fig) return figs @_io_decorator def plot_pt_distribution_ratios(self): # create particle ratio vs pT plots log.info("Computing histograms vs pt") results_path = self.results_post.GetPath().split(":")[1] # file.root:/internal/root/path # Loop over all estimators in the Sums list: figs = [] def get_new_figure(): fig = Figure() fig.xtitle = 'p_{T} (GeV)' fig.plot.ymin = 0 fig.plot.xmax = 10 fig.plot.palette = 'colorblind' # fig.plot.palette_ncolors = len(nch_edges) - 1 fig.legend.position = 'br' return fig for est_dir in get_est_dirs(self.results_post, self.considered_ests): dirname = '{0}/{1}/pid_ratios/'.format(results_path, est_dir.GetName()) mult_binned_pt_dists = {} mult_binned_pt_dists['proton'] = [ get_pT_distribution(est_dir, [kANTIPROTON, kPROTON], classifier_bin_interval) for classifier_bin_interval in self.nch_edges[est_dir.GetName()] ] mult_binned_pt_dists['pi_ch'] = [ get_pT_distribution(est_dir, [kPIMINUS, kPIPLUS], classifier_bin_interval) for classifier_bin_interval in self.nch_edges[est_dir.GetName()] ] mult_binned_pt_dists['xi'] = [ get_pT_distribution(est_dir, [kANTIXI, kXI], classifier_bin_interval) for classifier_bin_interval in self.nch_edges[est_dir.GetName()] ] mult_binned_pt_dists['omega'] = [ get_pT_distribution(est_dir, [kOMEGAMINUS, kOMEGAPLUS], classifier_bin_interval) for classifier_bin_interval in self.nch_edges[est_dir.GetName()] ] mult_binned_pt_dists['lambda'] = [ get_pT_distribution(est_dir, [kANTILAMBDA, kLAMBDA], classifier_bin_interval) for classifier_bin_interval in self.nch_edges[est_dir.GetName()] ] 
mult_binned_pt_dists['k0s'] = [ get_pT_distribution(est_dir, [kK0S], classifier_bin_interval) for classifier_bin_interval in self.nch_edges[est_dir.GetName()] ] mult_binned_pt_dists['k_ch'] = [ get_pT_distribution(est_dir, [kKPLUS, kKMINUS], classifier_bin_interval) for classifier_bin_interval in self.nch_edges[est_dir.GetName()] ] mult_binned_pt_dists['pi0'] = [ get_pT_distribution(est_dir, [kPI0], classifier_bin_interval) for classifier_bin_interval in self.nch_edges[est_dir.GetName()] ] perc_titles = ["{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100) for perc_bin in self.perc_bins[est_dir.GetName()]] fig = get_new_figure() name = "proton_over_pich__vs__pt" fig.ytitle = "(p+#bar{p})/#pi^{+-}" fig.plot.ymax = .3 fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['proton'], mult_binned_pt_dists['pi_ch'], perc_titles) ] fig.save_to_root_file(self.f, name, dirname) figs.append(fig) fig = get_new_figure() name = "Xi_over_pich__vs__pt" fig.plot.ymax = .06 fig.legend.position = 'tl' fig.ytitle = "#Xi/#pi^{+-}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['xi'], mult_binned_pt_dists['pi_ch'], perc_titles) ] fig.save_to_root_file(self.f, name, dirname) figs.append(fig) fig = get_new_figure() name = "OmegaCh_over_pich__vs__pt" fig.plot.ymax = .005 fig.legend.position = 'tl' fig.ytitle = "#Omega_{ch}/#pi^{+-} " fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['omega'], mult_binned_pt_dists['pi_ch'], perc_titles) ] fig.save_to_root_file(self.f, name, dirname) figs.append(fig) # Ratios to pi0 fig = get_new_figure() name = "pich_over_pi0__vs__pt" fig.plot.ymax = 2.5 fig.legend.position = 'bl' fig.ytitle = "#pi^{+-}/#pi^{0}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['pi_ch'], mult_binned_pt_dists['pi0'], perc_titles) ] fig.save_to_root_file(self.f, name, dirname) figs.append(fig) fig = get_new_figure() name = "proton_over_pi0__vs__pt" fig.plot.ymax = 1 fig.legend.position = 'tr' fig.ytitle = "p/#pi^{0}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['proton'], mult_binned_pt_dists['pi0'], perc_titles) ] fig.save_to_root_file(self.f, name, dirname) figs.append(fig) fig = get_new_figure() name = "K0S_over_pi0__vs__pt" fig.plot.ymax = 1.4 fig.legend.position = 'tl' fig.ytitle = "K^{0}_{S}/#pi^{0}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['k0s'], mult_binned_pt_dists['pi0'], perc_titles) ] fig.save_to_root_file(self.f, name, dirname) figs.append(fig) fig = get_new_figure() name = "Lambda_over_pi0__vs__pt" fig.plot.ymax = .9 fig.legend.position = 'tl' fig.ytitle = "#Lambda/#pi^{0}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['lambda'], mult_binned_pt_dists['pi0'], perc_titles) ] fig.save_to_root_file(self.f, name, dirname) figs.append(fig) fig = get_new_figure() name = "Xi_over_pi0__vs__pt" fig.plot.ymax = .08 fig.legend.position = 'tl' fig.ytitle = 
"#Xi/#pi^{0}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['xi'], mult_binned_pt_dists['pi0'], perc_titles) ] fig.save_to_root_file(self.f, name, dirname) figs.append(fig) fig = get_new_figure() name = "OmegaCh_over_pi0__vs__pt" fig.plot.ymax = .005 fig.legend.position = 'tl' fig.ytitle = "#Omega_{ch}/#pi^{0}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['omega'], mult_binned_pt_dists['pi0'], perc_titles) ] fig.save_to_root_file(self.f, name, dirname) figs.append(fig) # Ratios to K0S fig = get_new_figure() name = "proton_over_K0S__vs__pt" fig.plot.ymax = 2.6 fig.legend.position = 'tr' fig.ytitle = "p/K^{0}_{S}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['proton'], mult_binned_pt_dists['k0s'], perc_titles) ] fig.save_to_root_file(self.f, name, dirname) figs.append(fig) fig = get_new_figure() name = "Lambda_over_K0S__vs__pt" fig.plot.ymax = 1 fig.legend.position = 'bl' fig.ytitle = "#Lambda/K^{0}_{S}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['lambda'], mult_binned_pt_dists['k0s'], perc_titles) ] fig.save_to_root_file(self.f, name, dirname) figs.append(fig) fig = get_new_figure() name = "Xi_over_K0S__vs__pt" fig.plot.ymax = .2 fig.legend.position = 'tl' fig.ytitle = "#Xi/K^{0}_{S}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['xi'], mult_binned_pt_dists['k0s'], perc_titles) ] fig.save_to_root_file(self.f, name, dirname) figs.append(fig) fig = get_new_figure() name = "OmegaCh_over_K0S__vs__pt" fig.plot.ymax = .012 fig.legend.position = 'tl' fig.ytitle = "#Omega_{ch}/K^{0}_{S}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['omega'], mult_binned_pt_dists['k0s'], perc_titles) ] fig.save_to_root_file(self.f, name, dirname) figs.append(fig) fig = get_new_figure() name = "Kaon_over_pich__vs__pt" fig.plot.ymax = 1 fig.legend.position = 'tl' fig.ytitle = "(K^{+} + K^{-}) / (#pi^{+} +#pi^{-})" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['k_ch'], mult_binned_pt_dists['pi_ch'], perc_titles) ] fig.save_to_root_file(self.f, name, dirname) figs.append(fig) return figs @_io_decorator def plot_PNch_summary(self): log.info("Creating P(Nch) summary plot") summary_fig = Figure() summary_fig.xtitle = "N_{ch}^{est}" summary_fig.ytitle = "P(N_{ch}^{est})" summary_fig.legend.position = 'tr' summary_fig.plot.logy = True for est_dir in get_est_dirs(self.sums, self.considered_ests): est_name = est_dir.GetName() h_tmp = get_PNch_vs_estmult(self.sums, est_name) if h_tmp.Integral() > 0: h_tmp.Scale(1.0 / h_tmp.Integral()) summary_fig.add_plottable(h_tmp, make_estimator_title(est_name)) path = self.results_post.GetPath().split(":")[1] # file.root:/internal/root/path summary_fig.save_to_root_file(self.f, "PNch_summary", path=path) # list as return type is expected for making the pdf return [summary_fig] @_io_decorator def plot_PNch(self): log.info("Creating P(Nch_est) 
and P(Nch_refest) histograms") # mult_bin_size = 10 figs = [] for ref_est_name in self.ref_ests: for res_est_dir in get_est_dirs(self.results_post, self.considered_ests): est_name = res_est_dir.GetName() # Figure properties: fig_vs_estmult = Figure() fig_vs_refmult = Figure() fig_vs_estmult.plot.logy = True fig_vs_refmult.plot.logy = True fig_vs_estmult.plot.palette = 'colorblind' fig_vs_refmult.plot.palette = 'colorblind' fig_vs_estmult.legend.position = 'tr' fig_vs_refmult.legend.position = 'tr' fig_vs_estmult.xtitle = "N_{{ch}}^{{{0}}}".format(est_name) fig_vs_refmult.xtitle = "N_{{ch}}^{{{0}}}".format(ref_est_name) fig_vs_estmult.ytitle = "P(N_{{ch}}^{{{0}}})".format(est_name) fig_vs_refmult.ytitle = "P(N_{{ch}}^{{{0}}})".format(ref_est_name) corr_hist = get_correlation_histogram(self.sums, est_name, ref_est_name) # logic when dealing with fixed bins given in Nch: # ------------------------------------------------ # mean_nch_est = corr_hist.GetMean(1) # mean of x axis # nch_max = corr_hist.xaxis.GetNbins() # nch_cutoff = mean_nch_est * mean_mult_cutoff_factor # nch_bins = [(low, low + mult_bin_size) for low in range(0, int(nch_cutoff), mult_bin_size)] # # a large last bin covering the rest: # nch_bins += [(nch_bins[-1][2], nch_max)] # legend_tmpl = "{} < N_{ch} < {}" # logic when dealing with percentile bins: # ---------------------------------------- # event_counter_est = asrootpy(getattr(res_est_dir, "event_counter")) legend_tmpl = "{0}% - {1}%" fig_vs_estmult.legend.title = "Selected in {0}".format(make_estimator_title(ref_est_name)) fig_vs_refmult.legend.title = "Selected in {0}".format(make_estimator_title(est_name)) # WARNING: the following needs tweeking when going back to fixed N_ch bins! for nch_bin, perc_bin in zip(self.nch_edges[ref_est_name], self.perc_bins[ref_est_name]): # vs est_mult: corr_hist.xaxis.SetRange(0, 0) # reset x axis corr_hist.yaxis.SetRange(nch_bin[0], nch_bin[1]) h_vs_est = asrootpy(corr_hist.ProjectionX(gen_random_name())) if h_vs_est.Integral() > 0: h_vs_est.Scale(1.0 / h_vs_est.Integral()) fig_vs_estmult.add_plottable(h_vs_est, legend_tmpl.format(perc_bin[1] * 100, perc_bin[0] * 100)) else: log.info("No charged particles in {0}*100 percentile bin of estimator {1}. This should not happen". format(perc_bin, ref_est_name)) for nch_bin, perc_bin in zip(self.nch_edges[est_name], self.perc_bins[est_name]): # vs ref_mult: corr_hist.yaxis.SetRange(0, 0) # reset y axis corr_hist.xaxis.SetRange(*nch_bin) h_vs_ref = asrootpy(corr_hist.ProjectionY(gen_random_name())) if h_vs_ref.Integral() > 0: h_vs_ref.Scale(1.0 / h_vs_ref.Integral()) fig_vs_refmult.add_plottable(h_vs_ref, legend_tmpl.format(perc_bin[1] * 100, perc_bin[0] * 100)) else: log.info( "No charged particles in {0}*100 percentile bin of estimator {1}. This should not happen". 
format(perc_bin, est_name)) path = res_est_dir.GetPath().split(":")[1] # vs est_mult fig_vs_estmult.save_to_root_file(self.f, "PNchEst_binned_in_Nch{0}".format(ref_est_name), path) # vs ref_mult fig_vs_refmult.save_to_root_file(self.f, "PNch{0}_binned_in_NchEst".format(ref_est_name), path) figs.append(fig_vs_estmult) figs.append(fig_vs_refmult) return figs @_io_decorator def plot_mult_vs_pt(self): log.info("Making 2D pt plots for each particle kind") for est_dir in get_est_dirs(self.sums, self.considered_ests): path = (self.results_post.GetPath().split(":")[1] # file.root:/internal/root/path + "/" + est_dir.GetName() + "/mult_pt") try: self.f.mkdir(path, recurse=True) except ValueError: pass self.f.cd(path) h3d = asrootpy(est_dir.FindObject('classifier_pT_PID_{0}'.format(est_dir.GetName()))) # loop through all particle kinds: nPIDs = h3d.zaxis.GetNbins() for ibin in range(1, nPIDs + 1): h3d.zaxis.SetRange(ibin, ibin) mult_pt = asrootpy(h3d.Project3D("yx")) mult_pt.name = h3d.zaxis.GetBinLabel(ibin) mult_pt.Write() @_io_decorator def plot_correlation(self): # Make correlations between estimators log.info("Correlating N_ch of each estimator") corr_dir = self.results_post.GetPath().split(":")[1] + '/correlations' try: self.f.mkdir(corr_dir, recurse=True) except: pass # Take ntuple from the first estimator and then add friends to this one nt0 = self.sums[0].FindObject("fEventTuple") nt0.SetAlias(self.sums[0].GetName(), "fEventTuple") # build ntuple for est_dir in self.sums[1:]: nt0.AddFriend(est_dir.FindObject("fEventTuple"), est_dir.GetName()) for ref_est in self.considered_ests: for est_dir in self.sums: log.info("Correlating {0} with {1}".format(ref_est, est_dir.GetName())) corr_hist = Hist2D(400, 0, 400, 400, 0, 400, name="corr_hist_{0}_vs_{1}".format(ref_est, est_dir.GetName())) # Labels are deliberately swapped, see Projection below!
corr_hist.title = ("Correlation N_{{ch}} in {0} and {1};N_{{ch}} {1};N_{{ch}} {0}" .format(ref_est, est_dir.GetName())) # this projects onto y:x, to make coding more adventurous nt0.Project(corr_hist.name, "{0}.nch:{1}.nch".format(ref_est, est_dir.GetName()), "ev_weight") corr_hist.drawstyle = 'colz' self.f.cd(corr_dir) corr_hist.write() @_io_decorator def plot_pid_ratio_vs_refmult(self): log.info("Creating plots vs refmult") ratios_dir = self.results_post.GetPath().split(":")[1] + '/pid_ratios_vs_refmult' def get_new_figure(): fig = Figure() fig.plot.ncolors = len(self.considered_ests) fig.xtitle = "N_{ch}|_{" + make_estimator_title('EtaLt05') + "}" fig.plot.xmin = 0 fig.plot.xmax = 60 return fig figs = [] # Proton / pi_ch fig = get_new_figure() pids1, pids2 = ['-2212', '2212'], ['-211', '211'] fig.ytitle = "p/#pi^{+-}" fig.plot.ymin, fig.plot.ymax = 0.04, 0.13 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2, ) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f, name, ratios_dir) figs.append(fig) # K / pi_ch fig = get_new_figure() pids1, pids2 = ['310', '321', '-321'], ['-211', '211'] fig.ytitle = "K^{*}/#pi^{+-}" fig.plot.ymin, fig.plot.ymax = 0.09, 0.30 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f, name, ratios_dir) figs.append(fig) # Lambda / pi_ch fig = get_new_figure() pids1, pids2 = ['3122'], ['-211', '211'] fig.ytitle = "#Lambda / #pi^{+-}" fig.plot.ymin, fig.plot.ymax = 0.005, 0.035 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f, name, ratios_dir) figs.append(fig) # Xi / pi_ch fig = get_new_figure() pids1, pids2 = ['3312'], ['-211', '211'] fig.ytitle = "#Xi / #pi^{+-}" fig.plot.ymin, fig.plot.ymax = 0.0004, 0.003 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f, name, ratios_dir) figs.append(fig) # Omega / pi_ch fig = get_new_figure() pids1, pids2 = ['3334', '-3334'], ['-211', '211'] fig.ytitle = "#Omega / #pi^{+-}" fig.plot.ymin, fig.plot.ymax = 0.00001, 0.0005 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f, name, ratios_dir) figs.append(fig) # pi_ch/pi0 fig = get_new_figure() pids1, pids2 = ['-211', '211'], ['111'] fig.ytitle = "#pi^{+-}/#pi^{0}" fig.plot.ymin, fig.plot.ymax = 1.5, 2.2 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f, name, ratios_dir) figs.append(fig) # proton / pi0 fig = get_new_figure() pids1, pids2 = ['-2212', '2212'], ['111'] fig.ytitle = "p/#pi^{0}" fig.plot.ymin, fig.plot.ymax = 0.09, 0.30 fig.legend.position = 'tl' graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f, name, 
ratios_dir) figs.append(fig) # K / pi0 fig = get_new_figure() pids1, pids2 = ['310', '321', '-321'], ['111'] fig.ytitle = "K^{*}/#pi^{0}" fig.plot.ymin, fig.plot.ymax = 0.15, 0.50 fig.legend.position = 'tl' graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f, name, ratios_dir) figs.append(fig) # Lambda / pi0 fig = get_new_figure() pids1, pids2 = ['3122'], ['111'] fig.ytitle = "#Lambda/#pi^{0}" fig.plot.ymin, fig.plot.ymax = 0.014, 0.045 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f, name, ratios_dir) figs.append(fig) # Xi / pi0 fig = get_new_figure() pids1, pids2 = ['3312'], ['111'] fig.ytitle = "#Xi/#pi^{0}" fig.plot.ymin, fig.plot.ymax = 0.0010, 0.005 fig.legend.position = 'tl' graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f, name, ratios_dir) figs.append(fig) # Omega / pi0 fig = get_new_figure() pids1, pids2 = ['3334', '-3334'], ['111'] fig.ytitle = "#Omega/#pi^{0}" fig.legend.position = 'tl' fig.plot.ymin, fig.plot.ymax = 0.00002, 0.0008 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f, name, ratios_dir) figs.append(fig) # K_ch / K0_S fig = get_new_figure() pids1, pids2 = ['321', '-321'], ['310'] fig.ytitle = "(K^{+}+K^{-}) / (2#timesK^{0}_{S})" fig.plot.ymin, fig.plot.ymax = 0.4, 1.5 fig.legend.position = 'tl' graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2, scale=.5) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f, name, ratios_dir) figs.append(fig) # K0_S / Lambda fig = get_new_figure() pids1, pids2 = ['310'], ['-3122', '3122'] fig.ytitle = "K^{0}_{S} / #Lambda" fig.plot.ymin, fig.plot.ymax = 1.3, 3.7 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f, name, ratios_dir) figs.append(fig) # K0_S / Xi fig = get_new_figure() pids1, pids2 = ['310'], ['3312'] fig.ytitle = "K^{0}_{S} / #Xi" fig.plot.ymin, fig.plot.ymax = 15, 80 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f, name, ratios_dir) figs.append(fig) return figs # ###################################################################################### # # vs Est mult # _plot_particle_ratios_vs_estmult(self, ['321', '-321'], ['310'], # scale=.5, fig.ytitle = "(K^{+} + K^{-}) / (2*K_{S}^{0})") @_io_decorator def plot_meanpt_vs_ref_mult_for_pids(self): log.info("Creating mean pT plots") figs = [] for sums_est_dir, res_est_dir in zip(get_est_dirs(self.sums, self.considered_ests), get_est_dirs(self.results_post, self.considered_ests)): if sums_est_dir.GetName() != res_est_dir.GetName(): raise IndexError("Order of estimator dirs is different in sums and 
results_post") res_dir_str = res_est_dir.GetPath().split(":")[1] corr_hist = get_correlation_histogram(self.sums, sums_est_dir.GetName(), "EtaLt05") # Get the <pT> per classifier bin; then, re-map the classifier value to the reference classifier (eg EtaLt05) # This might not make a lot of sense, actually. Maybe it would be much more telling if I were to # put the percentile bins on the x-axis? As in the highest 1% of that classifier has a <pT> of ... graphs = [] graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kPI0, kPIMINUS, kPIPLUS]), corr_hist)) graphs[-1].title = "#pi" graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kKMINUS, kKPLUS]), corr_hist)) graphs[-1].title = "K^{#pm}" graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kPROTON, kANTIPROTON]), corr_hist)) graphs[-1].title = "p" graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kK0S]), corr_hist)) graphs[-1].title = "K^{0}_{S}" graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kLAMBDA, kANTILAMBDA]), corr_hist)) graphs[-1].title = "#Lambda" graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kXI, kANTIXI]), corr_hist)) graphs[-1].title = "#Xi" graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kOMEGAMINUS, kOMEGAPLUS]), corr_hist)) graphs[-1].title = "#Omega" # sanitize graphs: for g in graphs: remove_zero_value_points(g) remove_points_with_x_err_gt_1NchRef(g) remove_points_with_equal_x(g) fig = Figure() fig.plot.palette = 'root' fig.plot.ncolors = 7 fig.plot.xmin = 0 fig.plot.xmax = 40 fig.plot.ymin = 0.3 fig.plot.ymax = 2.1 fig.ytitle = "<p_{T}>" fig.xtitle = "N_{ch}|_{|#eta|<0.5}" fig.legend.title = make_estimator_title(sums_est_dir.GetName()) [fig.add_plottable(g, g.title) for g in graphs] fig.save_to_root_file(self.f, "mean_pt", res_dir_str) figs.append(fig) return figs # def _plot_event_counter_with_shaded_perc_areas(f, results_post): # log.info("Broken: Root sucks! 
Creating shaded event counter with percentile regions") # return # for est_dir in get_est_dirs(results_post): # event_counter = asrootpy(getattr(est_dir, "event_counter")) # nch_edges = get_Nch_edges_for_percentile_edges(perc_edges, event_counter) # c = Canvas(name="event_counter_with_perc") # leg = Legend(len(nch_edges) - 1) # copies = [] # colors = get_color_generator(ncolors=10) # # Draw the hist once # event_counter.Draw() # for nch_low, nch_up in zip(nch_edges[:-1], nch_edges[1:]): # copies.append(event_counter.Clone(gen_random_name())) # copies[-1].xaxis.SetRangeUser(nch_low, nch_up) # copies[-1].SetFillStyle(1001) # copies[-1].color = next(colors) # copies[-1].xaxis.title = "N_{ch}" # copies[-1].yaxis.title = "counts" # leg.AddEntry(copies[-1], "{}-{}%".format(str(nch_low), str(nch_up))) # copies[-1].Draw('sameHist') # break # leg.Draw() # est_dir.cd() # c.Write() @_io_decorator def plot_dNdpT(self, pid_selection): """ Plot dNdpT particles in pid_selection Parameters ---------- pid_selection : str Either all charged particles ('ch') or 'pi', 'K' or 'p' """ log.info("1/N_evts dN_ch/dpT plots") figs = [] for sums_est_dir, res_est_dir in zip(get_est_dirs(self.sums, self.considered_ests), get_est_dirs(self.results_post, self.considered_ests)): if sums_est_dir.GetName() != res_est_dir.GetName(): raise IndexError("Order of estimator dirs is different in sums and results_post") res_dir_str = res_est_dir.GetPath().split(":")[1] fig = Figure() fig.plot.palette = 'colorblind' # fig.plot.ncolors = 5 fig.legend.position = 'tr' fig.ytitle = "1/N_{evts} dN/dp_{T} (" + make_estimator_title(sums_est_dir.GetName()) + ")" fig.xtitle = "p_{T} (GeV)" fig.plot.logy = True hists = [] if pid_selection == 'ch': fig.legend.title = "#pi^{#pm}, K^{#pm}, p, #Lambda, #Xi, #Omega" pid_numbers = [kPIMINUS, kPIPLUS, kKMINUS, kKPLUS, kPROTON, kANTIPROTON, kLAMBDA, kANTILAMBDA, kXI, kANTIXI, kOMEGAMINUS, kOMEGAPLUS] if pid_selection == 'pi': fig.legend.title = "#pi^{#pm}" pid_numbers = [kPIMINUS, kPIPLUS] if pid_selection == 'K': fig.legend.title = "K^{#pm}" pid_numbers = [kKMINUS, kKPLUS] if pid_selection == 'p': fig.legend.title = "p, #bar{p}" pid_numbers = [kPROTON, kANTIPROTON] for perc_bin, classifier_bin in zip(self.perc_bins[sums_est_dir.GetName()], self.nch_edges[sums_est_dir.GetName()]): hists.append(get_pT_distribution(res_est_dir, pid_numbers, classifier_bin, normalized=False)) hists[-1].title = "{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100) # add MB last to be consistent with colors in other plots; the very first and very last bin we look at classifier_bin_mb = (self.nch_edges[sums_est_dir.GetName()][0][0], self.nch_edges[sums_est_dir.GetName()][-1][-1]) hists.append(get_pT_distribution(res_est_dir, pid_numbers, classifier_bin_mb, normalized=False)) hists[-1].title = "MB" # scale by bin width [h.Scale(1, "width") for h in hists] [fig.add_plottable(p, p.title) for p in hists] fig.save_to_root_file(self.f, "dN{0}dpT".format(pid_selection), res_dir_str) figs.append(fig) return figs @_io_decorator def plot_pT_HM_div_pt_MB(self, scale_nMPI): log.info("Plot dN_{HM}/dpT / dN_{MB}/dpT ratios scaled with nMPI") figs = [] for sums_est_dir, res_est_dir in zip(get_est_dirs(self.sums, self.considered_ests), get_est_dirs(self.results_post, self.considered_ests)): if sums_est_dir.GetName() != res_est_dir.GetName(): raise IndexError("Order of estimator dirs is different in sums and results_post") res_dir_str = res_est_dir.GetPath().split(":")[1] fig = Figure() fig.plot.palette = 'root' fig.plot.ncolors = 7 
fig.xtitle = "p_{T} (GeV)" fig.legend.title = make_estimator_title(sums_est_dir.GetName()) if scale_nMPI: fig.ytitle = ("#left[ #frac{dN^{HM}}{dp_{T}} / #frac{dN^{MB}}{dp_{T}} #right] " "#times #left[ #frac{<N_{MPI}^{MB}>}{<N_{MPI}^{HM}>} #right]") else: fig.ytitle = "#frac{dN^{HM}}{dp_{T}} / #frac{dN^{MB}}{dp_{T}}" charged_particles = [kPIMINUS, kPIPLUS, kKMINUS, kKPLUS, kPROTON, kANTIPROTON, kLAMBDA, kANTILAMBDA, kXI, kANTIXI, kOMEGAMINUS, kOMEGAPLUS] # get the MB distribution which will be used to devide the nch-binned distributions classifier_bin_mb = (self.nch_edges[sums_est_dir.GetName()][0][0], self.nch_edges[sums_est_dir.GetName()][-1][-1]) pt_dist_mb = get_pT_distribution(res_est_dir, charged_particles, classifier_bin_mb, normalized=False) mean_nmpi_mb = get_mean_nMPI(sums_est_dir, classifier_bin_mb) for perc_bin, classifier_bin in zip(self.perc_bins[sums_est_dir.GetName()], self.nch_edges[sums_est_dir.GetName()]): # get the pt distribution in this Nch interval pt_dist_in_interval = get_pT_distribution(res_est_dir, charged_particles, classifier_bin, normalized=False) title = "{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100) if scale_nMPI: mean_nmpi_hm = get_mean_nMPI(sums_est_dir, classifier_bin) fig.add_plottable((pt_dist_in_interval / pt_dist_mb) * (mean_nmpi_mb / mean_nmpi_hm), title) name = "pt_hm_div_pt_mb_scaled_nMPI" else: fig.add_plottable((pt_dist_in_interval / pt_dist_mb), title) name = "pt_hm_div_pt_mb" fig.save_to_root_file(self.f, name, res_dir_str) figs.append(fig) return figs @_io_decorator def plot_nMPI_vs_Nch(self): log.info("Creating nMPI(Nch) summary plot") summary_fig = Figure() summary_fig.xtitle = "N_{ch}^{est}" summary_fig.ytitle = "<N_{MPI}>" summary_fig.plot.palette = 'root' summary_fig.legend.position = 'br' summary_fig.plot.logy = True summary_fig.plot.ymin = 1 for est_dir in get_est_dirs(self.sums, self.considered_ests): h_tmp = asrootpy(get_correlation_histogram(self.sums, est_dir.GetName(), "nMPI").ProfileX()) summary_fig.add_plottable(h_tmp, make_estimator_title(est_dir.GetName())) path = self.results_post.GetPath().split(":")[1] # file.root:/internal/root/path summary_fig.save_to_root_file(self.f, "nMPI_summary", path=path) return [summary_fig]
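# --------------------------------------------------------------------------
# Illustration (not part of the original module): the plotting methods above
# keep pairing self.perc_bins[est] with self.nch_edges[est], i.e. every
# percentile interval of a multiplicity classifier is first translated into an
# inclusive interval of classifier (N_ch) bins before it is handed to helpers
# such as get_pT_distribution or get_dNdeta_in_classifier_bin_interval. The
# sketch below shows that translation in plain Python, in the spirit of the
# get_Nch_edges_for_percentile_edges helper referenced in the commented-out
# code further down. The function name, the list-based event counter and the
# (frac_lo, frac_hi) convention are invented here for illustration only.


def percentile_to_bin_interval(event_counts, frac_lo, frac_hi):
    """Return the 1-based (first_bin, last_bin) interval of classifier bins
    holding the events between the frac_lo and frac_hi quantiles, counted
    from the highest-multiplicity bin downwards."""
    total = float(sum(event_counts))
    selected = []
    cumulative = 0.0
    # walk from the last (highest multiplicity) bin towards the first
    for ibin in range(len(event_counts), 0, -1):
        lower_edge = cumulative / total            # fraction above this bin
        cumulative += event_counts[ibin - 1]
        upper_edge = cumulative / total            # fraction incl. this bin
        # keep the bin if its slice overlaps the requested percentile window
        if upper_edge > frac_lo and lower_edge < frac_hi:
            selected.append(ibin)
    return (min(selected), max(selected)) if selected else None


if __name__ == '__main__':
    toy_event_counter = [100, 80, 60, 40, 20, 10, 5, 2, 1, 1]
    print(percentile_to_bin_interval(toy_event_counter, 0.0, 0.05))  # top 5%
    print(percentile_to_bin_interval(toy_event_counter, 0.0, 1.0))   # "MB"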
# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Compute API that proxies via Cells Service.""" from nova import availability_zones from nova import block_device from nova.cells import rpcapi as cells_rpcapi from nova.cells import utils as cells_utils from nova.compute import api as compute_api from nova.compute import instance_types from nova.compute import rpcapi as compute_rpcapi from nova.compute import vm_states from nova import exception from nova.openstack.common import excutils from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) check_instance_state = compute_api.check_instance_state wrap_check_policy = compute_api.wrap_check_policy check_policy = compute_api.check_policy check_instance_lock = compute_api.check_instance_lock def validate_cell(fn): def _wrapped(self, context, instance, *args, **kwargs): self._validate_cell(instance, fn.__name__) return fn(self, context, instance, *args, **kwargs) _wrapped.__name__ = fn.__name__ return _wrapped class ComputeRPCAPINoOp(object): def __getattr__(self, key): def _noop_rpc_wrapper(*args, **kwargs): return None return _noop_rpc_wrapper class SchedulerRPCAPIRedirect(object): def __init__(self, cells_rpcapi_obj): self.cells_rpcapi = cells_rpcapi_obj def __getattr__(self, key): def _noop_rpc_wrapper(*args, **kwargs): return None return _noop_rpc_wrapper def run_instance(self, context, **kwargs): self.cells_rpcapi.schedule_run_instance(context, **kwargs) class ComputeRPCProxyAPI(compute_rpcapi.ComputeAPI): """Class used to substitute Compute RPC API that will proxy via the cells manager to a compute manager in a child cell. """ def __init__(self, *args, **kwargs): super(ComputeRPCProxyAPI, self).__init__(*args, **kwargs) self.cells_rpcapi = cells_rpcapi.CellsAPI() def cast(self, ctxt, msg, topic=None, version=None): self._set_version(msg, version) topic = self._get_topic(topic) self.cells_rpcapi.proxy_rpc_to_manager(ctxt, msg, topic) def call(self, ctxt, msg, topic=None, version=None, timeout=None): self._set_version(msg, version) topic = self._get_topic(topic) return self.cells_rpcapi.proxy_rpc_to_manager(ctxt, msg, topic, call=True, timeout=timeout) class ComputeCellsAPI(compute_api.API): def __init__(self, *args, **kwargs): super(ComputeCellsAPI, self).__init__(*args, **kwargs) self.cells_rpcapi = cells_rpcapi.CellsAPI() # Avoid casts/calls directly to compute self.compute_rpcapi = ComputeRPCAPINoOp() # Redirect scheduler run_instance to cells. self.scheduler_rpcapi = SchedulerRPCAPIRedirect(self.cells_rpcapi) def _cell_read_only(self, cell_name): """Is the target cell in a read-only mode?""" # FIXME(comstud): Add support for this. 
return False def _validate_cell(self, instance, method): cell_name = instance['cell_name'] if not cell_name: raise exception.InstanceUnknownCell( instance_uuid=instance['uuid']) if self._cell_read_only(cell_name): raise exception.InstanceInvalidState( attr="vm_state", instance_uuid=instance['uuid'], state="temporary_readonly", method=method) def _cast_to_cells(self, context, instance, method, *args, **kwargs): instance_uuid = instance['uuid'] cell_name = instance['cell_name'] if not cell_name: raise exception.InstanceUnknownCell(instance_uuid=instance_uuid) self.cells_rpcapi.cast_compute_api_method(context, cell_name, method, instance_uuid, *args, **kwargs) def _call_to_cells(self, context, instance, method, *args, **kwargs): instance_uuid = instance['uuid'] cell_name = instance['cell_name'] if not cell_name: raise exception.InstanceUnknownCell(instance_uuid=instance_uuid) return self.cells_rpcapi.call_compute_api_method(context, cell_name, method, instance_uuid, *args, **kwargs) def _check_requested_networks(self, context, requested_networks): """Override compute API's checking of this. It'll happen in child cell """ return def _validate_image_href(self, context, image_href): """Override compute API's checking of this. It'll happen in child cell """ return def backup(self, context, instance, name, backup_type, rotation, extra_properties=None, image_id=None): """Backup the given instance.""" image_meta = super(ComputeCellsAPI, self).backup(context, instance, name, backup_type, rotation, extra_properties=extra_properties, image_id=image_id) image_id = image_meta['id'] self._cast_to_cells(context, instance, 'backup', name, backup_type=backup_type, rotation=rotation, extra_properties=extra_properties, image_id=image_id) return image_meta def snapshot(self, context, instance, name, extra_properties=None, image_id=None): """Snapshot the given instance.""" image_meta = super(ComputeCellsAPI, self).snapshot(context, instance, name, extra_properties=extra_properties, image_id=image_id) image_id = image_meta['id'] self._cast_to_cells(context, instance, 'snapshot', name, extra_properties=extra_properties, image_id=image_id) return image_meta def create(self, *args, **kwargs): """We can use the base functionality, but I left this here just for completeness. """ return super(ComputeCellsAPI, self).create(*args, **kwargs) def update_state(self, context, instance, new_state): """Updates the state of a compute instance. For example to 'active' or 'error'. Also sets 'task_state' to None. Used by admin_actions api :param context: The security context :param instance: The instance to update :param new_state: A member of vm_state to change the instance's state to, eg. 'active' """ self.update(context, instance, pass_on_state_change=True, vm_state=new_state, task_state=None) def update(self, context, instance, pass_on_state_change=False, **kwargs): """ Update an instance. :param pass_on_state_change: if true, the state change will be passed on to child cells """ cell_name = instance['cell_name'] if cell_name and self._cell_read_only(cell_name): raise exception.InstanceInvalidState( attr="vm_state", instance_uuid=instance['uuid'], state="temporary_readonly", method='update') rv = super(ComputeCellsAPI, self).update(context, instance, **kwargs) kwargs_copy = kwargs.copy() if not pass_on_state_change: # We need to skip vm_state/task_state updates... 
those will # happen via a _cast_to_cells when running a different # compute api method kwargs_copy.pop('vm_state', None) kwargs_copy.pop('task_state', None) if kwargs_copy: try: self._cast_to_cells(context, instance, 'update', **kwargs_copy) except exception.InstanceUnknownCell: pass return rv def _local_delete(self, context, instance, bdms): # This will get called for every delete in the API cell # because _delete() in compute/api.py will not find a # service when checking if it's up. # We need to only take action if there's no cell_name. Our # overrides of delete() and soft_delete() will take care of # the rest. cell_name = instance['cell_name'] if not cell_name: return super(ComputeCellsAPI, self)._local_delete(context, instance, bdms) def soft_delete(self, context, instance): self._handle_cell_delete(context, instance, super(ComputeCellsAPI, self).soft_delete, 'soft_delete') def delete(self, context, instance): self._handle_cell_delete(context, instance, super(ComputeCellsAPI, self).delete, 'delete') def _handle_cell_delete(self, context, instance, method, method_name): """Terminate an instance.""" # We can't use the decorator because we have special logic in the # case we don't know the cell_name... cell_name = instance['cell_name'] if cell_name and self._cell_read_only(cell_name): raise exception.InstanceInvalidState( attr="vm_state", instance_uuid=instance['uuid'], state="temporary_readonly", method=method_name) method(context, instance) try: self._cast_to_cells(context, instance, method_name) except exception.InstanceUnknownCell: # If there's no cell, there's also no host... which means # the instance was destroyed from the DB here. Let's just # broadcast a message down to all cells and hope this ends # up resolving itself... Worse case.. the instance will # show back up again here. 
delete_type = method_name == 'soft_delete' and 'soft' or 'hard' self.cells_rpcapi.instance_delete_everywhere(context, instance, delete_type) @validate_cell def restore(self, context, instance): """Restore a previously deleted (but not reclaimed) instance.""" super(ComputeCellsAPI, self).restore(context, instance) self._cast_to_cells(context, instance, 'restore') @validate_cell def force_delete(self, context, instance): """Force delete a previously deleted (but not reclaimed) instance.""" super(ComputeCellsAPI, self).force_delete(context, instance) self._cast_to_cells(context, instance, 'force_delete') @validate_cell def stop(self, context, instance, do_cast=True): """Stop an instance.""" super(ComputeCellsAPI, self).stop(context, instance) if do_cast: self._cast_to_cells(context, instance, 'stop', do_cast=True) else: return self._call_to_cells(context, instance, 'stop', do_cast=False) @validate_cell def start(self, context, instance): """Start an instance.""" super(ComputeCellsAPI, self).start(context, instance) self._cast_to_cells(context, instance, 'start') @validate_cell def reboot(self, context, instance, *args, **kwargs): """Reboot the given instance.""" super(ComputeCellsAPI, self).reboot(context, instance, *args, **kwargs) self._cast_to_cells(context, instance, 'reboot', *args, **kwargs) @validate_cell def rebuild(self, context, instance, *args, **kwargs): """Rebuild the given instance with the provided attributes.""" super(ComputeCellsAPI, self).rebuild(context, instance, *args, **kwargs) self._cast_to_cells(context, instance, 'rebuild', *args, **kwargs) @validate_cell def evacuate(self, context, instance, *args, **kwargs): """Evacuate the given instance with the provided attributes.""" super(ComputeCellsAPI, self).evacuate(context, instance, *args, **kwargs) self._cast_to_cells(context, instance, 'evacuate', *args, **kwargs) @check_instance_state(vm_state=[vm_states.RESIZED]) @validate_cell def revert_resize(self, context, instance): """Reverts a resize, deleting the 'new' instance in the process.""" super(ComputeCellsAPI, self).revert_resize(context, instance) self._cast_to_cells(context, instance, 'revert_resize') @check_instance_state(vm_state=[vm_states.RESIZED]) @validate_cell def confirm_resize(self, context, instance): """Confirms a migration/resize and deletes the 'old' instance.""" super(ComputeCellsAPI, self).confirm_resize(context, instance) self._cast_to_cells(context, instance, 'confirm_resize') @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED], task_state=[None]) @validate_cell def resize(self, context, instance, *args, **kwargs): """Resize (ie, migrate) a running instance. If flavor_id is None, the process is considered a migration, keeping the original flavor_id. If flavor_id is not None, the instance should be migrated to a new host and resized to the new flavor_id. """ super(ComputeCellsAPI, self).resize(context, instance, *args, **kwargs) # NOTE(johannes): If we get to this point, then we know the # specified flavor_id is valid and exists. We'll need to load # it again, but that should be safe. old_instance_type = instance_types.extract_instance_type(instance) flavor_id = kwargs.get('flavor_id') if not flavor_id: new_instance_type = old_instance_type else: new_instance_type = instance_types.extract_instance_type(instance, 'new_') # NOTE(johannes): Later, when the resize is confirmed or reverted, # the superclass implementations of those methods will need access # to a local migration record for quota reasons. 
We don't need # source and/or destination information, just the old and new # instance_types. Status is set to 'finished' since nothing else # will update the status along the way. self.db.migration_create(context.elevated(), {'instance_uuid': instance['uuid'], 'old_instance_type_id': old_instance_type['id'], 'new_instance_type_id': new_instance_type['id'], 'status': 'finished'}) # FIXME(comstud): pass new instance_type object down to a method # that'll unfold it self._cast_to_cells(context, instance, 'resize', *args, **kwargs) @validate_cell def add_fixed_ip(self, context, instance, *args, **kwargs): """Add fixed_ip from specified network to given instance.""" super(ComputeCellsAPI, self).add_fixed_ip(context, instance, *args, **kwargs) self._cast_to_cells(context, instance, 'add_fixed_ip', *args, **kwargs) @validate_cell def remove_fixed_ip(self, context, instance, *args, **kwargs): """Remove fixed_ip from specified network to given instance.""" super(ComputeCellsAPI, self).remove_fixed_ip(context, instance, *args, **kwargs) self._cast_to_cells(context, instance, 'remove_fixed_ip', *args, **kwargs) @validate_cell def pause(self, context, instance): """Pause the given instance.""" super(ComputeCellsAPI, self).pause(context, instance) self._cast_to_cells(context, instance, 'pause') @validate_cell def unpause(self, context, instance): """Unpause the given instance.""" super(ComputeCellsAPI, self).unpause(context, instance) self._cast_to_cells(context, instance, 'unpause') def get_diagnostics(self, context, instance): """Retrieve diagnostics for the given instance.""" # FIXME(comstud): Cache this? # Also: only calling super() to get state/policy checking super(ComputeCellsAPI, self).get_diagnostics(context, instance) return self._call_to_cells(context, instance, 'get_diagnostics') @validate_cell def suspend(self, context, instance): """Suspend the given instance.""" super(ComputeCellsAPI, self).suspend(context, instance) self._cast_to_cells(context, instance, 'suspend') @validate_cell def resume(self, context, instance): """Resume the given instance.""" super(ComputeCellsAPI, self).resume(context, instance) self._cast_to_cells(context, instance, 'resume') @validate_cell def rescue(self, context, instance, rescue_password=None): """Rescue the given instance.""" super(ComputeCellsAPI, self).rescue(context, instance, rescue_password=rescue_password) self._cast_to_cells(context, instance, 'rescue', rescue_password=rescue_password) @validate_cell def unrescue(self, context, instance): """Unrescue the given instance.""" super(ComputeCellsAPI, self).unrescue(context, instance) self._cast_to_cells(context, instance, 'unrescue') @validate_cell def set_admin_password(self, context, instance, password=None): """Set the root/admin password for the given instance.""" super(ComputeCellsAPI, self).set_admin_password(context, instance, password=password) self._cast_to_cells(context, instance, 'set_admin_password', password=password) @validate_cell def inject_file(self, context, instance, *args, **kwargs): """Write a file to the given instance.""" super(ComputeCellsAPI, self).inject_file(context, instance, *args, **kwargs) self._cast_to_cells(context, instance, 'inject_file', *args, **kwargs) @wrap_check_policy @validate_cell def get_vnc_console(self, context, instance, console_type): """Get a url to a VNC Console.""" if not instance['host']: raise exception.InstanceNotReady(instance_id=instance['uuid']) connect_info = self._call_to_cells(context, instance, 'get_vnc_connect_info', console_type) 
self.consoleauth_rpcapi.authorize_console(context, connect_info['token'], console_type, connect_info['host'], connect_info['port'], connect_info['internal_access_path'], instance_uuid=instance['uuid']) return {'url': connect_info['access_url']} @wrap_check_policy @validate_cell def get_spice_console(self, context, instance, console_type): """Get a url to a SPICE Console.""" if not instance['host']: raise exception.InstanceNotReady(instance_id=instance['uuid']) connect_info = self._call_to_cells(context, instance, 'get_spice_connect_info', console_type) self.consoleauth_rpcapi.authorize_console(context, connect_info['token'], console_type, connect_info['host'], connect_info['port'], connect_info['internal_access_path'], instance_uuid=instance['uuid']) return {'url': connect_info['access_url']} @validate_cell def get_console_output(self, context, instance, *args, **kwargs): """Get console output for an instance.""" # NOTE(comstud): Calling super() just to get policy check super(ComputeCellsAPI, self).get_console_output(context, instance, *args, **kwargs) return self._call_to_cells(context, instance, 'get_console_output', *args, **kwargs) def lock(self, context, instance): """Lock the given instance.""" super(ComputeCellsAPI, self).lock(context, instance) self._cast_to_cells(context, instance, 'lock') def unlock(self, context, instance): """Unlock the given instance.""" super(ComputeCellsAPI, self).unlock(context, instance) self._cast_to_cells(context, instance, 'unlock') @validate_cell def reset_network(self, context, instance): """Reset networking on the instance.""" super(ComputeCellsAPI, self).reset_network(context, instance) self._cast_to_cells(context, instance, 'reset_network') @validate_cell def inject_network_info(self, context, instance): """Inject network info for the instance.""" super(ComputeCellsAPI, self).inject_network_info(context, instance) self._cast_to_cells(context, instance, 'inject_network_info') @wrap_check_policy @validate_cell def attach_volume(self, context, instance, volume_id, device=None): """Attach an existing volume to an existing instance.""" if device and not block_device.match_device(device): raise exception.InvalidDevicePath(path=device) device = self.compute_rpcapi.reserve_block_device_name( context, device=device, instance=instance, volume_id=volume_id) try: volume = self.volume_api.get(context, volume_id) self.volume_api.check_attach(context, volume, instance=instance) except Exception: with excutils.save_and_reraise_exception(): self.db.block_device_mapping_destroy_by_instance_and_device( context, instance['uuid'], device) self._cast_to_cells(context, instance, 'attach_volume', volume_id, device) @check_instance_lock @validate_cell def _detach_volume(self, context, instance, volume_id): """Detach a volume from an instance.""" check_policy(context, 'detach_volume', instance) volume = self.volume_api.get(context, volume_id) self.volume_api.check_detach(context, volume) self._cast_to_cells(context, instance, 'detach_volume', volume_id) @wrap_check_policy @validate_cell def associate_floating_ip(self, context, instance, address): """Makes calls to network_api to associate_floating_ip.
:param address: is a string floating ip address """ self._cast_to_cells(context, instance, 'associate_floating_ip', address) @validate_cell def delete_instance_metadata(self, context, instance, key): """Delete the given metadata item from an instance.""" super(ComputeCellsAPI, self).delete_instance_metadata(context, instance, key) self._cast_to_cells(context, instance, 'delete_instance_metadata', key) @wrap_check_policy @validate_cell def update_instance_metadata(self, context, instance, metadata, delete=False): rv = super(ComputeCellsAPI, self).update_instance_metadata(context, instance, metadata, delete=delete) try: self._cast_to_cells(context, instance, 'update_instance_metadata', metadata, delete=delete) except exception.InstanceUnknownCell: pass return rv class HostAPI(compute_api.HostAPI): """HostAPI() class for cells. Implements host management related operations. Works by setting the RPC API used by the base class to proxy via the cells manager to the compute manager in the correct cell. Hosts specified with cells will need to be of the format 'path!to!cell@host'. DB methods in the base class are also overridden to proxy via the cells manager. """ def __init__(self): super(HostAPI, self).__init__(rpcapi=ComputeRPCProxyAPI()) self.cells_rpcapi = cells_rpcapi.CellsAPI() def _assert_host_exists(self, context, host_name): """Cannot check this in API cell. This will be checked in the target child cell. """ pass def service_get_all(self, context, filters=None, set_zones=False): if filters is None: filters = {} if 'availability_zone' in filters: zone_filter = filters.pop('availability_zone') set_zones = True else: zone_filter = None services = self.cells_rpcapi.service_get_all(context, filters=filters) if set_zones: services = availability_zones.set_availability_zones(context, services) if zone_filter is not None: services = [s for s in services if s['availability_zone'] == zone_filter] return services def service_get_by_compute_host(self, context, host_name): return self.cells_rpcapi.service_get_by_compute_host(context, host_name) def instance_get_all_by_host(self, context, host_name): """Get all instances by host. Host might have a cell prepended to it, so we'll need to strip it out. We don't need to proxy this call to cells, as we have instance information here in the API cell. """ cell_name, host_name = cells_utils.split_cell_and_item(host_name) instances = super(HostAPI, self).instance_get_all_by_host(context, host_name) if cell_name: instances = [i for i in instances if i['cell_name'] == cell_name] return instances def task_log_get_all(self, context, task_name, beginning, ending, host=None, state=None): """Return the task logs within a given range from cells, optionally filtering by the host and/or state. For cells, the host should be a path like 'path!to!cell@host'. If no @host is given, only task logs from a particular cell will be returned. """ return self.cells_rpcapi.task_log_get_all(context, task_name, beginning, ending, host=host, state=state) def compute_node_get(self, context, compute_id): """Get a compute node from a particular cell by its integer ID. compute_id should be in the format of 'path!to!cell@ID'. 
""" return self.cells_rpcapi.compute_node_get(context, compute_id) def compute_node_get_all(self, context): return self.cells_rpcapi.compute_node_get_all(context) def compute_node_search_by_hypervisor(self, context, hypervisor_match): return self.cells_rpcapi.compute_node_get_all(context, hypervisor_match=hypervisor_match) def compute_node_statistics(self, context): return self.cells_rpcapi.compute_node_stats(context) class InstanceActionAPI(compute_api.InstanceActionAPI): """InstanceActionAPI() class for cells.""" def __init__(self): super(InstanceActionAPI, self).__init__() self.cells_rpcapi = cells_rpcapi.CellsAPI() def actions_get(self, context, instance): return self.cells_rpcapi.actions_get(context, instance) def action_get_by_request_id(self, context, instance, request_id): return self.cells_rpcapi.action_get_by_request_id(context, instance, request_id) def action_events_get(self, context, instance, action_id): return self.cells_rpcapi.action_events_get(context, instance, action_id)
from __future__ import absolute_import from __future__ import unicode_literals import collections import contextlib import imp import os import sys import pkg_resources Arguments = collections.namedtuple('Arguments', ('all', 'patches', 'cmd')) class PymonkeySystemExit(SystemExit): pass class PymonkeyError(RuntimeError): pass HELPMSG = '''\ usage: {} [-h] [--debug] [--all] [patches [patches ...]] -- cmd [cmd ...] A tool for applying monkeypatches to python executables. Patches are \ registered by supplying a setuptools entrypoint for `pymonkey`. Patches are \ selected by listing them on the commandline when running the pymonkey tool. \ For example, consider a registered patch pip_faster when using pip. An \ invocation may look like `pymonkey pip_faster -- pip install ...`. positional arguments: patches cmd optional arguments: - h, --help show this help message and exit --all Apply all known patches'''.format(sys.argv[0]) def print_std_err(s): sys.stderr.write(s + '\n') sys.stderr.flush() def DEBUG(msg): if 'PYMONKEY_DEBUG' in os.environ: print_std_err('pymonkey: ' + msg) def print_help_and_exit(): print_std_err(HELPMSG) raise PymonkeySystemExit() def manual_argument_parsing(argv): """sadness because argparse doesn't quite do what we want.""" # Special case these for a better error message if not argv or argv == ['-h'] or argv == ['--help']: print_help_and_exit() try: dashdash_index = argv.index('--') except ValueError: print_std_err('Must separate command by `--`') print_help_and_exit() patches, cmd = argv[:dashdash_index], argv[dashdash_index + 1:] if '--help' in patches or '-h' in patches: print_help_and_exit() if '--all' in patches: all_patches = True patches.remove('--all') else: all_patches = False unknown_options = [patch for patch in patches if patch.startswith('-')] if unknown_options: print_std_err('Unknown options: {!r}'.format(unknown_options)) print_help_and_exit() if patches and all_patches: print_std_err('--all and patches specified: {!r}'.format(patches)) print_help_and_exit() return Arguments(all=all_patches, patches=tuple(patches), cmd=tuple(cmd)) def importmod(mod): return __import__(mod, fromlist=[str('__name__')], level=0) def _noop(*a, **k): return None class PymonkeyImportHook(object): """This is where the magic happens. This import hook is responsible for the following things: - It will load all modules - In loading, it'll first invoke builtin import. - It'll then pass the module that it imported through each of the pymonkey hooks. 
""" def __init__(self, hooks): self._hooks = hooks self._entry_data = dict.fromkeys(hooks) self._handling = [] def _module_exists(self, module, path): # First check other entries in metapath for the module # Otherwise, try basic python import logic for entry in sys.meta_path: if ( entry is not self and ( getattr(entry, 'find_spec', _noop)(module, path) or getattr(entry, 'find_module', _noop)(module, path) ) ): return True # We're either passed: # - A toplevel module name and `None` for path # - The fullpath to a module and a list for path # imp.find_module takes the following: # - A toplevel module name and `None` for path # - A subpackage and a list for path # Solution: # Convert the full modulename we're given into the subpackage if path is not None: to_try_mod = module.split('.')[-1] else: to_try_mod = module try: imp.find_module(to_try_mod, path) return True # pragma: no cover (PY3 import is via sys.meta_path) except ImportError: return False @contextlib.contextmanager def handling(self, modname): self._handling.append(modname) try: yield finally: popped = self._handling.pop() assert popped == modname, (popped, modname) def find_module(self, fullname, path=None): # Shortcut if we're already processing this module if fullname in self._handling: DEBUG('already handling {}'.format(fullname)) return # Make sure we can actually handle this module elif self._module_exists(fullname, path): DEBUG('found {}'.format(fullname)) return self else: DEBUG('not found {}'.format(fullname)) return def load_module(self, fullname): # Since we're going to invoke the import machinery and hit ourselves # again, store some state so we don't recurse forever with self.handling(fullname): module = importmod(fullname) for entry, hook_fn in self._hooks.items(): hook_fn(module, self._entry_data[entry]) return module def set_entry_data(self, entry, data): self._entry_data[entry] = data @contextlib.contextmanager def assert_no_other_modules_imported(imported_modname): def getmods(): return {modname for modname, mod in sys.modules.items() if mod} before = getmods() yield after = getmods() unexpected_imports = sorted( modname for modname in after - before if not imported_modname.startswith(modname) ) if unexpected_imports: raise PymonkeyError( 'pymonkey modules must not trigger imports at the module scope. 
' 'The following modules were imported while importing {}:\n' '{}'.format( imported_modname, '\t' + '\t\n'.join(unexpected_imports), ), ) def get_entry_callables(all_patches, patches, pymonkey_entry_points, attr): def _to_callable(entry_point): """If they give us a module, retrieve `attr`""" with assert_no_other_modules_imported(entry_point.module_name): # Load the module manually to avoid pkg_resources side-effects loaded = importmod(entry_point.module_name) for entry_attr in entry_point.attrs: loaded = getattr(loaded, entry_attr) if callable(loaded): return loaded else: return getattr(loaded, attr) if all_patches: entry_points = pymonkey_entry_points else: all_entries = {entry.name: entry for entry in pymonkey_entry_points} missing = set(patches) - set(all_entries) if missing: print_std_err('Could not find patch(es): {}'.format(missing)) raise PymonkeySystemExit(1) entry_points = [all_entries[name] for name in patches] return {entry.name: _to_callable(entry) for entry in entry_points} def main(argv=None): argv = argv if argv is not None else sys.argv[1:] args = manual_argument_parsing(argv) # Register patches callables = get_entry_callables( args.all, args.patches, tuple(pkg_resources.iter_entry_points('pymonkey')), attr='pymonkey_patch', ) hook = PymonkeyImportHook(callables) # Important to insert at the beginning to be ahead of the stdlib importer sys.meta_path.insert(0, hook) # Allow hooks to do argument parsing argv_callables = get_entry_callables( args.all, args.patches, tuple(pkg_resources.iter_entry_points('pymonkey.argparse')), attr='pymonkey_argparse', ) cmd, rest = args.cmd[0], tuple(args.cmd[1:]) for entry_name, argv_callable in argv_callables.items(): args, rest = tuple(argv_callable(rest)) hook.set_entry_data(entry_name, args) # Call the thing entry, = tuple(pkg_resources.iter_entry_points('console_scripts', cmd)) sys.argv = [cmd] + list(rest) return entry.load()() def make_entry_point(patches, original_entry_point): """Use this to make a console_script entry point for your application which applies patches. :param patches: iterable of pymonkey patches to apply. Ex: ('my-patch,) :param original_entry_point: Such as 'pip' """ def entry(argv=None): argv = argv if argv is not None else sys.argv[1:] return main( tuple(patches) + ('--', original_entry_point) + tuple(argv) ) return entry if __name__ == '__main__': sys.exit(main())
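# --------------------------------------------------------------------------
# Illustration (not part of the original module): from the consumer side, a
# patch is a module (or callable) registered under the 'pymonkey' entry-point
# group; load_module() above calls it as patch(module, data), where data is
# whatever the matching 'pymonkey.argparse' hook returned for the same entry.
# Everything below (the module name example_patch, the --shout flag, the
# textwrap target) is made up purely to show the expected shape of such a
# hook pair.


def pymonkey_argparse(argv):
    """Consume this patch's own flags, hand the rest back to pymonkey."""
    # imported lazily: assert_no_other_modules_imported() above forbids
    # module-scope imports in patch modules
    import argparse
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('--shout', action='store_true')
    return parser.parse_known_args(list(argv))


def pymonkey_patch(module, args):
    """Called for every module pymonkey imports; only touch the target one."""
    if module.__name__ != 'textwrap':
        return
    original_fill = module.fill

    def fill(text, *a, **kw):
        result = original_fill(text, *a, **kw)
        return result.upper() if args is not None and args.shout else result

    module.fill = fill

# A matching setup.py would register the hooks roughly as
#     entry_points={
#         'pymonkey': ['example_patch = example_patch'],
#         'pymonkey.argparse': ['example_patch = example_patch'],
#     }
# after which `pymonkey example_patch -- some-console-script ...` applies it.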
# -*- coding: utf-8 -*- ''' Rofi is a TraitMenu subclass that was generated by the scripts in dynmen/utils. ''' from dynmen.common import TraitMenu, Flag, Option class Rofi(TraitMenu): _aliases = [('sep', 'entry_sep'), ('p', 'prompt'), ('i', 'case_insensitive')] _base_command = ['rofi'] _version = 'Version: 1.5.0-dirty (tags/1.5.0)' a = Option('-a', help='List of row indexes to mark active') async_pre_read = Option( '-async-pre-read', help='Read several entries blocking before switching to async mode @@@ 25' ) auto_select = Flag('-auto-select', help='Enable auto select mode @@@ False (Default)') bw = Option('-bw', help='Border width @@@ 1 (Default)') case_sensitive = Flag( '-case-sensitive', help='Set case-sensitivity @@@ False (Default)' ) click_to_exit = Flag( '-click-to-exit', help='Click outside the window to exit @@@ True (Default)' ) color_active = Option( '-color-active', help='Color scheme for active row @@@ argb:00000000, #6699CC , argb:00000000, #6699CC , #1B2B34 (XResources)' ) color_normal = Option( '-color-normal', help='Color scheme for normal row @@@ argb:00000000, #D8DEE9 , argb:00000000, #FAC863 , #1B2B34 (XResources)' ) color_urgent = Option( '-color-urgent', help='Color scheme for urgent row @@@ argb:00000000, #F99157 , argb:00000000, #F99157 , #1B2B34 (XResources)' ) color_window = Option( '-color-window', help='Color scheme window @@@ argb:ee222222, #FAC863 , #FAC863 (XResources)' ) columns = Option('-columns', help='Number of columns @@@ 1 (Default)') combi_hide_mode_prefix = Flag( '-combi-hide-mode-prefix', help='Hide the prefix mode prefix on the combi view. @@@ False (Default)' ) combi_modi = Option( '-combi-modi', help='Set the modi to combine in combi mode @@@ window,run (Default)' ) cycle = Flag('-cycle', help='Cycle through the results list @@@ True (Default)') disable_history = Flag( '-disable-history', help='Disable history in run/ssh @@@ False (Default)' ) display = Option('-display', help='X server to contact. @@@ ${DISPLAY}') display_combi = Option( '-display-combi', help='The display name of this browser @@@ (unset) (Default)' ) display_drun = Option( '-display-drun', help='The display name of this browser @@@ (unset) (Default)' ) display_keys = Option( '-display-keys', help='The display name of this browser @@@ (unset) (Default)' ) display_run = Option( '-display-run', help='The display name of this browser @@@ (unset) (Default)' ) display_ssh = Option( '-display-ssh', help='The display name of this browser @@@ (unset) (Default)' ) display_window = Option( '-display-window', help='The display name of this browser @@@ (unset) (Default)' ) display_windowcd = Option( '-display-windowcd', help='The display name of this browser @@@ (unset) (Default)' ) dmenu = Flag('-dmenu', default_value=True) dpi = Option('-dpi', help='DPI @@@ -1 (Default)') drun_icon_theme = Option( '-drun-icon-theme', help='Theme to use to look for icons @@@ (unset) (Default)' ) drun_match_fields = Option( '-drun-match-fields', help='Desktop entry fields to match in drun @@@ name,generic,exec,categories (Default)') dump_config = Flag( '-dump-config', help='Dump the current configuration in rasi format and exit.' ) dump_theme = Flag( '-dump-theme', help='Dump the current theme in rasi format and exit.' ) dump_xresources = Flag( '-dump-xresources', help='Dump the current configuration in Xresources format and exit.' 
) e = Option('-e', help='Show a dialog displaying the passed message and exit.') eh = Option('-eh', help='Row height (in chars) @@@ 1 (Default)') fake_background = Option( '-fake-background', help='Background to use for fake transparency. (background or screenshot) @@@ screenshot (Default)' ) fake_transparency = Flag( '-fake-transparency', help='Fake transparency *DEPRECATED* @@@ False (Default)' ) filter = Option('-filter', help='Pre-set filter @@@ (unset) (Default)') fixed_num_lines = Flag( '-fixed-num-lines', help='Always show number of lines @@@ True (Default)' ) font = Option('-font', help='Font to use @@@ mono 12 (Default)') format = Option('-format', help='Output format string @@@ s') fullscreen = Flag('-fullscreen', help='Fullscreen @@@ False (Default)') help = Flag('-help', help='This help message.') hide_scrollbar = Flag( '-hide-scrollbar', help='Hide scroll-bar *DEPRECATED* @@@ False (Default)' ) i = Flag('-i', help='Set filter to be case insensitive') input = Option('-input', help='Read input from file instead from standard input.') kb_accept_alt = Option( '-kb-accept-alt', help='Use alternate accept command. @@@ Shift+Return (Default)' ) kb_accept_custom = Option( '-kb-accept-custom', help='Use entered text as command (in ssh/run modi) @@@ Control+Return (Default)' ) kb_accept_entry = Option( '-kb-accept-entry', help='Accept entry @@@ Control+j,Control+m,Return,KP_Enter (Default)' ) kb_cancel = Option( '-kb-cancel', help='Quit rofi @@@ Escape,Control+g,Control+bracketleft (Default)' ) kb_clear_line = Option( '-kb-clear-line', help='Clear input line @@@ Control+w (Default)' ) kb_custom_1 = Option('-kb-custom-1', help='Custom keybinding 1 @@@ Alt+1 (Default)') kb_custom_10 = Option( '-kb-custom-10', help='Custom keybinding 10 @@@ Alt+0 (Default)' ) kb_custom_11 = Option( '-kb-custom-11', help='Custom keybinding 11 @@@ Alt+exclam (Default)' ) kb_custom_12 = Option( '-kb-custom-12', help='Custom keybinding 12 @@@ Alt+at (Default)' ) kb_custom_13 = Option( '-kb-custom-13', help='Csutom keybinding 13 @@@ Alt+numbersign (Default)' ) kb_custom_14 = Option( '-kb-custom-14', help='Custom keybinding 14 @@@ Alt+dollar (Default)' ) kb_custom_15 = Option( '-kb-custom-15', help='Custom keybinding 15 @@@ Alt+percent (Default)' ) kb_custom_16 = Option( '-kb-custom-16', help='Custom keybinding 16 @@@ Alt+dead_circumflex (Default)' ) kb_custom_17 = Option( '-kb-custom-17', help='Custom keybinding 17 @@@ Alt+ampersand (Default)' ) kb_custom_18 = Option( '-kb-custom-18', help='Custom keybinding 18 @@@ Alt+asterisk (Default)' ) kb_custom_19 = Option( '-kb-custom-19', help='Custom Keybinding 19 @@@ Alt+parenleft (Default)' ) kb_custom_2 = Option('-kb-custom-2', help='Custom keybinding 2 @@@ Alt+2 (Default)') kb_custom_3 = Option('-kb-custom-3', help='Custom keybinding 3 @@@ Alt+3 (Default)') kb_custom_4 = Option('-kb-custom-4', help='Custom keybinding 4 @@@ Alt+4 (Default)') kb_custom_5 = Option('-kb-custom-5', help='Custom Keybinding 5 @@@ Alt+5 (Default)') kb_custom_6 = Option('-kb-custom-6', help='Custom keybinding 6 @@@ Alt+6 (Default)') kb_custom_7 = Option('-kb-custom-7', help='Custom Keybinding 7 @@@ Alt+7 (Default)') kb_custom_8 = Option('-kb-custom-8', help='Custom keybinding 8 @@@ Alt+8 (Default)') kb_custom_9 = Option('-kb-custom-9', help='Custom keybinding 9 @@@ Alt+9 (Default)') kb_delete_entry = Option( '-kb-delete-entry', help='Delete entry from history @@@ Shift+Delete (Default)' ) kb_mode_next = Option( '-kb-mode-next', help='Switch to the next mode. 
@@@ Shift+Right,Control+Tab (Default)' ) kb_mode_previous = Option( '-kb-mode-previous', help='Switch to the previous mode. @@@ Shift+Left,Control+ISO_Left_Tab (Default)' ) kb_move_char_back = Option( '-kb-move-char-back', help='Move back one char @@@ Left,Control+b (Default)' ) kb_move_char_forward = Option( '-kb-move-char-forward', help='Move forward one char @@@ Right,Control+f (Default)' ) kb_move_end = Option('-kb-move-end', help='End of line @@@ Control+e (Default)') kb_move_front = Option( '-kb-move-front', help='Beginning of line @@@ Control+a (Default)' ) kb_move_word_back = Option( '-kb-move-word-back', help='Move back one word @@@ Alt+b (Default)' ) kb_move_word_forward = Option( '-kb-move-word-forward', help='Move forward one word @@@ Alt+f (Default)' ) kb_page_next = Option( '-kb-page-next', help='Go to the next page @@@ Page_Down (Default)' ) kb_page_prev = Option( '-kb-page-prev', help='Go to the previous page @@@ Page_Up (Default)' ) kb_primary_paste = Option( '-kb-primary-paste', help='Paste primary selection @@@ Control+V,Shift+Insert (Default)' ) kb_remove_char_back = Option( '-kb-remove-char-back', help='Delete previous char @@@ BackSpace,Control+h (Default)' ) kb_remove_char_forward = Option( '-kb-remove-char-forward', help='Delete next char @@@ Delete,Control+d (Default)' ) kb_remove_to_eol = Option( '-kb-remove-to-eol', help='Delete till the end of line @@@ Control+k (Default)' ) kb_remove_to_sol = Option( '-kb-remove-to-sol', help='Delete till the start of line @@@ Control+u (Default)' ) kb_remove_word_back = Option( '-kb-remove-word-back', help='Delete previous word @@@ Control+Alt+h,Control+BackSpace (Default)' ) kb_remove_word_forward = Option( '-kb-remove-word-forward', help='Delete next word @@@ Control+Alt+d (Default)' ) kb_row_down = Option( '-kb-row-down', help='Select next entry @@@ Down,Control+n (Default)' ) kb_row_first = Option( '-kb-row-first', help='Go to the first entry @@@ Home,KP_Home (Default)' ) kb_row_last = Option( '-kb-row-last', help='Go to the last entry @@@ End,KP_End (Default)' ) kb_row_left = Option( '-kb-row-left', help='Go to the previous column @@@ Control+Page_Up (Default)' ) kb_row_right = Option( '-kb-row-right', help='Go to the next column @@@ Control+Page_Down (Default)' ) kb_row_select = Option( '-kb-row-select', help='Set selected item as input text @@@ Control+space (Default)' ) kb_row_tab = Option( '-kb-row-tab', help='Go to next row, if one left, accept it, if no left next mode. 
@@@ Tab (Default)') kb_row_up = Option( '-kb-row-up', help='Select previous entry @@@ Up,Control+p,ISO_Left_Tab (Default)' ) kb_screenshot = Option( '-kb-screenshot', help='Take a screenshot of the rofi window @@@ Alt+S (Default)' ) kb_secondary_paste = Option( '-kb-secondary-paste', help='Paste clipboard @@@ Control+v,Insert (Default)' ) kb_select_1 = Option('-kb-select-1', help='Select row 1 @@@ Super+1 (Default)') kb_select_10 = Option('-kb-select-10', help='Select row 10 @@@ Super+0 (Default)') kb_select_2 = Option('-kb-select-2', help='Select row 2 @@@ Super+2 (Default)') kb_select_3 = Option('-kb-select-3', help='Select row 3 @@@ Super+3 (Default)') kb_select_4 = Option('-kb-select-4', help='Select row 4 @@@ Super+4 (Default)') kb_select_5 = Option('-kb-select-5', help='Select row 5 @@@ Super+5 (Default)') kb_select_6 = Option('-kb-select-6', help='Select row 6 @@@ Super+6 (Default)') kb_select_7 = Option('-kb-select-7', help='Select row 7 @@@ Super+7 (Default)') kb_select_8 = Option('-kb-select-8', help='Select row 8 @@@ Super+8 (Default)') kb_select_9 = Option('-kb-select-9', help='Select row 9 @@@ Super+9 (Default)') kb_toggle_case_sensitivity = Option( '-kb-toggle-case-sensitivity', help='Toggle case sensitivity @@@ grave,dead_grave (Default)' ) kb_toggle_sort = Option('-kb-toggle-sort', help='Toggle sort @@@ Alt+grave (Default)') l = Option('-l', help='Number of rows to display') levenshtein_sort = Flag( '-levenshtein-sort', help='Use levenshtein sorting also for fuzzy matching @@@ False (Default)' ) line_margin = Option( '-line-margin', help='Margin between rows *DEPRECATED* @@@ 2 (Default)' ) line_padding = Option( '-line-padding', help='Padding within rows *DEPRECATED* @@@ 1 (Default)' ) lines = Option('-lines', help='Number of lines @@@ 15 (Default)') location = Option('-location', help='Location on screen @@@ 0 (Default)') m = Option('-m', help='Monitor id to show on @@@ -5 (Default)') markup = Flag('-markup', help='Enable pango markup where possible.') markup_rows = Flag( '-markup-rows', help='Allow and render pango markup as input data.' ) matching = Option( '-matching', help='Set the matching algorithm. (normal, regex, glob, fuzzy) @@@ normal (Default)') max_history_size = Option( '-max-history-size', help='Max history size (WARNING: can cause slowdowns when set to high). 
@@@ 25 (Default)') me_accept_custom = Option( '-me-accept-custom', help='Accept hovered row with custom action @@@ Control+MouseDPrimary (Default)' ) me_accept_entry = Option( '-me-accept-entry', help='Accept hovered row @@@ MouseDPrimary (Default)' ) me_select_entry = Option( '-me-select-entry', help='Select hovered row @@@ MousePrimary (Default)' ) mesg = Option( '-mesg', help='Print a small user message under the prompt (uses pango markup)' ) ml_row_down = Option( '-ml-row-down', help='Select next entry @@@ ScrollDown (Default)' ) ml_row_left = Option( '-ml-row-left', help='Go to the previous column @@@ ScrollLeft (Default)' ) ml_row_right = Option( '-ml-row-right', help='Go to the next column @@@ ScrollRight (Default)' ) ml_row_up = Option('-ml-row-up', help='Select previous entry @@@ ScrollUp (Default)') modi = Option('-modi', help='Enabled modi @@@ window,run,ssh (Default)') no_auto_select = Flag( '-no-auto-select', help='Enable auto select mode @@@ False (Default)' ) no_case_sensitive = Flag( '-no-case-sensitive', help='Set case-sensitivity @@@ False (Default)' ) no_click_to_exit = Flag( '-no-click-to-exit', help='Click outside the window to exit @@@ True (Default)' ) no_combi_hide_mode_prefix = Flag( '-no-combi-hide-mode-prefix', help='Hide the prefix mode prefix on the combi view. @@@ False (Default)' ) no_config = Flag('-no-config', help='Do not load configuration, use default values.') no_custom = Flag('-no-custom', help="Don't accept custom entry") no_cycle = Flag('-no-cycle', help='Cycle through the results list @@@ True (Default)') no_disable_history = Flag( '-no-disable-history', help='Disable history in run/ssh @@@ False (Default)' ) no_fake_transparency = Flag( '-no-fake-transparency', help='Fake transparency *DEPRECATED* @@@ False (Default)' ) no_fixed_num_lines = Flag( '-no-fixed-num-lines', help='Always show number of lines @@@ True (Default)' ) no_fullscreen = Flag('-no-fullscreen', help='Fullscreen @@@ False (Default)') no_hide_scrollbar = Flag( '-no-hide-scrollbar', help='Hide scroll-bar *DEPRECATED* @@@ False (Default)' ) no_lazy_grab = Flag( '-no-lazy-grab', help='Disable lazy grab that, when fail to grab keyboard, does not block but retry later.' ) no_levenshtein_sort = Flag( '-no-levenshtein-sort', help='Use levenshtein sorting also for fuzzy matching @@@ False (Default)' ) no_parse_hosts = Flag( '-no-parse-hosts', help='Parse hosts file for ssh mode @@@ False (Default)' ) no_parse_known_hosts = Flag( '-no-parse-known-hosts', help='Parse known_hosts file for ssh mode @@@ True (Default)' ) no_plugins = Flag('-no-plugins', help='Disable loading of external plugins.') no_show_icons = Flag( '-no-show-icons', help='Whether to load and show icons @@@ False (Default)' ) no_show_match = Flag( '-no-show-match', help='Indicate how it match by underlining it. @@@ True (Default)' ) no_sidebar_mode = Flag( '-no-sidebar-mode', help='Enable sidebar-mode @@@ False (Default)' ) no_sort = Flag('-no-sort', help='Use sorting @@@ False (Default)') no_tokenize = Flag('-no-tokenize', help='Tokenize input string @@@ True (Default)') normal_window = Flag( '-normal-window', help='In dmenu mode, behave as a normal window. 
(experimental)' ) only_match = Flag('-only-match', help='Force selection or custom entry') p = Option('-p', help='Prompt to display left of entry field') padding = Option('-padding', help='Padding @@@ 5 (Default)') parse_hosts = Flag( '-parse-hosts', help='Parse hosts file for ssh mode @@@ False (Default)' ) parse_known_hosts = Flag( '-parse-known-hosts', help='Parse known_hosts file for ssh mode @@@ True (Default)' ) password = Flag( '-password', help="Do not show what the user inputs. Show '*' instead." ) pid = Option('-pid', help='Pidfile location @@@ /run/user/1001/rofi.pid (Default)') plugin_path = Flag('-plugin-path', help='Directory used to search for rofi plugins.') run_command = Option( '-run-command', help='Run command to execute @@@ {cmd} (Default)' ) run_list_command = Option( '-run-list-command', help='Command to get extra run targets @@@ (Default)' ) run_shell_command = Option( '-run-shell-command', help='Run command to execute that runs in shell @@@ {terminal} -e {cmd} (Default)' ) scroll_method = Option( '-scroll-method', help='Scrolling method. (0: Page, 1: Centered) @@@ 0 (Default)' ) scrollbar_width = Option( '-scrollbar-width', help='Scrollbar width *DEPRECATED* @@@ 8 (Default)' ) select = Option('-select', help='Select the first row that matches') selected_row = Option('-selected-row', help='Select row') sep = Option('-sep', default_value='\x00') separator_style = Option( '-separator-style', help='Separator style (none, dash, solid) *DEPRECATED* @@@ solid (XResources)' ) show = Option( '-show', help="Show the mode 'mode' and exit. The mode has to be enabled." ) show_icons = Flag( '-show-icons', help='Whether to load and show icons @@@ False (Default)' ) show_match = Flag( '-show-match', help='Indicate how it match by underlining it. @@@ True (Default)' ) sidebar_mode = Flag('-sidebar-mode', help='Enable sidebar-mode @@@ False (Default)') sort = Flag('-sort', help='Use sorting @@@ False (Default)') ssh_client = Option('-ssh-client', help='Ssh client to use @@@ ssh (Default)') ssh_command = Option( '-ssh-command', help='Ssh command to execute @@@ {terminal} -e {ssh-client} {host} (Default)' ) sync = Flag( '-sync', help='Force dmenu to first read all input data, then show dialog.' ) terminal = Option( '-terminal', help='Terminal to use @@@ rofi-sensible-terminal (Default)' ) theme = Option('-theme', help='New style theme file @@@ (unset) (Default)') threads = Option( '-threads', help='Threads to use for string matching @@@ 0 (Default)' ) tokenize = Flag('-tokenize', help='Tokenize input string @@@ True (Default)') u = Option('-u', help='List of row indexes to mark urgent') version = Flag('-version', help='Print the version number and exit.') w = Flag( '-w', help='windowid Position over window with X11 windowid.' ) width = Option('-width', help='Window width @@@ 50 (Default)') window_command = Option( '-window-command', help='Command executed on accep-entry-custom for window modus @@@ xkill -id {window} (Default)' ) window_format = Option( '-window-format', help='Window Format. w (desktop name), t (title), n (name), r (role), c (class) *DEPRECATED* @@@ {w} {i}{c} {t} (Default)' ) window_match_fields = Option( '-window-match-fields', help='Window fields to match in window mode @@@ all (Default)' ) xoffset = Option('-xoffset', help='X-offset relative to location @@@ 0 (Default)') yoffset = Option('-yoffset', help='Y-offset relative to location @@@ 0 (Default)')
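# ---------------------------------------------------------------------------
# Illustrative note (not part of the generated class): the Flag/Option
# declarations above only describe rofi's command line.  At run time a
# dmenu-style wrapper ends up doing something equivalent to the sketch below:
# launch `rofi -dmenu` with the selected switches, feed the entries on stdin
# and read the chosen entry back from stdout.  This is a minimal sketch of the
# underlying rofi invocation, assuming default newline-separated input; it is
# not dynmen's actual implementation, and the helper name is hypothetical.
import subprocess


def _rofi_dmenu_sketch(entries, prompt='select', case_insensitive=True, lines=15):
    """Run `rofi -dmenu` over `entries` and return the selection, or None."""
    cmd = ['rofi', '-dmenu', '-p', prompt, '-lines', str(lines)]
    if case_insensitive:
        cmd.append('-i')  # same switch as the `i` Flag declared above
    proc = subprocess.run(cmd, input='\n'.join(entries),
                          capture_output=True, text=True)
    selection = proc.stdout.rstrip('\n')
    return selection or None

# Example: _rofi_dmenu_sketch(['alpha', 'beta', 'gamma'], prompt='pick one')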
#!/usr/bin/env python # Copyright 2015 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run interop (cross-language) tests in parallel.""" from __future__ import print_function import argparse import atexit import itertools import json import multiprocessing import os import re import subprocess import sys import tempfile import time import uuid import six import traceback import python_utils.dockerjob as dockerjob import python_utils.jobset as jobset import python_utils.report_utils as report_utils # It's ok to not import because this is only necessary to upload results to BQ. try: from python_utils.upload_test_results import upload_interop_results_to_bq except ImportError as e: print(e) # Docker doesn't clean up after itself, so we do it on exit. atexit.register(lambda: subprocess.call(['stty', 'echo'])) ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..')) os.chdir(ROOT) _DEFAULT_SERVER_PORT = 8080 _SKIP_CLIENT_COMPRESSION = [ 'client_compressed_unary', 'client_compressed_streaming' ] _SKIP_SERVER_COMPRESSION = [ 'server_compressed_unary', 'server_compressed_streaming' ] _SKIP_COMPRESSION = _SKIP_CLIENT_COMPRESSION + _SKIP_SERVER_COMPRESSION _SKIP_ADVANCED = [ 'status_code_and_message', 'custom_metadata', 'unimplemented_method', 'unimplemented_service' ] _TEST_TIMEOUT = 3 * 60 # disable this test on core-based languages, # see https://github.com/grpc/grpc/issues/9779 _SKIP_DATA_FRAME_PADDING = ['data_frame_padding'] # report suffix is important for reports to get picked up by internal CI _INTERNAL_CL_XML_REPORT = 'sponge_log.xml' # report suffix is important for reports to get picked up by internal CI _XML_REPORT = 'report.xml' class CXXLanguage: def __init__(self): self.client_cwd = None self.server_cwd = None self.http2_cwd = None self.safename = 'cxx' def client_cmd(self, args): return ['bins/opt/interop_client'] + args def client_cmd_http2interop(self, args): return ['bins/opt/http2_client'] + args def cloud_to_prod_env(self): return {} def server_cmd(self, args): return ['bins/opt/interop_server'] + args def global_env(self): return {} def unimplemented_test_cases(self): return _SKIP_DATA_FRAME_PADDING def unimplemented_test_cases_server(self): return [] def __str__(self): return 'c++' class CSharpLanguage: def __init__(self): self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/net45' self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/net45' self.safename = str(self) def client_cmd(self, args): return ['mono', 'Grpc.IntegrationTesting.Client.exe'] + args def cloud_to_prod_env(self): return {} def server_cmd(self, args): return ['mono', 'Grpc.IntegrationTesting.Server.exe'] + args def global_env(self): return {} def unimplemented_test_cases(self): return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING def unimplemented_test_cases_server(self): return _SKIP_COMPRESSION def __str__(self): return 'csharp' class CSharpCoreCLRLanguage: def __init__(self): self.client_cwd = 
'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/netcoreapp1.0' self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/netcoreapp1.0' self.safename = str(self) def client_cmd(self, args): return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Client.dll'] + args def cloud_to_prod_env(self): return {} def server_cmd(self, args): return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Server.dll'] + args def global_env(self): return {} def unimplemented_test_cases(self): return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING def unimplemented_test_cases_server(self): return _SKIP_COMPRESSION def __str__(self): return 'csharpcoreclr' class DartLanguage: def __init__(self): self.client_cwd = '../grpc-dart/interop' self.server_cwd = '../grpc-dart/interop' self.http2_cwd = '../grpc-dart/interop' self.safename = str(self) def client_cmd(self, args): return ['dart', 'bin/client.dart'] + args def cloud_to_prod_env(self): return {} def server_cmd(self, args): return ['dart', 'bin/server.dart'] + args def global_env(self): return {} def unimplemented_test_cases(self): return _SKIP_COMPRESSION def unimplemented_test_cases_server(self): return _SKIP_COMPRESSION def __str__(self): return 'dart' class JavaLanguage: def __init__(self): self.client_cwd = '../grpc-java' self.server_cwd = '../grpc-java' self.http2_cwd = '../grpc-java' self.safename = str(self) def client_cmd(self, args): return ['./run-test-client.sh'] + args def client_cmd_http2interop(self, args): return [ './interop-testing/build/install/grpc-interop-testing/bin/http2-client' ] + args def cloud_to_prod_env(self): return {} def server_cmd(self, args): return ['./run-test-server.sh'] + args def global_env(self): return {} def unimplemented_test_cases(self): return [] def unimplemented_test_cases_server(self): return _SKIP_COMPRESSION def __str__(self): return 'java' class JavaOkHttpClient: def __init__(self): self.client_cwd = '../grpc-java' self.safename = 'java' def client_cmd(self, args): return ['./run-test-client.sh', '--use_okhttp=true'] + args def cloud_to_prod_env(self): return {} def global_env(self): return {} def unimplemented_test_cases(self): return _SKIP_DATA_FRAME_PADDING def __str__(self): return 'javaokhttp' class GoLanguage: def __init__(self): # TODO: this relies on running inside docker self.client_cwd = '/go/src/google.golang.org/grpc/interop/client' self.server_cwd = '/go/src/google.golang.org/grpc/interop/server' self.http2_cwd = '/go/src/google.golang.org/grpc/interop/http2' self.safename = str(self) def client_cmd(self, args): return ['go', 'run', 'client.go'] + args def client_cmd_http2interop(self, args): return ['go', 'run', 'negative_http2_client.go'] + args def cloud_to_prod_env(self): return {} def server_cmd(self, args): return ['go', 'run', 'server.go'] + args def global_env(self): return {} def unimplemented_test_cases(self): return _SKIP_COMPRESSION def unimplemented_test_cases_server(self): return _SKIP_COMPRESSION def __str__(self): return 'go' class Http2Server: """Represents the HTTP/2 Interop Test server This pretends to be a language in order to be built and run, but really it isn't. 
""" def __init__(self): self.server_cwd = None self.safename = str(self) def server_cmd(self, args): return ['python test/http2_test/http2_test_server.py'] def cloud_to_prod_env(self): return {} def global_env(self): return {} def unimplemented_test_cases(self): return _TEST_CASES + _SKIP_DATA_FRAME_PADDING def unimplemented_test_cases_server(self): return _TEST_CASES def __str__(self): return 'http2' class Http2Client: """Represents the HTTP/2 Interop Test This pretends to be a language in order to be built and run, but really it isn't. """ def __init__(self): self.client_cwd = None self.safename = str(self) def client_cmd(self, args): return ['tools/http2_interop/http2_interop.test', '-test.v'] + args def cloud_to_prod_env(self): return {} def global_env(self): return {} def unimplemented_test_cases(self): return _TEST_CASES def unimplemented_test_cases_server(self): return _TEST_CASES def __str__(self): return 'http2' class NodeLanguage: def __init__(self): self.client_cwd = '../grpc-node' self.server_cwd = '../grpc-node' self.safename = str(self) def client_cmd(self, args): return [ 'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh', 'node', '--require', './test/fixtures/native_native', 'test/interop/interop_client.js' ] + args def cloud_to_prod_env(self): return {} def server_cmd(self, args): return [ 'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh', 'node', '--require', './test/fixtures/native_native', 'test/interop/interop_server.js' ] + args def global_env(self): return {} def unimplemented_test_cases(self): return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING def unimplemented_test_cases_server(self): return _SKIP_COMPRESSION def __str__(self): return 'node' class NodePureJSLanguage: def __init__(self): self.client_cwd = '../grpc-node' self.server_cwd = '../grpc-node' self.safename = str(self) def client_cmd(self, args): return [ 'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh', 'node', '--require', './test/fixtures/js_js', 'test/interop/interop_client.js' ] + args def cloud_to_prod_env(self): return {} def global_env(self): return {} def unimplemented_test_cases(self): return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING def unimplemented_test_cases_server(self): return [] def __str__(self): return 'nodepurejs' class PHPLanguage: def __init__(self): self.client_cwd = None self.safename = str(self) def client_cmd(self, args): return ['src/php/bin/interop_client.sh'] + args def cloud_to_prod_env(self): return {} def global_env(self): return {} def unimplemented_test_cases(self): return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING def unimplemented_test_cases_server(self): return [] def __str__(self): return 'php' class PHP7Language: def __init__(self): self.client_cwd = None self.safename = str(self) def client_cmd(self, args): return ['src/php/bin/interop_client.sh'] + args def cloud_to_prod_env(self): return {} def global_env(self): return {} def unimplemented_test_cases(self): return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING def unimplemented_test_cases_server(self): return [] def __str__(self): return 'php7' class ObjcLanguage: def __init__(self): self.client_cwd = 'src/objective-c/tests' self.safename = str(self) def client_cmd(self, args): # from args, extract the server port and craft xcodebuild command out of it for arg in args: port = re.search('--server_port=(\d+)', arg) if port: portnum = port.group(1) cmdline = 'pod install && xcodebuild -workspace Tests.xcworkspace -scheme InteropTestsLocalSSL 
-destination name="iPhone 6" HOST_PORT_LOCALSSL=localhost:%s test' % portnum return [cmdline] def cloud_to_prod_env(self): return {} def global_env(self): return {} def unimplemented_test_cases(self): # ObjC test runs all cases with the same command. It ignores the testcase # cmdline argument. Here we return all but one test cases as unimplemented, # and depend upon ObjC test's behavior that it runs all cases even when # we tell it to run just one. return _TEST_CASES[1:] + _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING def unimplemented_test_cases_server(self): return _SKIP_COMPRESSION def __str__(self): return 'objc' class RubyLanguage: def __init__(self): self.client_cwd = None self.server_cwd = None self.safename = str(self) def client_cmd(self, args): return [ 'tools/run_tests/interop/with_rvm.sh', 'ruby', 'src/ruby/pb/test/client.rb' ] + args def cloud_to_prod_env(self): return {} def server_cmd(self, args): return [ 'tools/run_tests/interop/with_rvm.sh', 'ruby', 'src/ruby/pb/test/server.rb' ] + args def global_env(self): return {} def unimplemented_test_cases(self): return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING def unimplemented_test_cases_server(self): return _SKIP_COMPRESSION def __str__(self): return 'ruby' class PythonLanguage: def __init__(self): self.client_cwd = None self.server_cwd = None self.http2_cwd = None self.safename = str(self) def client_cmd(self, args): return [ 'py27_native/bin/python', 'src/python/grpcio_tests/setup.py', 'run_interop', '--client', '--args="{}"'.format(' '.join(args)) ] def client_cmd_http2interop(self, args): return [ 'py27_native/bin/python', 'src/python/grpcio_tests/tests/http2/negative_http2_client.py', ] + args def cloud_to_prod_env(self): return {} def server_cmd(self, args): return [ 'py27_native/bin/python', 'src/python/grpcio_tests/setup.py', 'run_interop', '--server', '--args="{}"'.format(' '.join(args)) ] def global_env(self): return { 'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT), 'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT) } def unimplemented_test_cases(self): return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING def unimplemented_test_cases_server(self): return _SKIP_COMPRESSION def __str__(self): return 'python' _LANGUAGES = { 'c++': CXXLanguage(), 'csharp': CSharpLanguage(), 'csharpcoreclr': CSharpCoreCLRLanguage(), 'dart': DartLanguage(), 'go': GoLanguage(), 'java': JavaLanguage(), 'javaokhttp': JavaOkHttpClient(), 'node': NodeLanguage(), 'nodepurejs': NodePureJSLanguage(), 'php': PHPLanguage(), 'php7': PHP7Language(), 'objc': ObjcLanguage(), 'ruby': RubyLanguage(), 'python': PythonLanguage(), } # languages supported as cloud_to_cloud servers _SERVERS = [ 'c++', 'node', 'csharp', 'csharpcoreclr', 'java', 'go', 'ruby', 'python', 'dart' ] _TEST_CASES = [ 'large_unary', 'empty_unary', 'ping_pong', 'empty_stream', 'client_streaming', 'server_streaming', 'cancel_after_begin', 'cancel_after_first_response', 'timeout_on_sleeping_server', 'custom_metadata', 'status_code_and_message', 'unimplemented_method', 'client_compressed_unary', 'server_compressed_unary', 'client_compressed_streaming', 'server_compressed_streaming', 'unimplemented_service' ] _AUTH_TEST_CASES = [ 'compute_engine_creds', 'jwt_token_creds', 'oauth2_auth_token', 'per_rpc_creds' ] _HTTP2_TEST_CASES = ['tls', 'framing'] _HTTP2_SERVER_TEST_CASES = [ 'rst_after_header', 'rst_after_data', 'rst_during_data', 'goaway', 'ping', 'max_streams', 'data_frame_padding', 'no_df_padding_sanity_test' ] 
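# Illustrative note: the language registry and case lists above are combined
# later in this script by plain set filtering -- a (client, server) pair runs
# every entry of _TEST_CASES that the client does not list in
# unimplemented_test_cases() and the server does not list in
# unimplemented_test_cases_server().  The helper below is a small sketch of
# that filtering step (the real scheduling happens in the jobspec loops
# further down); the helper name is hypothetical and not used elsewhere.
def _runnable_test_cases_sketch(client_name, server_name):
    """Return the _TEST_CASES entries that both sides implement."""
    client = _LANGUAGES[client_name]
    server = _LANGUAGES[server_name]
    skipped = set(client.unimplemented_test_cases())
    skipped |= set(server.unimplemented_test_cases_server())
    return [case for case in _TEST_CASES if case not in skipped]

# For example, _runnable_test_cases_sketch('python', 'go') drops the
# compression cases, since both sides declare _SKIP_COMPRESSION.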
_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = { 'data_frame_padding': 'large_unary', 'no_df_padding_sanity_test': 'large_unary' } _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys( ) _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = [ 'java', 'go', 'python', 'c++' ] #TODO: Add c++ when c++ ALTS interop client is ready. _LANGUAGES_FOR_ALTS_TEST_CASES = ['java', 'go'] #TODO: Add c++ when c++ ALTS interop server is ready. _SERVERS_FOR_ALTS_TEST_CASES = ['java', 'go'] _TRANSPORT_SECURITY_OPTIONS = ['tls', 'alts', 'insecure'] DOCKER_WORKDIR_ROOT = '/var/local/git/grpc' def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None): """Wraps given cmdline array to create 'docker run' cmdline from it.""" docker_cmdline = ['docker', 'run', '-i', '--rm=true'] # turn environ into -e docker args if environ: for k, v in environ.items(): docker_cmdline += ['-e', '%s=%s' % (k, v)] # set working directory workdir = DOCKER_WORKDIR_ROOT if cwd: workdir = os.path.join(workdir, cwd) docker_cmdline += ['-w', workdir] docker_cmdline += docker_args + [image] + cmdline return docker_cmdline def manual_cmdline(docker_cmdline, docker_image): """Returns docker cmdline adjusted for manual invocation.""" print_cmdline = [] for item in docker_cmdline: if item.startswith('--name='): continue if item == docker_image: item = "$docker_image" item = item.replace('"', '\\"') # add quotes when necessary if any(character.isspace() for character in item): item = "\"%s\"" % item print_cmdline.append(item) return ' '.join(print_cmdline) def write_cmdlog_maybe(cmdlog, filename): """Returns docker cmdline adjusted for manual invocation.""" if cmdlog: with open(filename, 'w') as logfile: logfile.write('#!/bin/bash\n') logfile.writelines("%s\n" % line for line in cmdlog) print('Command log written to file %s' % filename) def bash_cmdline(cmdline): """Creates bash -c cmdline from args list.""" # Use login shell: # * makes error messages clearer if executables are missing return ['bash', '-c', ' '.join(cmdline)] def compute_engine_creds_required(language, test_case): """Returns True if given test requires access to compute engine creds.""" language = str(language) if test_case == 'compute_engine_creds': return True if test_case == 'oauth2_auth_token' and language == 'c++': # C++ oauth2 test uses GCE creds because C++ only supports JWT return True return False def auth_options(language, test_case, service_account_key_file=None): """Returns (cmdline, env) tuple with cloud_to_prod_auth test options.""" language = str(language) cmdargs = [] env = {} if not service_account_key_file: # this file path only works inside docker service_account_key_file = '/root/service_account/GrpcTesting-726eb1347f15.json' oauth_scope_arg = '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo' key_file_arg = '--service_account_key_file=%s' % service_account_key_file default_account_arg = '--default_service_account=830293263384-compute@developer.gserviceaccount.com' if test_case in ['jwt_token_creds', 'per_rpc_creds', 'oauth2_auth_token']: if language in [ 'csharp', 'csharpcoreclr', 'node', 'php', 'php7', 'python', 'ruby', 'nodepurejs' ]: env['GOOGLE_APPLICATION_CREDENTIALS'] = service_account_key_file else: cmdargs += [key_file_arg] if test_case in ['per_rpc_creds', 'oauth2_auth_token']: cmdargs += [oauth_scope_arg] if test_case == 'oauth2_auth_token' and language == 'c++': # C++ oauth2 test uses GCE creds and thus needs to know the default account cmdargs += 
[default_account_arg] if test_case == 'compute_engine_creds': cmdargs += [oauth_scope_arg, default_account_arg] return (cmdargs, env) def _job_kill_handler(job): if job._spec.container_name: dockerjob.docker_kill(job._spec.container_name) # When the job times out and we decide to kill it, # we need to wait a before restarting the job # to prevent "container name already in use" error. # TODO(jtattermusch): figure out a cleaner way to to this. time.sleep(2) def cloud_to_prod_jobspec(language, test_case, server_host_name, server_host_detail, docker_image=None, auth=False, manual_cmd_log=None, service_account_key_file=None): """Creates jobspec for cloud-to-prod interop test""" container_name = None cmdargs = [ '--server_host=%s' % server_host_detail[0], '--server_host_override=%s' % server_host_detail[1], '--server_port=443', '--use_tls=true', '--test_case=%s' % test_case ] environ = dict(language.cloud_to_prod_env(), **language.global_env()) if auth: auth_cmdargs, auth_env = auth_options(language, test_case, service_account_key_file) cmdargs += auth_cmdargs environ.update(auth_env) cmdline = bash_cmdline(language.client_cmd(cmdargs)) cwd = language.client_cwd if docker_image: container_name = dockerjob.random_name( 'interop_client_%s' % language.safename) cmdline = docker_run_cmdline( cmdline, image=docker_image, cwd=cwd, environ=environ, docker_args=['--net=host', '--name=%s' % container_name]) if manual_cmd_log is not None: if manual_cmd_log == []: manual_cmd_log.append( 'echo "Testing ${docker_image:=%s}"' % docker_image) manual_cmd_log.append(manual_cmdline(cmdline, docker_image)) cwd = None environ = None suite_name = 'cloud_to_prod_auth' if auth else 'cloud_to_prod' test_job = jobset.JobSpec( cmdline=cmdline, cwd=cwd, environ=environ, shortname='%s:%s:%s:%s' % (suite_name, language, server_host_name, test_case), timeout_seconds=_TEST_TIMEOUT, flake_retries=4 if args.allow_flakes else 0, timeout_retries=2 if args.allow_flakes else 0, kill_handler=_job_kill_handler) if docker_image: test_job.container_name = container_name return test_job def cloud_to_cloud_jobspec(language, test_case, server_name, server_host, server_port, docker_image=None, transport_security='tls', manual_cmd_log=None): """Creates jobspec for cloud-to-cloud interop test""" interop_only_options = [ '--server_host_override=foo.test.google.fr', '--use_test_ca=true', ] if transport_security == 'tls': interop_only_options += ['--use_tls=true'] elif transport_security == 'alts': interop_only_options += ['--use_tls=false', '--use_alts=true'] elif transport_security == 'insecure': interop_only_options += ['--use_tls=false'] else: print('Invalid transport security option.') sys.exit(1) client_test_case = test_case if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS: client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[ test_case] if client_test_case in language.unimplemented_test_cases(): print('asking client %s to run unimplemented test case %s' % (repr(language), client_test_case)) sys.exit(1) common_options = [ '--test_case=%s' % client_test_case, '--server_host=%s' % server_host, '--server_port=%s' % server_port, ] if test_case in _HTTP2_SERVER_TEST_CASES: if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS: client_options = interop_only_options + common_options cmdline = bash_cmdline(language.client_cmd(client_options)) cwd = language.client_cwd else: cmdline = bash_cmdline( language.client_cmd_http2interop(common_options)) cwd = language.http2_cwd else: cmdline = bash_cmdline( 
language.client_cmd(common_options + interop_only_options)) cwd = language.client_cwd environ = language.global_env() if docker_image and language.safename != 'objc': # we can't run client in docker for objc. container_name = dockerjob.random_name( 'interop_client_%s' % language.safename) cmdline = docker_run_cmdline( cmdline, image=docker_image, environ=environ, cwd=cwd, docker_args=['--net=host', '--name=%s' % container_name]) if manual_cmd_log is not None: if manual_cmd_log == []: manual_cmd_log.append( 'echo "Testing ${docker_image:=%s}"' % docker_image) manual_cmd_log.append(manual_cmdline(cmdline, docker_image)) cwd = None test_job = jobset.JobSpec( cmdline=cmdline, cwd=cwd, environ=environ, shortname='cloud_to_cloud:%s:%s_server:%s' % (language, server_name, test_case), timeout_seconds=_TEST_TIMEOUT, flake_retries=4 if args.allow_flakes else 0, timeout_retries=2 if args.allow_flakes else 0, kill_handler=_job_kill_handler) if docker_image: test_job.container_name = container_name return test_job def server_jobspec(language, docker_image, transport_security='tls', manual_cmd_log=None): """Create jobspec for running a server""" container_name = dockerjob.random_name( 'interop_server_%s' % language.safename) server_cmd = ['--port=%s' % _DEFAULT_SERVER_PORT] if transport_security == 'tls': server_cmd += ['--use_tls=true'] elif transport_security == 'alts': server_cmd += ['--use_tls=false', '--use_alts=true'] elif transport_security == 'insecure': server_cmd += ['--use_tls=false'] else: print('Invalid transport security option.') sys.exit(1) cmdline = bash_cmdline(language.server_cmd(server_cmd)) environ = language.global_env() docker_args = ['--name=%s' % container_name] if language.safename == 'http2': # we are running the http2 interop server. Open next N ports beginning # with the server port. These ports are used for http2 interop test # (one test case per port). docker_args += list( itertools.chain.from_iterable( ('-p', str(_DEFAULT_SERVER_PORT + i)) for i in range(len(_HTTP2_SERVER_TEST_CASES)))) # Enable docker's healthcheck mechanism. # This runs a Python script inside the container every second. The script # pings the http2 server to verify it is ready. The 'health-retries' flag # specifies the number of consecutive failures before docker will report # the container's status as 'unhealthy'. Prior to the first 'health_retries' # failures or the first success, the status will be 'starting'. 'docker ps' # or 'docker inspect' can be used to see the health of the container on the # command line. 
docker_args += [ '--health-cmd=python test/http2_test/http2_server_health_check.py ' '--server_host=%s --server_port=%d' % ('localhost', _DEFAULT_SERVER_PORT), '--health-interval=1s', '--health-retries=5', '--health-timeout=10s', ] else: docker_args += ['-p', str(_DEFAULT_SERVER_PORT)] docker_cmdline = docker_run_cmdline( cmdline, image=docker_image, cwd=language.server_cwd, environ=environ, docker_args=docker_args) if manual_cmd_log is not None: if manual_cmd_log == []: manual_cmd_log.append( 'echo "Testing ${docker_image:=%s}"' % docker_image) manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image)) server_job = jobset.JobSpec( cmdline=docker_cmdline, environ=environ, shortname='interop_server_%s' % language, timeout_seconds=30 * 60) server_job.container_name = container_name return server_job def build_interop_image_jobspec(language, tag=None): """Creates jobspec for building interop docker image for a language""" if not tag: tag = 'grpc_interop_%s:%s' % (language.safename, uuid.uuid4()) env = { 'INTEROP_IMAGE': tag, 'BASE_NAME': 'grpc_interop_%s' % language.safename } if not args.travis: env['TTY_FLAG'] = '-t' # This env variable is used to get around the github rate limit # error when running the PHP `composer install` command host_file = '%s/.composer/auth.json' % os.environ['HOME'] if language.safename == 'php' and os.path.exists(host_file): env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \ '-v %s:/root/.composer/auth.json:ro' % host_file build_job = jobset.JobSpec( cmdline=['tools/run_tests/dockerize/build_interop_image.sh'], environ=env, shortname='build_docker_%s' % (language), timeout_seconds=30 * 60) build_job.tag = tag return build_job def aggregate_http2_results(stdout): match = re.search(r'\{"cases[^\]]*\]\}', stdout) if not match: return None results = json.loads(match.group(0)) skipped = 0 passed = 0 failed = 0 failed_cases = [] for case in results['cases']: if case.get('skipped', False): skipped += 1 else: if case.get('passed', False): passed += 1 else: failed += 1 failed_cases.append(case.get('name', "NONAME")) return { 'passed': passed, 'failed': failed, 'skipped': skipped, 'failed_cases': ', '.join(failed_cases), 'percent': 1.0 * passed / (passed + failed) } # A dictionary of prod servers to test. # Format: server_name: (server_host, server_host_override, errors_allowed) # TODO(adelez): implement logic for errors_allowed where if the indicated tests # fail, they don't impact the overall test result. prod_servers = { 'default': ('216.239.32.254', 'grpc-test.sandbox.googleapis.com', False), 'gateway_v2': ('216.239.32.254', 'grpc-test2.sandbox.googleapis.com', True), 'cloud_gateway': ('216.239.32.255', 'grpc-test.sandbox.googleapis.com', False), 'cloud_gateway_v2': ('216.239.32.255', 'grpc-test2.sandbox.googleapis.com', True), 'gateway_v4': ('216.239.32.254', 'grpc-test4.sandbox.googleapis.com', True), 'cloud_gateway_v4': ('216.239.32.255', 'grpc-test4.sandbox.googleapis.com', True), } argp = argparse.ArgumentParser(description='Run interop tests.') argp.add_argument( '-l', '--language', choices=['all'] + sorted(_LANGUAGES), nargs='+', default=['all'], help='Clients to run. 
Objc client can be only run on OSX.') argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int) argp.add_argument( '--cloud_to_prod', default=False, action='store_const', const=True, help='Run cloud_to_prod tests.') argp.add_argument( '--cloud_to_prod_auth', default=False, action='store_const', const=True, help='Run cloud_to_prod_auth tests.') argp.add_argument( '--prod_servers', choices=prod_servers.keys(), default=['default'], nargs='+', help=('The servers to run cloud_to_prod and ' 'cloud_to_prod_auth tests against.')) argp.add_argument( '-s', '--server', choices=['all'] + sorted(_SERVERS), nargs='+', help='Run cloud_to_cloud servers in a separate docker ' + 'image. Servers can only be started automatically if ' + '--use_docker option is enabled.', default=[]) argp.add_argument( '--override_server', action='append', type=lambda kv: kv.split('='), help= 'Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000', default=[]) argp.add_argument( '--service_account_key_file', type=str, help= 'Override the default service account key file to use for auth interop tests.', default=None) argp.add_argument( '-t', '--travis', default=False, action='store_const', const=True) argp.add_argument( '-v', '--verbose', default=False, action='store_const', const=True) argp.add_argument( '--use_docker', default=False, action='store_const', const=True, help='Run all the interop tests under docker. That provides ' + 'additional isolation and prevents the need to install ' + 'language specific prerequisites. Only available on Linux.') argp.add_argument( '--allow_flakes', default=False, action='store_const', const=True, help= 'Allow flaky tests to show as passing (re-runs failed tests up to five times)' ) argp.add_argument( '--manual_run', default=False, action='store_const', const=True, help='Prepare things for running interop tests manually. ' + 'Preserve docker images after building them and skip ' 'actually running the tests. Only print commands to run by ' + 'hand.') argp.add_argument( '--http2_interop', default=False, action='store_const', const=True, help='Enable HTTP/2 client edge case testing. (Bad client, good server)') argp.add_argument( '--http2_server_interop', default=False, action='store_const', const=True, help= 'Enable HTTP/2 server edge case testing. (Includes positive and negative tests' ) argp.add_argument( '--transport_security', choices=_TRANSPORT_SECURITY_OPTIONS, default='tls', type=str, nargs='?', const=True, help='Which transport security mechanism to use.') argp.add_argument( '--skip_compute_engine_creds', default=False, action='store_const', const=True, help='Skip auth tests requiring access to compute engine credentials.') argp.add_argument( '--internal_ci', default=False, action='store_const', const=True, help=('Put reports into subdirectories to improve ' 'presentation of results by Internal CI.')) argp.add_argument( '--bq_result_table', default='', type=str, nargs='?', help='Upload test results to a specified BQ table.') args = argp.parse_args() servers = set( s for s in itertools.chain.from_iterable( _SERVERS if x == 'all' else [x] for x in args.server)) # ALTS servers are only available for certain languages. 
if args.transport_security == 'alts': servers = servers.intersection(_SERVERS_FOR_ALTS_TEST_CASES) if args.use_docker: if not args.travis: print('Seen --use_docker flag, will run interop tests under docker.') print('') print( 'IMPORTANT: The changes you are testing need to be locally committed' ) print( 'because only the committed changes in the current branch will be') print('copied to the docker environment.') time.sleep(5) if args.manual_run and not args.use_docker: print('--manual_run is only supported with --use_docker option enabled.') sys.exit(1) if not args.use_docker and servers: print( 'Running interop servers is only supported with --use_docker option enabled.' ) sys.exit(1) # we want to include everything but objc in 'all' # because objc won't run on non-mac platforms all_but_objc = set(six.iterkeys(_LANGUAGES)) - set(['objc']) languages = set(_LANGUAGES[l] for l in itertools.chain.from_iterable( all_but_objc if x == 'all' else [x] for x in args.language)) # ALTS interop clients are only available for certain languages. if args.transport_security == 'alts': alts_languages = set(_LANGUAGES[l] for l in _LANGUAGES_FOR_ALTS_TEST_CASES) languages = languages.intersection(alts_languages) languages_http2_clients_for_http2_server_interop = set() if args.http2_server_interop: languages_http2_clients_for_http2_server_interop = set( _LANGUAGES[l] for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES if 'all' in args.language or l in args.language) http2Interop = Http2Client() if args.http2_interop else None http2InteropServer = Http2Server() if args.http2_server_interop else None docker_images = {} if args.use_docker: # languages for which to build docker images languages_to_build = set( _LANGUAGES[k] for k in set([str(l) for l in languages] + [s for s in servers])) languages_to_build = languages_to_build | languages_http2_clients_for_http2_server_interop if args.http2_interop: languages_to_build.add(http2Interop) if args.http2_server_interop: languages_to_build.add(http2InteropServer) build_jobs = [] for l in languages_to_build: if str(l) == 'objc': # we don't need to build a docker image for objc continue job = build_interop_image_jobspec(l) docker_images[str(l)] = job.tag build_jobs.append(job) if build_jobs: jobset.message( 'START', 'Building interop docker images.', do_newline=True) if args.verbose: print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs)) num_failures, _ = jobset.run( build_jobs, newline_on_success=True, maxjobs=args.jobs) if num_failures == 0: jobset.message( 'SUCCESS', 'All docker images built successfully.', do_newline=True) else: jobset.message( 'FAILED', 'Failed to build interop docker images.', do_newline=True) for image in six.itervalues(docker_images): dockerjob.remove_image(image, skip_nonexistent=True) sys.exit(1) server_manual_cmd_log = [] if args.manual_run else None client_manual_cmd_log = [] if args.manual_run else None # Start interop servers. 
server_jobs = {} server_addresses = {} try: for s in servers: lang = str(s) spec = server_jobspec( _LANGUAGES[lang], docker_images.get(lang), args.transport_security, manual_cmd_log=server_manual_cmd_log) if not args.manual_run: job = dockerjob.DockerJob(spec) server_jobs[lang] = job server_addresses[lang] = ('localhost', job.mapped_port(_DEFAULT_SERVER_PORT)) else: # don't run the server, set server port to a placeholder value server_addresses[lang] = ('localhost', '${SERVER_PORT}') http2_server_job = None if args.http2_server_interop: # launch a HTTP2 server emulator that creates edge cases lang = str(http2InteropServer) spec = server_jobspec( http2InteropServer, docker_images.get(lang), manual_cmd_log=server_manual_cmd_log) if not args.manual_run: http2_server_job = dockerjob.DockerJob(spec) server_jobs[lang] = http2_server_job else: # don't run the server, set server port to a placeholder value server_addresses[lang] = ('localhost', '${SERVER_PORT}') jobs = [] if args.cloud_to_prod: if args.transport_security != 'tls': print('TLS is always enabled for cloud_to_prod scenarios.') for server_host_name in args.prod_servers: for language in languages: for test_case in _TEST_CASES: if not test_case in language.unimplemented_test_cases(): if not test_case in _SKIP_ADVANCED + _SKIP_COMPRESSION: test_job = cloud_to_prod_jobspec( language, test_case, server_host_name, prod_servers[server_host_name], docker_image=docker_images.get(str(language)), manual_cmd_log=client_manual_cmd_log, service_account_key_file=args. service_account_key_file) jobs.append(test_job) if args.http2_interop: for test_case in _HTTP2_TEST_CASES: test_job = cloud_to_prod_jobspec( http2Interop, test_case, server_host_name, prod_servers[server_host_name], docker_image=docker_images.get(str(http2Interop)), manual_cmd_log=client_manual_cmd_log, service_account_key_file=args.service_account_key_file) jobs.append(test_job) if args.cloud_to_prod_auth: if args.transport_security != 'tls': print('TLS is always enabled for cloud_to_prod scenarios.') for server_host_name in args.prod_servers: for language in languages: for test_case in _AUTH_TEST_CASES: if (not args.skip_compute_engine_creds or not compute_engine_creds_required( language, test_case)): if not test_case in language.unimplemented_test_cases(): test_job = cloud_to_prod_jobspec( language, test_case, server_host_name, prod_servers[server_host_name], docker_image=docker_images.get(str(language)), auth=True, manual_cmd_log=client_manual_cmd_log, service_account_key_file=args. 
service_account_key_file) jobs.append(test_job) for server in args.override_server: server_name = server[0] (server_host, server_port) = server[1].split(':') server_addresses[server_name] = (server_host, server_port) for server_name, server_address in server_addresses.items(): (server_host, server_port) = server_address server_language = _LANGUAGES.get(server_name, None) skip_server = [] # test cases unimplemented by server if server_language: skip_server = server_language.unimplemented_test_cases_server() for language in languages: for test_case in _TEST_CASES: if not test_case in language.unimplemented_test_cases(): if not test_case in skip_server: test_job = cloud_to_cloud_jobspec( language, test_case, server_name, server_host, server_port, docker_image=docker_images.get(str(language)), transport_security=args.transport_security, manual_cmd_log=client_manual_cmd_log) jobs.append(test_job) if args.http2_interop: for test_case in _HTTP2_TEST_CASES: if server_name == "go": # TODO(carl-mastrangelo): Reenable after https://github.com/grpc/grpc-go/issues/434 continue test_job = cloud_to_cloud_jobspec( http2Interop, test_case, server_name, server_host, server_port, docker_image=docker_images.get(str(http2Interop)), transport_security=args.transport_security, manual_cmd_log=client_manual_cmd_log) jobs.append(test_job) if args.http2_server_interop: if not args.manual_run: http2_server_job.wait_for_healthy(timeout_seconds=600) for language in languages_http2_clients_for_http2_server_interop: for test_case in set(_HTTP2_SERVER_TEST_CASES) - set( _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS): offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case) server_port = _DEFAULT_SERVER_PORT + offset if not args.manual_run: server_port = http2_server_job.mapped_port(server_port) test_job = cloud_to_cloud_jobspec( language, test_case, str(http2InteropServer), 'localhost', server_port, docker_image=docker_images.get(str(language)), manual_cmd_log=client_manual_cmd_log) jobs.append(test_job) for language in languages: # HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is a subset of # HTTP_SERVER_TEST_CASES, in which clients use their gRPC interop clients rather # than specialized http2 clients, reusing existing test implementations. # For example, in the "data_frame_padding" test, use language's gRPC # interop clients and make them think that theyre running "large_unary" # test case. This avoids implementing a new test case in each language. for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS: if test_case not in language.unimplemented_test_cases(): offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case) server_port = _DEFAULT_SERVER_PORT + offset if not args.manual_run: server_port = http2_server_job.mapped_port(server_port) if args.transport_security != 'insecure': print( ('Creating grpc client to http2 server test case ' 'with insecure connection, even though ' 'args.transport_security is not insecure. 
Http2 ' 'test server only supports insecure connections.')) test_job = cloud_to_cloud_jobspec( language, test_case, str(http2InteropServer), 'localhost', server_port, docker_image=docker_images.get(str(language)), transport_security='insecure', manual_cmd_log=client_manual_cmd_log) jobs.append(test_job) if not jobs: print('No jobs to run.') for image in six.itervalues(docker_images): dockerjob.remove_image(image, skip_nonexistent=True) sys.exit(1) if args.manual_run: print('All tests will skipped --manual_run option is active.') if args.verbose: print('Jobs to run: \n%s\n' % '\n'.join(str(job) for job in jobs)) num_failures, resultset = jobset.run( jobs, newline_on_success=True, maxjobs=args.jobs, skip_jobs=args.manual_run) if args.bq_result_table and resultset: upload_interop_results_to_bq(resultset, args.bq_result_table, args) if num_failures: jobset.message('FAILED', 'Some tests failed', do_newline=True) else: jobset.message('SUCCESS', 'All tests passed', do_newline=True) write_cmdlog_maybe(server_manual_cmd_log, 'interop_server_cmds.sh') write_cmdlog_maybe(client_manual_cmd_log, 'interop_client_cmds.sh') xml_report_name = _XML_REPORT if args.internal_ci: xml_report_name = _INTERNAL_CL_XML_REPORT report_utils.render_junit_xml_report(resultset, xml_report_name) for name, job in resultset.items(): if "http2" in name: job[0].http2results = aggregate_http2_results(job[0].message) http2_server_test_cases = (_HTTP2_SERVER_TEST_CASES if args.http2_server_interop else []) report_utils.render_interop_html_report( set([str(l) for l in languages]), servers, _TEST_CASES, _AUTH_TEST_CASES, _HTTP2_TEST_CASES, http2_server_test_cases, resultset, num_failures, args.cloud_to_prod_auth or args.cloud_to_prod, args.prod_servers, args.http2_interop) if num_failures: sys.exit(1) else: sys.exit(0) except Exception as e: print('exception occurred:') traceback.print_exc(file=sys.stdout) finally: # Check if servers are still running. for server, job in server_jobs.items(): if not job.is_running(): print('Server "%s" has exited prematurely.' % server) dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)]) for image in six.itervalues(docker_images): if not args.manual_run: print('Removing docker image %s' % image) dockerjob.remove_image(image) else: print('Preserving docker image: %s' % image)
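# Illustrative note: docker_run_cmdline() above simply wraps a test command in
# `docker run` -- environment variables become -e flags, the working directory
# is anchored under DOCKER_WORKDIR_ROOT, and extra docker arguments are placed
# before the image name.  The function below is a self-checking sketch of that
# expansion; the image tag and client command are hypothetical values chosen
# only to make the shape of the output concrete.
def _docker_cmdline_example():
    """Show the shape of docker_run_cmdline()'s output for sample arguments."""
    cmdline = docker_run_cmdline(
        ['bins/opt/interop_client', '--test_case=large_unary'],
        image='grpc_interop_cxx:1234',  # hypothetical image tag
        environ={'FOO': 'bar'},
        docker_args=['--net=host'])
    assert cmdline == [
        'docker', 'run', '-i', '--rm=true',
        '-e', 'FOO=bar',
        '-w', DOCKER_WORKDIR_ROOT,
        '--net=host', 'grpc_interop_cxx:1234',
        'bins/opt/interop_client', '--test_case=large_unary'
    ]
    return cmdline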
# -*- coding: UTF-8 -*- from django.test import Client, TestCase from django.contrib.auth.models import Group from django.core.cache import cache from django.core.urlresolvers import reverse from django.utils.translation import ugettext as _ from django.utils import simplejson from privilege.core.config import GROUP_CACHE_KEY class GroupTestCases(TestCase): fixtures = ['privilege.json'] def setUp(self): TestCase.setUp(self) self.client = Client() def tearDown(self): self.client.logout() TestCase.tearDown(self) def test_group_list_not_login(self): group_list_url = reverse("privilege.views.group.group_list", args=(1, )) self.check_not_login(group_list_url) def test_group_list_logined_but_not_superuser(self): group_list_url = reverse("privilege.views.group.group_list", args=(1, )) self.check_not_superuser(group_list_url) def test_group_list_ok(self): group_list_url = reverse("privilege.views.group.group_list", args=(1, )) self.client.login(username="super", password="test") response = self.client.get(group_list_url) self.assertEqual(response.status_code, 200) self.assertTrue(response.context["page"].object_list) def test_group_detail_not_login(self): group_detail_url = reverse("privilege.views.group.group_detail", args=(1, 1,)) self.check_not_login(group_detail_url) def test_get_group_detail_logined_but_not_superuser(self): group_detail_url = reverse("privilege.views.group.group_detail", args=(1, 1,)) self.check_not_superuser(group_detail_url) def test_get_group_detail_not_exist(self): group_detail_url = reverse("privilege.views.group.group_detail", args=(0, 1,)) self.client.login(username="super", password="test") response = self.client.get(group_detail_url) self.assertEqual(response.status_code, 404) def test_get_group_detail_ok(self): group_detail_url = reverse("privilege.views.group.group_detail", args=(1, 1,)) self.client.login(username="super", password="test") response = self.client.get(group_detail_url) self.assertEqual(response.status_code, 200) self.assertTrue(response.context["group"]) def test_change_group_permission_not_login(self): change_group_url = reverse("privilege.views.group.change_group_permission") self.check_not_login(change_group_url) def test_change_group_permission_not_super_user(self): change_group_url = reverse("privilege.views.group.change_group_permission") self.check_not_superuser(change_group_url) def test_change_group_permission_get_method(self): change_group_url = reverse("privilege.views.group.change_group_permission") self.client.login(username="super", password="test") response = self.client.get(change_group_url) self.assertEqual(response.status_code, 200) expect_content = simplejson.dumps({"status": "nok", "msg": _("Fail")}) self.assertEqual(response.content, expect_content) def test_change_group_permission_not_exist(self): change_group_url = reverse("privilege.views.group.change_group_permission") post_data = {"group_id": 0} self.client.login(username="super", password="test") response = self.client.post(change_group_url, post_data) self.assertEqual(response.status_code, 200) expect_content = simplejson.dumps({"status": "nok", "msg": _("Fail")}) self.assertEqual(response.content, expect_content) def test_change_group_permission_post_bad_params(self): change_group_url = reverse("privilege.views.group.change_group_permission") post_data = {"group_id": 1, "permission_id": ""} self.client.login(username="super", password="test") response = self.client.post(change_group_url, post_data) self.assertEqual(response.status_code, 200) expect_content = 
simplejson.dumps({"status": "nok", "msg": _("Fail")}) self.assertEqual(response.content, expect_content) def test_change_group_permission_ok(self): change_group_url = reverse("privilege.views.group.change_group_permission") post_data = {"group_id": 1, "permission_id": "1", "op_code": "add"} self.client.login(username="super", password="test") response = self.client.post(change_group_url, post_data) self.assertEqual(response.status_code, 200) expect_content = simplejson.dumps({"status": "ok", "msg": _("Success")}) self.assertEqual(response.content, expect_content) cache.set(GROUP_CACHE_KEY, None) def test_add_group_not_login(self): add_group_url = reverse("privilege.views.group.add_group") self.check_not_login(add_group_url) def test_add_group_not_superuser(self): add_group_url = reverse("privilege.views.group.add_group") self.check_not_superuser(add_group_url) def test_add_group_not_post(self): add_group_url = reverse("privilege.views.group.add_group") self.client.login(username="super", password="test") response = self.client.get(add_group_url) self.assertEqual(response.status_code, 200) self.assertTrue(response.context["form"]) def test_add_group_post_blank(self): add_group_url = reverse("privilege.views.group.add_group") self.client.login(username="super", password="test") response = self.client.post(add_group_url, {"name": ""}) self.assertEqual(response.status_code, 200) self.assertTrue(response.context["form"].errors) def test_add_group_ok(self): add_group_url = reverse("privilege.views.group.add_group") self.client.login(username="super", password="test") response = self.client.post(add_group_url, {"name": "add_success"}) self.assertEqual(response.status_code, 302) self.assertTrue(Group.objects.filter(name="add_success").count()) Group.objects.filter(name="add_success").delete() cache.set(GROUP_CACHE_KEY, None) def test_edit_group_not_login(self): edit_group_url = reverse("privilege.views.group.edit_group", args=(1, )) self.check_not_login(edit_group_url) def test_edit_group_not_superuser(self): edit_group_url = reverse("privilege.views.group.edit_group", args=(1, )) self.check_not_superuser(edit_group_url) def test_test_edit_group_not_exist(self): edit_group_url = reverse("privilege.views.group.edit_group", args=(0, )) self.client.login(username="super", password="test") response = self.client.get(edit_group_url) self.assertEqual(response.status_code, 404) def test_test_edit_group_not_post(self): edit_group_url = reverse("privilege.views.group.edit_group", args=(1, )) self.client.login(username="super", password="test") response = self.client.get(edit_group_url) self.assertEqual(response.status_code, 200) self.assertTrue(response.context["form"]) def test_test_edit_group_post_blank(self): edit_group_url = reverse("privilege.views.group.edit_group", args=(1, )) self.client.login(username="super", password="test") response = self.client.post(edit_group_url, {"name": ""}) self.assertEqual(response.status_code, 200) self.assertTrue(response.context["form"].errors) def test_test_edit_group_ok(self): group = Group.objects.create(name="to_delete") edit_group_url = reverse("privilege.views.group.edit_group", args=(group.id, )) self.client.login(username="super", password="test") response = self.client.post(edit_group_url, {"name": "changed"}) self.assertEqual(response.status_code, 302) group = Group.objects.get(id=group.id) self.assertEqual(group.name, "changed") group.delete() cache.set(GROUP_CACHE_KEY, None) def test_delete_grooup_not_login(self): delete_group_url = 
reverse("privilege.views.group.delete_group", args=(1, )) self.check_not_login(delete_group_url) def test_delete_grooup_not_superuser(self): delete_group_url = reverse("privilege.views.group.delete_group", args=(1, )) self.check_not_superuser(delete_group_url) def test_delete_grooup_ok(self): delete_group_url = reverse("privilege.views.group.delete_group", args=(0, )) response = self.client.post(delete_group_url) self.assertEqual(response.status_code, 302) cache.set(GROUP_CACHE_KEY, None) def check_not_login(self, url): response = self.client.get(url) self.assertEqual(response.status_code, 302) def check_not_superuser(self, url): self.client.login(username="test", password="test") response = self.client.get(url) self.assertEqual(response.status_code, 403)
# TODO: Use the fact that axis can have units to simplify the process import functools import numpy as np from matplotlib import pylab from pandas._libs.tslibs.period import Period from pandas.core.dtypes.generic import ( ABCPeriodIndex, ABCDatetimeIndex, ABCTimedeltaIndex) from pandas.tseries.offsets import DateOffset import pandas.tseries.frequencies as frequencies from pandas.io.formats.printing import pprint_thing import pandas.compat as compat from pandas.plotting._converter import (TimeSeries_DateLocator, TimeSeries_DateFormatter, TimeSeries_TimedeltaFormatter) # --------------------------------------------------------------------- # Plotting functions and monkey patches def tsplot(series, plotf, ax=None, **kwargs): import warnings """ Plots a Series on the given Matplotlib axes or the current axes Parameters ---------- axes : Axes series : Series Notes _____ Supports same kwargs as Axes.plot .. deprecated:: 0.23.0 Use Series.plot() instead """ warnings.warn("'tsplot' is deprecated and will be removed in a " "future version. Please use Series.plot() instead.", FutureWarning, stacklevel=2) # Used inferred freq is possible, need a test case for inferred if ax is None: import matplotlib.pyplot as plt ax = plt.gca() freq, series = _maybe_resample(series, ax, kwargs) # Set ax with freq info _decorate_axes(ax, freq, kwargs) ax._plot_data.append((series, plotf, kwargs)) lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs) # set date formatter, locators and rescale limits format_dateaxis(ax, ax.freq, series.index) return lines def _maybe_resample(series, ax, kwargs): # resample against axes freq if necessary freq, ax_freq = _get_freq(ax, series) if freq is None: # pragma: no cover raise ValueError('Cannot use dynamic axis without frequency info') # Convert DatetimeIndex to PeriodIndex if isinstance(series.index, ABCDatetimeIndex): series = series.to_period(freq=freq) if ax_freq is not None and freq != ax_freq: if frequencies.is_superperiod(freq, ax_freq): # upsample input series = series.copy() series.index = series.index.asfreq(ax_freq, how='s') freq = ax_freq elif _is_sup(freq, ax_freq): # one is weekly how = kwargs.pop('how', 'last') series = getattr(series.resample('D'), how)().dropna() series = getattr(series.resample(ax_freq), how)().dropna() freq = ax_freq elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq): _upsample_others(ax, freq, kwargs) else: # pragma: no cover raise ValueError('Incompatible frequency conversion') return freq, series def _is_sub(f1, f2): return ((f1.startswith('W') and frequencies.is_subperiod('D', f2)) or (f2.startswith('W') and frequencies.is_subperiod(f1, 'D'))) def _is_sup(f1, f2): return ((f1.startswith('W') and frequencies.is_superperiod('D', f2)) or (f2.startswith('W') and frequencies.is_superperiod(f1, 'D'))) def _upsample_others(ax, freq, kwargs): legend = ax.get_legend() lines, labels = _replot_ax(ax, freq, kwargs) _replot_ax(ax, freq, kwargs) other_ax = None if hasattr(ax, 'left_ax'): other_ax = ax.left_ax if hasattr(ax, 'right_ax'): other_ax = ax.right_ax if other_ax is not None: rlines, rlabels = _replot_ax(other_ax, freq, kwargs) lines.extend(rlines) labels.extend(rlabels) if (legend is not None and kwargs.get('legend', True) and len(lines) > 0): title = legend.get_title().get_text() if title == 'None': title = None ax.legend(lines, labels, loc='best', title=title) def _replot_ax(ax, freq, kwargs): data = getattr(ax, '_plot_data', None) # clear current axes and data ax._plot_data = [] ax.clear() _decorate_axes(ax, 
freq, kwargs) lines = [] labels = [] if data is not None: for series, plotf, kwds in data: series = series.copy() idx = series.index.asfreq(freq, how='S') series.index = idx ax._plot_data.append((series, plotf, kwds)) # for tsplot if isinstance(plotf, compat.string_types): from pandas.plotting._core import _plot_klass plotf = _plot_klass[plotf]._plot lines.append(plotf(ax, series.index._mpl_repr(), series.values, **kwds)[0]) labels.append(pprint_thing(series.name)) return lines, labels def _decorate_axes(ax, freq, kwargs): """Initialize axes for time-series plotting""" if not hasattr(ax, '_plot_data'): ax._plot_data = [] ax.freq = freq xaxis = ax.get_xaxis() xaxis.freq = freq if not hasattr(ax, 'legendlabels'): ax.legendlabels = [kwargs.get('label', None)] else: ax.legendlabels.append(kwargs.get('label', None)) ax.view_interval = None ax.date_axis_info = None def _get_ax_freq(ax): """ Get the freq attribute of the ax object if set. Also checks shared axes (eg when using secondary yaxis, sharex=True or twinx) """ ax_freq = getattr(ax, 'freq', None) if ax_freq is None: # check for left/right ax in case of secondary yaxis if hasattr(ax, 'left_ax'): ax_freq = getattr(ax.left_ax, 'freq', None) elif hasattr(ax, 'right_ax'): ax_freq = getattr(ax.right_ax, 'freq', None) if ax_freq is None: # check if a shared ax (sharex/twinx) has already freq set shared_axes = ax.get_shared_x_axes().get_siblings(ax) if len(shared_axes) > 1: for shared_ax in shared_axes: ax_freq = getattr(shared_ax, 'freq', None) if ax_freq is not None: break return ax_freq def _get_freq(ax, series): # get frequency from data freq = getattr(series.index, 'freq', None) if freq is None: freq = getattr(series.index, 'inferred_freq', None) ax_freq = _get_ax_freq(ax) # use axes freq if no data freq if freq is None: freq = ax_freq # get the period frequency if isinstance(freq, DateOffset): freq = freq.rule_code else: freq = frequencies.get_base_alias(freq) freq = frequencies.get_period_alias(freq) return freq, ax_freq def _use_dynamic_x(ax, data): freq = _get_index_freq(data) ax_freq = _get_ax_freq(ax) if freq is None: # convert irregular if axes has freq info freq = ax_freq else: # do not use tsplot if irregular was plotted first if (ax_freq is None) and (len(ax.get_lines()) > 0): return False if freq is None: return False if isinstance(freq, DateOffset): freq = freq.rule_code else: freq = frequencies.get_base_alias(freq) freq = frequencies.get_period_alias(freq) if freq is None: return False # hack this for 0.10.1, creating more technical debt...sigh if isinstance(data.index, ABCDatetimeIndex): base = frequencies.get_freq(freq) x = data.index if (base <= frequencies.FreqGroup.FR_DAY): return x[:1].is_normalized return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0] return True def _get_index_freq(data): freq = getattr(data.index, 'freq', None) if freq is None: freq = getattr(data.index, 'inferred_freq', None) if freq == 'B': weekdays = np.unique(data.index.dayofweek) if (5 in weekdays) or (6 in weekdays): freq = None return freq def _maybe_convert_index(ax, data): # tsplot converts automatically, but don't want to convert index # over and over for DataFrames if isinstance(data.index, ABCDatetimeIndex): freq = getattr(data.index, 'freq', None) if freq is None: freq = getattr(data.index, 'inferred_freq', None) if isinstance(freq, DateOffset): freq = freq.rule_code if freq is None: freq = _get_ax_freq(ax) if freq is None: raise ValueError('Could not get frequency alias for plotting') freq = frequencies.get_base_alias(freq) freq = 
frequencies.get_period_alias(freq) data = data.to_period(freq=freq) return data # Patch methods for subplot. Only format_dateaxis is currently used. # Do we need the rest for convenience? def format_timedelta_ticks(x, pos, n_decimals): """ Convert seconds to 'D days HH:MM:SS.F' """ s, ns = divmod(x, 1e9) m, s = divmod(s, 60) h, m = divmod(m, 60) d, h = divmod(h, 24) decimals = int(ns * 10**(n_decimals - 9)) s = r'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)) if n_decimals > 0: s += '.{{:0{:0d}d}}'.format(n_decimals).format(decimals) if d != 0: s = '{:d} days '.format(int(d)) + s return s def _format_coord(freq, t, y): return "t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y) def format_dateaxis(subplot, freq, index): """ Pretty-formats the date axis (x-axis). Major and minor ticks are automatically set for the frequency of the current underlying series. As the dynamic mode is activated by default, changing the limits of the x axis will intelligently change the positions of the ticks. """ # handle index specific formatting # Note: DatetimeIndex does not use this # interface. DatetimeIndex uses matplotlib.date directly if isinstance(index, ABCPeriodIndex): majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot) minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot) subplot.xaxis.set_major_locator(majlocator) subplot.xaxis.set_minor_locator(minlocator) majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot) minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot) subplot.xaxis.set_major_formatter(majformatter) subplot.xaxis.set_minor_formatter(minformatter) # x and y coord info subplot.format_coord = functools.partial(_format_coord, freq) elif isinstance(index, ABCTimedeltaIndex): subplot.xaxis.set_major_formatter( TimeSeries_TimedeltaFormatter()) else: raise TypeError('index type not supported') pylab.draw_if_interactive()
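
# ---------------------------------------------------------------------
# Illustrative usage sketch (not part of pandas itself): the deprecation
# note in tsplot() points callers at Series.plot().  A PeriodIndex-backed
# Series exercises the same locator/formatter machinery that
# format_dateaxis() wires up above.  The frequency and sample data are
# arbitrary example values.
if __name__ == '__main__':
    import numpy as _np
    import pandas as _pd
    import matplotlib.pyplot as _plt

    _idx = _pd.period_range('2000-01', periods=24, freq='M')
    _ts = _pd.Series(_np.random.randn(24).cumsum(), index=_idx)

    # Series.plot() detects the PeriodIndex and installs the dynamic
    # TimeSeries_DateLocator / TimeSeries_DateFormatter pair on the x-axis.
    _ts.plot()
    _plt.show()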
#!/usr/bin/python # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_image_info version_added: "2.9" short_description: Get facts about azure custom images description: - List azure custom images. The images can be listed where scope of listing can be based on subscription, resource group, name or tags. options: resource_group: description: - Name of resource group. name: description: - Name of the image to filter from existing images. tags: description: - List of tags to be matched. extends_documentation_fragment: - azure author: - Madhura Naniwadekar (@Madhura-CSI) ''' EXAMPLES = ''' - name: List images with name azure_rm_image_info: name: test-image resource_group: myResourceGroup - name: List images by resource group azure_rm_image_info: resource_group: myResourceGroup tags: - testing - foo:bar - name: List all available images under current subscription azure_rm_image_info: ''' RETURN = ''' images: description: - List of image dicts. returned: always type: complex contains: id: description: - Id of the image. returned: always type: str sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/images/xx name: description: - Name of the image. returned: always type: str resource_group: description: - Resource group of the image. returned: always type: str sample: myResourceGroup location: description: - Location of the image. returned: always type: str os_disk: description: - Id of os disk for image. type: str sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/disks/xx os_disk_caching: description: - Specifies caching requirements for the image. returned: always type: str os_state: description: - Specifies image operating system state. Possible values are C(Generalized) or C(Specialized). returned: always type: str sample: Generalized os_storage_account_type: description: - Specifies the storage account type for the managed disk. type: str sample: Standard_LRS os_type: description: - Type of OS for image. returned: always type: str sample: Linux provisioning_state: description: - State of image. returned: always type: str sample: Succeeded source: description: - Resource id of source VM from which the image is created. type: str sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/xx tags: description: - Dictionary of tags associated with the image. type: complex data_disks: description: - List of data disks associated with the image. type: complex returned: always contains: caching: description: - Type of caching of data disk. sample: read_only disk_size_gb: description: - Specifies the size of empty data disks in gigabytes. returned: always type: int sample: 50 lun: description: - Specifies the logical unit number of the data disk. returned: always type: int sample: 0 storage_account_type: description: - Specifies the storage account type for the managed disk data disk. type: str sample: Standard_LRS managed_disk_id: description: - Id of managed disk. 
type: str sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/disks/xx blob_uri: description: - The virtual hard disk. ''' try: from msrestazure.azure_exceptions import CloudError except Exception: # This is handled in azure_rm_common pass from ansible.module_utils.azure_rm_common import AzureRMModuleBase AZURE_ENUM_MODULES = ['azure.mgmt.compute.models'] class AzureRMImageInfo(AzureRMModuleBase): def __init__(self, **kwargs): self.module_arg_spec = dict( resource_group=dict(type='str'), name=dict(type='str'), tags=dict(type='list') ) self.results = dict( changed=False ) self.resource_group = None self.name = None self.format = None self.tags = None super(AzureRMImageInfo, self).__init__( derived_arg_spec=self.module_arg_spec, supports_tags=False, facts_module=True ) def exec_module(self, **kwargs): is_old_facts = self.module._name == 'azure_rm_image_facts' if is_old_facts: self.module.deprecate("The 'azure_rm_image_facts' module has been renamed to 'azure_rm_image_info'", version='2.13') for key in self.module_arg_spec: setattr(self, key, kwargs[key]) if self.name and self.resource_group: self.results['images'] = self.get_image(self.resource_group, self.name) elif self.name and not self.resource_group: self.results['images'] = self.list_images(self.name) elif not self.name and self.resource_group: self.results['images'] = self.list_images_by_resource_group(self.resource_group) elif not self.name and not self.resource_group: self.results['images'] = self.list_images() return self.results def get_image(self, resource_group, image_name): ''' Returns image details based on its name ''' self.log('Get properties for {0}'.format(self.name)) result = [] item = None try: item = self.compute_client.images.get(resource_group, image_name) except CloudError as exc: self.fail('Failed to list images - {0}'.format(str(exc))) result = [self.format_item(item)] return result def list_images_by_resource_group(self, resource_group): ''' Returns image details based on its resource group ''' self.log('List images filtered by resource group') response = None try: response = self.compute_client.images.list_by_resource_group(resource_group) except CloudError as exc: self.fail("Failed to list images: {0}".format(str(exc))) return [self.format_item(x) for x in response if self.has_tags(x.tags, self.tags)] if response else [] def list_images(self, image_name=None): ''' Returns image details in current subscription ''' self.log('List images within current subscription') response = None results = [] try: response = self.compute_client.images.list() except CloudError as exc: self.fail("Failed to list all images: {0}".format(str(exc))) results = [self.format_item(x) for x in response if self.has_tags(x.tags, self.tags)] if response else [] if image_name: results = [result for result in results if result['name'] == image_name] return results def format_item(self, item): d = item.as_dict() for data_disk in d['storage_profile']['data_disks']: if 'managed_disk' in data_disk.keys(): data_disk['managed_disk_id'] = data_disk['managed_disk']['id'] data_disk.pop('managed_disk', None) d = { 'id': d['id'], 'resource_group': d['id'].split('/')[4], 'name': d['name'], 'location': d['location'], 'tags': d.get('tags'), 'source': d['source_virtual_machine']['id'] if 'source_virtual_machine' in d.keys() else None, 'os_type': d['storage_profile']['os_disk']['os_type'], 'os_state': d['storage_profile']['os_disk']['os_state'], 'os_disk_caching': 
d['storage_profile']['os_disk']['caching'],
            'os_storage_account_type': d['storage_profile']['os_disk']['storage_account_type'],
            'os_disk': d['storage_profile']['os_disk']['managed_disk']['id'] if 'managed_disk' in d['storage_profile']['os_disk'].keys() else None,
            'os_blob_uri': d['storage_profile']['os_disk']['blob_uri'] if 'blob_uri' in d['storage_profile']['os_disk'].keys() else None,
            'provisioning_state': d['provisioning_state'],
            'data_disks': d['storage_profile']['data_disks']
        }
        return d


def main():
    AzureRMImageInfo()


if __name__ == '__main__':
    main()
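
# ---------------------------------------------------------------------
# Illustrative sketch (not part of the module): format_item() above rewrites
# each data disk's nested 'managed_disk' dict into a flat 'managed_disk_id'
# key.  The helper below is a name introduced here, not an Ansible or Azure
# SDK API; it repeats just that step on any as_dict()-style mapping so the
# transformation can be inspected without an Azure connection.
def _flatten_data_disks_example(image_dict):
    """Return the data_disks list with 'managed_disk' collapsed to its id."""
    disks = image_dict.get('storage_profile', {}).get('data_disks', [])
    for disk in disks:
        if 'managed_disk' in disk:
            disk['managed_disk_id'] = disk['managed_disk']['id']
            disk.pop('managed_disk', None)
    return disks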
# [email protected] from __future__ import division import urllib import urllib2 from os import walk import os import time from PIL import Image from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from DataBaseHandler import DatabaseHandler from androdd import dump_all_method import json from collections import Counter import unicodedata import numpy as np from shutil import copyfile import xlsxwriter class Main(): def __init__(self): self.db = DatabaseHandler() def generat(self): f = [] for (dirpath, dirnames, filenames) in walk(self.constant.getInputDir()): f.extend(filenames) return f def get_all_files_in_directory(directory): f = [] for (dirpath, dirnames, filenames) in walk(directory): f.extend(filenames) return f def get_all_files_withpath_in_directory(directory): f = [] for (dirpath, dirnames, filenames) in walk(directory): if filenames: for item in filenames: fillee = dirpath + '/' + item f.append(fillee) return f def clean_up_folder(folder): for the_file in os.listdir(folder): file_path = os.path.join(folder, the_file) try: if os.path.isfile(file_path): os.unlink(file_path) except Exception as e: print(e) def func_weight_p_op1_op2(sample_mal, sample_mal_1, vector): cal_class = [] for iii in range(0, len(sample_mal)): sample_vector = {} dict_y = Counter(sample_mal_1[iii]) dict_x = Counter(sample_mal[iii]) for op_seq in vector: print str(op_seq) spliter = op_seq.strip().split() x = 0 y = 0 if spliter[0] in dict_y: y = dict_y[spliter[0]] if op_seq in dict_x: x = dict_x[op_seq] if y != 0: p = x / y else: p = 0 sample_vector[op_seq] = p cal_class.append(sample_vector) return cal_class def func_weight_freq(sample_mal): cal_class = [] for iii in range(0, len(sample_mal)): dict_x = Counter(sample_mal[iii]) cal_class.append(dict_x) return cal_class def write_arff(dataset, class1, class2): final_op_set = [] opcode_bank = {} index_helper_x = 0 seen = set() for item in class1: for key, value in item.iteritems(): splitter = key.strip().split() if splitter[0] not in seen: final_op_set.append(splitter[0]) opcode_bank[splitter[0]] = index_helper_x index_helper_x = index_helper_x + 1 seen.add(splitter[0]) if splitter[1] not in seen: final_op_set.append(splitter[1]) opcode_bank[splitter[1]] = index_helper_x index_helper_x = index_helper_x + 1 seen.add(splitter[1]) for item in class2: for key, value in item.iteritems(): splitter = key.strip().split() if splitter[0] not in seen: final_op_set.append(splitter[0]) opcode_bank[splitter[0]] = index_helper_x index_helper_x = index_helper_x + 1 seen.add(splitter[0]) if splitter[1] not in seen: final_op_set.append(splitter[1]) opcode_bank[splitter[1]] = index_helper_x index_helper_x = index_helper_x + 1 seen.add(splitter[1]) data_fp = open(dataset, "w") data_fp.write('''@RELATION OpcodeSequence ''') data_fp.write("\n") for opc_i in final_op_set: for opc_j in final_op_set: name = str(opc_i) + str(opc_j) data_fp.write("@ATTRIBUTE %s NUMERIC \n" % name) data_fp.write("@ATTRIBUTE Class1 {mal,bin} \n") data_fp.write("\n") data_fp.write("@DATA") data_fp.write("\n") for item in class1: image = np.array([[0.0 for j in range(len(final_op_set))] for i in range(len(final_op_set))]) for opc_i in final_op_set: for opc_j in final_op_set: x = opcode_bank[opc_i] y = opcode_bank[opc_j] key = str(str(opc_i) + " " + str(opc_j)) print key if key in item: image[x][y] = item[str(opc_i) + " " + str(opc_j)] data_fp.write(str(item[str(opc_i) + " " + str(opc_j)]) + ",") else: data_fp.write("0" + ",") data_fp.write("mal") data_fp.write("\n") for item in class2: image 
= np.array([[0.0 for j in range(len(final_op_set))] for i in range(len(final_op_set))]) for opc_i in final_op_set: for opc_j in final_op_set: x = opcode_bank[opc_i] y = opcode_bank[opc_j] key = str(str(opc_i) + " " + str(opc_j)) print key if key in item: image[x][y] = item[str(opc_i) + " " + str(opc_j)] data_fp.write(str(item[str(opc_i) + " " + str(opc_j)]) + ",") else: data_fp.write("0" + ",") data_fp.write("bin") data_fp.write("\n") def write_arff_n_opcode(i, dataset, class1, class2): final_op_set = [] seen = set() for item in class1: for key in item: if key not in seen: final_op_set.append(key) seen.add(key) for item in class2: for key in item: if key not in seen: final_op_set.append(key) seen.add(key) data_fp = open(dataset, "w") data_fp.write('''@RELATION OpcodeSequence ''') data_fp.write("\n") for x in range(0,len(final_op_set)): name = str(x) data_fp.write("@ATTRIBUTE %s NUMERIC \n" % name) data_fp.write("@ATTRIBUTE Class1 {mal,bin} \n") data_fp.write("\n") data_fp.write("@DATA") data_fp.write("\n") for item in class1: for key in final_op_set: if key in item: data_fp.write(str(item[key]) + ",") else: data_fp.write("0" + ",") data_fp.write("mal") data_fp.write("\n") for item in class1: for key in final_op_set: if key in item: data_fp.write(str(item[key]) + ",") else: data_fp.write("0" + ",") data_fp.write("bin") data_fp.write("\n") def capture_image(repo,dump_method_dir): db = DatabaseHandler() samples = db.select_sample_all() vector = [] sample = [] sample_name = [] sample_1 = [] seen = set() for item in samples: try: # Generate Opcode Seq for every sample dump_all_method(repo + item[1], dump_method_dir) opcode_sequence = check_opcode(dump_method_dir,2) opcode_list1 = check_opcode2(dump_method_dir) # Add opcode seq to class belong if item[1].endswith(".apk"): sample.append(opcode_sequence) sample_1.append(opcode_list1) sample_name.append(item[1]) for item in opcode_sequence: if item not in seen: vector.append(item) seen.add(item) except Exception as e: print e sample_class = [] sample_class = func_weight_p_op1_op2(sample, sample_1, vector) final_op_set = [] opcode_bank = {} index_helper_x = 0 seen = set() for item in sample_class: for key, value in item.iteritems(): splitter = key.strip().split() if splitter[0] not in seen: final_op_set.append(splitter[0]) opcode_bank[splitter[0]] = index_helper_x index_helper_x = index_helper_x + 1 seen.add(splitter[0]) if splitter[1] not in seen: final_op_set.append(splitter[1]) opcode_bank[splitter[1]] = index_helper_x index_helper_x = index_helper_x + 1 seen.add(splitter[1]) index_name = 0 for item in sample_class: image = np.array([[0.0 for j in range(256)] for i in range(256)]) for opc_i in final_op_set: for opc_j in final_op_set: x = opcode_bank[opc_i] y = opcode_bank[opc_j] key = str(str(opc_i) + " " + str(opc_j)) if key in item: image[x][y] = item[str(opc_i) + " " + str(opc_j)] else: image[x][y] = 0 rescaled = (255.0 / image.max() * (image - image.min())).astype(np.uint8) im = Image.fromarray(rescaled) im.show() im.save(str(sample_name[index_name])+'.png', 'PNG') index_name = index_name + 1 def opcode_sequence_generator4(repo, dumpMethodDir): db = DatabaseHandler() samples = db.select_sample_all() vector = [] sample_mal = [] sample_bin = [] sample_mal_1 = [] sample_bin_1 = [] sample_bin_name = [] sample_mal_name = [] seen = set() for item in samples: try: # Generate Opcode Seq for every sample if item[1].endswith(".apk"): dump_all_method(repo + item[1], dumpMethodDir) opcode_sequence = check_opcode(dumpMethodDir,2) opcode_list1 = 
check_opcode2(dumpMethodDir) # Add opcode seq to class belong if item[1].startswith('bin_') and item[1].endswith(".apk"): sample_bin.append(opcode_sequence) sample_bin_1.append(opcode_list1) sample_bin_name.append(item[1]) elif item[1].endswith(".apk"): sample_mal.append(opcode_sequence) sample_mal_1.append(opcode_list1) sample_mal_name.append(item[1]) # Generate a Sequence banck for item in opcode_sequence: if item not in seen: vector.append(item) seen.add(item) except Exception as e: print e mal_class = [] bin_class = [] mal_class = func_weight_p_op1_op2(sample_mal, sample_mal_1, vector) bin_class = func_weight_p_op1_op2(sample_bin, sample_bin_1, vector) write_arff(repo + 'result.arff', mal_class, bin_class) output_filename = repo + 'resultLDA.txt' simple_result = repo + 'Expenses01.xlsx' fp_lda = open(output_filename, "w") workbook = xlsxwriter.Workbook(simple_result) worksheet = workbook.add_worksheet() n_fold = 5 top_edge = [] for i in range(2, 250): top_edge.append(i) row_index = 0 for top in top_edge: total_tp = 0 total_tn = 0 total_fp = 0 total_fn = 0 total_acc = 0 total_tpr = 0 total_fpr = 0 total_final_set = 0 name = "************** TOP" + str(top) + " **************" fp_lda.write(name) fp_lda.write('\n') test_count_mal = int(len(mal_class) / n_fold) test_count_bin = int(len(bin_class) / n_fold) p_bin = 0 p_mal = 0 for fold in range(1, n_fold + 1): train_mal_class = [] train_bin_class = [] test_mal_class = [] test_bin_class = [] test_mal_name = [] test_bin_name = [] for i in range(0, len(bin_class)): if i >= p_bin * test_count_bin and i < p_bin * test_count_bin + test_count_bin: test_bin_class.append(bin_class[i]) test_bin_name.append(sample_bin_name[i]) else: train_bin_class.append(bin_class[i]) p_bin = p_bin + 1 for i in range(0, len(mal_class)): if i >= p_mal * test_count_mal and i < p_mal * test_count_mal + test_count_mal: test_mal_class.append(mal_class[i]) test_mal_name.append(sample_mal_name[i]) else: train_mal_class.append(mal_class[i]) p_mal = p_mal + 1 # calculate MIN mal class for every feature MIN_total = {} total_len = len(train_mal_class) + len(train_bin_class) print "start Calculate Mean Malware Class" MIN_mal = {} for feature in vector: sum_feature = 0 for item in train_mal_class: if feature in item: sum_feature = item[feature] + sum_feature MIN_mal[feature] = sum_feature / len(train_mal_class) MIN_total[feature] = sum_feature print "start Calculate Mean Bin Class" MIN_bin = {} for feature in vector: sum_feature = 0 for item in train_bin_class: if feature in item: sum_feature = item[feature] + sum_feature MIN_bin[feature] = sum_feature / len(train_bin_class) MIN_total[feature] = (MIN_total[feature] + sum_feature) / total_len print "start Calculate SW" # Calculate SW SW = {} for feature in vector: sum_feature = 0 for item in train_mal_class: if feature in item and feature in MIN_mal: X = item[feature] - MIN_mal[feature] elif feature in item: X = item[feature] elif feature in MIN_mal: X = MIN_mal[feature] else: X = 0 Y = X * X sum_feature = sum_feature + Y for item in train_bin_class: if feature in item and feature in MIN_bin: X = item[feature] - MIN_bin[feature] elif feature in item: X = item[feature] elif feature in MIN_bin: X = MIN_bin[feature] else: X = 0 Y = X * X sum_feature = sum_feature + Y SW[feature] = sum_feature # Calculate SB print "start Calculate Mean SB" malware_persentage = len(train_mal_class) * 100 / total_len binware_persentage = len(train_mal_class) * 100 / total_len SB = {} for features in vector: if feature in MIN_mal and feature in MIN_bin: 
total_mean = MIN_total[features] SB[features] = (malware_persentage * (MIN_mal[features] - total_mean) * (MIN_mal[features] - total_mean)) + ( binware_persentage * (MIN_bin[features] - total_mean) * (MIN_bin[features] - total_mean)) elif feature in MIN_bin: total_mean = MIN_total[features] SB[features] = (malware_persentage * (0 - total_mean) * (0 - total_mean)) + ( binware_persentage * (MIN_bin[features] - total_mean) * (MIN_bin[features] - total_mean)) elif feature in MIN_mal: total_mean = MIN_total[features] SB[features] = (malware_persentage * (MIN_mal[features] - total_mean) * (MIN_mal[features] - total_mean)) + ( binware_persentage * (0 - total_mean) * (0 - total_mean)) else: total_mean = 0 SB[features] = (malware_persentage * (0 - total_mean) * (0 - total_mean)) + (binware_persentage * (0 - total_mean) * (0 - total_mean)) # Calculate ST print "start Calculate ST" ST = {} for item in vector: if SW[item] != 0: ST[item] = (SB[item]) / SW[item] else: ST[item] = 0 select_top = sorted(ST.iteritems(), key=lambda x: -x[1], reverse=False)[: top] final_op_set = [] opcode_bank = {} index_helper_x = 0 seen = set() for key, value in select_top: splitter = key.strip().split() if splitter[0] not in seen: final_op_set.append(splitter[0]) opcode_bank[splitter[0]] = index_helper_x index_helper_x = index_helper_x + 1 seen.add(splitter[0]) if splitter[1] not in seen: final_op_set.append(splitter[1]) opcode_bank[splitter[1]] = index_helper_x index_helper_x = index_helper_x + 1 seen.add(splitter[1]) len_train = len(train_bin_class) + len(train_mal_class) test_set_mal = np.zeros((len(test_mal_class), len(final_op_set) * len(final_op_set))) test_set_bin = np.zeros((len(test_bin_class), len(final_op_set) * len(final_op_set))) train_set = np.zeros((len_train, len(final_op_set) * len(final_op_set))) train_lable = [] index_train = 0 for item in train_mal_class: image = np.array([[1.0 for j in range(len(final_op_set))] for i in range(len(final_op_set))]) for opc_i in final_op_set: for opc_j in final_op_set: x = opcode_bank[opc_i] y = opcode_bank[opc_j] key = str(str(opc_i) + " " + str(opc_j)) if key in item: image[x][y] = item[str(opc_i) + " " + str(opc_j)] else: image[x][y] = 0 train_set[index_train] = image.flatten() train_lable.append(1) index_train = index_train + 1 for item in train_bin_class: image = np.array([[1.0 for j in range(len(final_op_set))] for i in range(len(final_op_set))]) for opc_i in final_op_set: for opc_j in final_op_set: x = opcode_bank[opc_i] y = opcode_bank[opc_j] key = str(str(opc_i) + " " + str(opc_j)) if key in item: image[x][y] = item[str(opc_i) + " " + str(opc_j)] else: image[x][y] = 0 train_set[index_train] = image.flatten() train_lable.append(0) index_train = index_train + 1 index_test = 0 for item in test_mal_class: image = np.array([[1.0 for j in range(len(final_op_set))] for i in range(len(final_op_set))]) for opc_i in final_op_set: for opc_j in final_op_set: x = opcode_bank[opc_i] y = opcode_bank[opc_j] key = str(str(opc_i) + " " + str(opc_j)) if key in item: image[x][y] = item[str(opc_i) + " " + str(opc_j)] else: image[x][y] = 0 test_set_mal[index_test] = image.flatten() index_test = index_test + 1 index_test = 0 for item in test_bin_class: image = np.array([[1.0 for j in range(len(final_op_set))] for i in range(len(final_op_set))]) for opc_i in final_op_set: for opc_j in final_op_set: x = opcode_bank[opc_i] y = opcode_bank[opc_j] key = str(str(opc_i) + " " + str(opc_j)) if key in item: image[x][y] = item[str(opc_i) + " " + str(opc_j)] else: image[x][y] = 0 
test_set_bin[index_test] = image.flatten() index_test = index_test + 1 clf = LinearDiscriminantAnalysis() clf.fit(train_set, train_lable) tp = 0 tn = 0 fp = 0 fn = 0 fn_name = [] fp_name = [] index_name = 0 for item in test_set_mal: result = clf.predict(item.reshape(1, -1)) if result == 1: tp = tp + 1 else: fn = fn + 1 fn_name.append(test_mal_name[index_name]) index_name = index_name + 1 index_name = 0 for item in test_set_bin: result = clf.predict(item.reshape(1, -1)) if result == 0: tn = tn + 1 else: fp = fp + 1 fp_name.append(test_bin_name[index_name]) index_name = index_name + 1 acc = (tp + tn) / (tp + tn + fp + fn) tpr = (tp) / (tp + fn) fpr = (fp) / (fp + tn) fp_lda.write('\n') fp_lda.write('TP : ' + str(tp)) fp_lda.write('\n') fp_lda.write('TN : ' + str(tn)) fp_lda.write('\n') fp_lda.write('FP : ' + str(fp)) fp_lda.write('\n') fp_lda.write('FN : ' + str(fn)) fp_lda.write('\n') fp_lda.write('ACC : ' + str(acc)) fp_lda.write('\n') fp_lda.write('LEN : ' + str(len(final_op_set))) fp_lda.write('\n') for item in fp_name: fp_lda.write('fp_name : ' + str(item)) fp_lda.write('\n') for item in fn_name: fp_lda.write('fn_name : ' + str(item)) fp_lda.write('\n') total_tp = total_tp + tp total_tn = total_tn + tn total_fp = total_fp + fp total_fn = total_fn + fn total_acc = total_acc + acc total_tpr = total_tpr + tpr total_fpr = total_fpr + fpr total_final_set = len(final_op_set) + total_final_set col_index = 0 worksheet.write(row_index, col_index, total_tp / fold) col_index = col_index + 1 worksheet.write(row_index, col_index, total_fp / fold) col_index = col_index + 1 worksheet.write(row_index, col_index, total_tn / fold) col_index = col_index + 1 worksheet.write(row_index, col_index, total_fn / fold) col_index = col_index + 1 worksheet.write(row_index, col_index, total_tpr / fold) col_index = col_index + 1 worksheet.write(row_index, col_index, total_fpr / fold) col_index = col_index + 1 worksheet.write(row_index, col_index, total_acc / fold) col_index = col_index + 1 worksheet.write(row_index, col_index, top) col_index = col_index + 1 worksheet.write(row_index, col_index, total_final_set / fold) col_index = col_index + 1 row_index = row_index + 1 def opcode_sequence_generator5(repo, dumpMethodDir): db = DatabaseHandler() samples = db.select_sample_all() for i in range(2,11): vector = [] sample_mal = [] sample_bin = [] seen = set() for item in samples: try: if item[1].endswith(".apk"): dump_all_method(repo + item[1], dumpMethodDir) opcode_sequence = check_opcode(dumpMethodDir,i) # Add opcode seq to class belong if item[1].startswith('bin_') and item[1].endswith(".apk"): sample_bin.append(opcode_sequence) elif item[1].endswith(".apk"): sample_mal.append(opcode_sequence) # Generate a Sequence banck for item in opcode_sequence: if item not in seen: vector.append(item) seen.add(item) except Exception as e: print e write_arff_n_opcode(i,repo +str(i)+ '_result.arff', sample_mal, sample_bin) def opcode_sequence_generator6(repo, dumpMethodDir): db = DatabaseHandler() samples = db.select_sample_all() vector = [] sample_bin_banck = [] sample_mal_banck = [] seen = set() for item in samples: sample_mal = [] sample_bin = [] type = 1 try: if item[1].endswith(".apk"): dump_all_method(repo + item[1], dumpMethodDir) for i in range(2,11): opcode_sequence = check_opcode(dumpMethodDir, i) # Add opcode seq to class belong if item[1].startswith('bin_') and item[1].endswith(".apk"): sample_bin.append(opcode_sequence) type = 1 elif item[1].endswith(".apk"): sample_mal.append(opcode_sequence) type = 2 # Generate a 
Sequence banck if type == 1: sample_bin_banck.append(sample_bin) else: sample_mal_banck.append(sample_mal) clean_up_folder(dumpMethodDir) except Exception as e: print e for x in range(0,9): sample_mal_1 = [] sample_bin_1 = [] for y in range(0,len(sample_bin_banck)): sample_bin_1.append(sample_bin_banck[y][x]) for y in range(0, len(sample_mal_banck)): sample_mal_1.append(sample_mal_banck[y][x]) mal_class_w = func_weight_freq(sample_mal_1) bin_class_w = func_weight_freq(sample_bin_1) write_arff_n_opcode(x+2,repo +str(x+2)+ '_result.arff', mal_class_w, bin_class_w) def scan_with_virus_total(path, db=None): files = get_all_files_in_directory(path) for afile in files: try: if '.DS_Store' not in afile: make_virus_total_request(afile.split('.')[0]) except Exception as e: print e def make_virus_total_request(hash, db=None): try: params = {'apikey': 'YOUR_LEY', 'resource': hash} data = urllib.urlencode(params) result = urllib2.urlopen('https://www.virustotal.com/vtapi/v2/file/report', data) jdata = json.loads(result.read()) return parse(jdata, hash) except Exception as e: print e return 'Forbidden' def parse(it, md5, verbose=True, jsondump=True): if it['response_code'] == 0: print md5 + " -- Not Found in VT" return 0 else: return it['positives'] def check_opcode(path_to_dir, n): full_address = (path_to_dir).strip('\n') list_files = get_all_files_withpath_in_directory(full_address) list_general = [] for index in range(0, len(list_files)): temp_file = list_files[index] try: if temp_file.endswith('.ag'): list_opcode = [] file_open = open(temp_file) print temp_file for m in file_open: b = m.strip() if b.startswith('1') or b.startswith('2') or b.startswith('3') or b.startswith('4') or b.startswith('5') or b.startswith('6') or b.startswith('7') or b.startswith('8') or b.startswith('9') or b.startswith('0'): word = [] word = m.strip().split() if len(word) >= 2: list_opcode.append(word[2]) list_general.append(word[2]) print list_opcode except Exception as e: print e list_opcode_sequence = [] for item in range(0, (len(list_general) - n + 1)): if n==2: list_opcode_sequence.append(list_general[item] + ' ' + list_general[item + 1]) elif n==3: list_opcode_sequence.append(list_general[item] + ' ' + list_general[item + 1]+ ' ' + list_general[item + 2]) elif n==4: list_opcode_sequence.append(list_general[item] + ' ' + list_general[item + 1]+ ' ' + list_general[item + 2]+' ' + list_general[item + 3]) elif n==5: list_opcode_sequence.append(list_general[item] + ' ' + list_general[item + 1]+ ' ' + list_general[item + 2]+' ' + list_general[item + 3]+' ' + list_general[item + 4]) elif n==6: list_opcode_sequence.append(list_general[item] + ' ' + list_general[item + 1]+ ' ' + list_general[item + 2]+' ' + list_general[item + 3]+' ' + list_general[item + 4]+' ' + list_general[item + 5]) elif n==7: list_opcode_sequence.append(list_general[item] + ' ' + list_general[item + 1]+ ' ' + list_general[item + 2]+' ' + list_general[item + 3]+' ' + list_general[item + 4]+' ' + list_general[item + 5]+' ' + list_general[item + 6]) elif n==8: list_opcode_sequence.append(list_general[item] + ' ' + list_general[item + 1]+ ' ' + list_general[item + 2]+' ' + list_general[item + 3]+' ' + list_general[item + 4]+' ' + list_general[item + 5]+' ' + list_general[item + 6]+' ' + list_general[item + 7]) elif n==9: list_opcode_sequence.append(list_general[item] + ' ' + list_general[item + 1]+ ' ' + list_general[item + 2]+' ' + list_general[item + 3]+' ' + list_general[item + 4]+' ' + list_general[item + 5]+' ' + list_general[item + 6]+' ' + 
list_general[item + 7]+' ' + list_general[item + 8]) elif n==10: list_opcode_sequence.append(list_general[item] + ' ' + list_general[item + 1]+ ' ' + list_general[item + 2]+' ' + list_general[item + 3]+' ' + list_general[item + 4]+' ' + list_general[item + 5]+' ' + list_general[item + 6]+' ' + list_general[item + 7]+' ' + list_general[item + 8]+' ' + list_general[item + 9]) return list_opcode_sequence def check_opcode2(path_to_dir): full_address = (path_to_dir).strip('\n') list_files = get_all_files_withpath_in_directory(full_address) list_general = [] for index in range(0, len(list_files)): temp_file = list_files[index] try: if temp_file.endswith('.ag'): list_opcode = [] file_open = open(temp_file) print temp_file for m in file_open: b = m.strip() if b.startswith('1') or b.startswith('2') or b.startswith('3') or b.startswith('4') or b.startswith('5') or b.startswith('6') or b.startswith('7') or b.startswith('8') or b.startswith('9') or b.startswith('0'): word = [] word = m.strip().split() if len(word) >= 2: list_opcode.append(word[2]) list_general.append(word[2]) print list_opcode except Exception as e: print e return list_general def fill_samples_table(repo): db = DatabaseHandler() db.recreats_table_samples() files = get_all_files_in_directory(repo) for afile in files: try: if '.DS_Store' not in afile: db.insert_a_sample(afile, '') except Exception as e: print e def update_samples_label(repo): db = DatabaseHandler() samples = db.select_sample_all() for item in samples: isSend = False while not isSend: lable = make_virus_total_request(item[1].split('.')[0]) if 'Forbidden' != lable: shash = unicodedata.normalize('NFKD', item[1]).encode('ascii', 'ignore') rowcount = db.update_sample_lable(shash, lable) print item[0], ' -> ', item[1], " : ", lable, ' RowCount : ', str(rowcount) if (int(lable) == 0): copyfile(repo + item[1], repo + "0/" + item[1]) elif (int(lable) == 1): copyfile(repo + item[1], repo + "1/" + item[1]) elif int(lable) > 1 and int(lable) <= 5: copyfile(repo + item[1], repo + "5/" + item[1]) elif int(lable) > 5 and int(lable) <= 10: copyfile(repo + item[1], repo + "10/" + item[1]) else: copyfile(repo + item[1], repo + "more/" + item[1]) isSend = True else: print item[0], ' -> ', item[1], ' : Forbidden' time.sleep(120) def n_opcode_progress(repo, dump_Method_dir): fill_samples_table(repo) opcode_sequence_generator6(repo, dump_Method_dir) def run_whole_process(repo, dump_Method_dir): fill_samples_table(repo) opcode_sequence_generator4(repo, dump_Method_dir) def menu_select(): db = DatabaseHandler() repo = '/Users/midnightgeek/Repo/11/l12/' dump_Method_dir = '/Users/midnightgeek/Tools/test2' print '********* DataSet Generator *********' print 'Enter 1 For Run LDA' print 'Enter 2 For Fill Samples Table' print 'Enter 3 For Lable Sample With VT Api' print 'Enter 4 For Clear Samples Table' print 'Enter 5 For capture Image' print 'Enter 6 For Run n-opcode' menu = raw_input("Enter Number : ") if menu == '1': run_whole_process(repo, dump_Method_dir) elif menu == '2': fill_samples_table(repo, dump_Method_dir) elif menu == '3': update_samples_label(repo) elif menu == '4': db.clear_table_samples() elif menu == '5': fill_samples_table(repo) capture_image(repo,dump_Method_dir) elif menu == '6': n_opcode_progress(repo, dump_Method_dir) else: print 'Wrong Number' if __name__ == '__main__': menu_select()
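
# ---------------------------------------------------------------------
# Illustrative sketch (not part of the original script): check_opcode()
# above spells out one branch per n (2..10) when joining consecutive
# opcodes into n-grams.  The same sequences can be produced for any n with
# a single slice-and-join, as sketched here; the function name is an
# assumption introduced for the example.
def _opcode_ngrams_example(opcodes, n):
    """Return the space-joined n-grams of an opcode list, e.g.
    _opcode_ngrams_example(['move', 'const', 'invoke'], 2)
    -> ['move const', 'const invoke']."""
    return [' '.join(opcodes[i:i + n]) for i in range(len(opcodes) - n + 1)]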
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2009 Zuza Software Foundation # # This file is part of the Translate Toolkit. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. """ Contains the base :class:`StringElem` class that represents a node in a parsed rich-string tree. It is the base class of all placeables. """ import logging import sys class ElementNotFoundError(ValueError): pass class StringElem(object): """ This class represents a sub-tree of a string parsed into a rich structure. It is also the base class of all placeables. """ renderer = None """An optional function that returns the Unicode representation of the string.""" sub = [] """The sub-elements that make up this this string.""" has_content = True """Whether this string can have sub-elements.""" iseditable = True """Whether this string should be changable by the user. Not used at the moment.""" isfragile = False """Whether this element should be deleted in its entirety when partially deleted. Only checked when ``iseditable = False``""" istranslatable = True """Whether this string is translatable into other languages.""" isvisible = True """Whether this string should be visible to the user. Not used at the moment.""" # INITIALIZERS # def __init__(self, sub=None, id=None, rid=None, xid=None, **kwargs): if sub is None: self.sub = [] elif isinstance(sub, (unicode, StringElem)): self.sub = [sub] else: for elem in sub: if not isinstance(elem, (unicode, StringElem)): raise ValueError(elem) self.sub = sub self.prune() self.id = id self.rid = rid self.xid = xid for key, value in kwargs.items(): if hasattr(self, key): raise ValueError('attribute already exists: %s' % (key)) setattr(self, key, value) # SPECIAL METHODS # def __add__(self, rhs): """Emulate the ``unicode`` class.""" return unicode(self) + rhs def __contains__(self, item): """Emulate the ``unicode`` class.""" return item in unicode(self) def __eq__(self, rhs): """:returns: ``True`` if (and only if) all members as well as sub-trees are equal. 
False otherwise.""" if not isinstance(rhs, StringElem): return False return self.id == rhs.id and \ self.iseditable == rhs.iseditable and \ self.istranslatable == rhs.istranslatable and \ self.isvisible == rhs.isvisible and \ self.rid == rhs.rid and \ self.xid == rhs.xid and \ len(self.sub) == len(rhs.sub) and \ not [i for i in range(len(self.sub)) if self.sub[i] != rhs.sub[i]] def __ge__(self, rhs): """Emulate the ``unicode`` class.""" return unicode(self) >= rhs def __getitem__(self, i): """Emulate the ``unicode`` class.""" return unicode(self)[i] def __getslice__(self, i, j): """Emulate the ``unicode`` class.""" return unicode(self)[i:j] def __gt__(self, rhs): """Emulate the ``unicode`` class.""" return unicode(self) > rhs def __iter__(self): """Create an iterator of this element's sub-elements.""" for elem in self.sub: yield elem def __le__(self, rhs): """Emulate the ``unicode`` class.""" return unicode(self) <= rhs def __len__(self): """Emulate the ``unicode`` class.""" return len(unicode(self)) def __lt__(self, rhs): """Emulate the ``unicode`` class.""" return unicode(self) < rhs def __mul__(self, rhs): """Emulate the ``unicode`` class.""" return unicode(self) * rhs def __ne__(self, rhs): return not self.__eq__(rhs) def __radd__(self, lhs): """Emulate the ``unicode`` class.""" return self + lhs def __rmul__(self, lhs): """Emulate the ``unicode`` class.""" return self * lhs def __repr__(self): elemstr = ', '.join([repr(elem) for elem in self.sub]) return '<%(class)s(%(id)s%(rid)s%(xid)s[%(subs)s])>' % { 'class': self.__class__.__name__, 'id': self.id is not None and 'id="%s" ' % (self.id) or '', 'rid': self.rid is not None and 'rid="%s" ' % (self.rid) or '', 'xid': self.xid is not None and 'xid="%s" ' % (self.xid) or '', 'subs': elemstr, } def __str__(self): if not self.isvisible: return '' return ''.join([unicode(elem).encode('utf-8') for elem in self.sub]) def __unicode__(self): if callable(self.renderer): return self.renderer(self) if not self.isvisible: return u'' return u''.join([unicode(elem) for elem in self.sub]) # METHODS # def apply_to_strings(self, f): """Apply ``f`` to all actual strings in the tree. :param f: Must take one (str or unicode) argument and return a string or unicode. """ for elem in self.flatten(): for i in range(len(elem.sub)): if isinstance(elem.sub[i], basestring): elem.sub[i] = f(elem.sub[i]) def copy(self): """Returns a copy of the sub-tree. This should be overridden in sub-classes with more data. .. note:: ``self.renderer`` is **not** copied.""" #logging.debug('Copying instance of class %s' % (self.__class__.__name__)) cp = self.__class__(id=self.id, xid=self.xid, rid=self.rid) for sub in self.sub: if isinstance(sub, StringElem): cp.sub.append(sub.copy()) else: cp.sub.append(sub.__class__(sub)) return cp def delete_elem(self, elem): if elem is self: self.sub = [] return parent = self.get_parent_elem(elem) if parent is None: raise ElementNotFoundError(repr(elem)) subidx = -1 for i in range(len(parent.sub)): if parent.sub[i] is elem: subidx = i break if subidx < 0: raise ElementNotFoundError(repr(elem)) del parent.sub[subidx] def delete_range(self, start_index, end_index): """Delete the text in the range given by the string-indexes ``start_index`` and ``end_index``. Partial nodes will only be removed if they are editable. :returns: A ``StringElem`` representing the removed sub-string, the parent node from which it was deleted as well as the offset at which it was deleted from. ``None`` is returned for the parent value if the root was deleted. 
If the parent and offset values are not ``None``, ``parent.insert(offset, deleted)`` effectively undoes the delete.""" if start_index == end_index: return StringElem(), self, 0 if start_index > end_index: raise IndexError('start_index > end_index: %d > %d' % (start_index, end_index)) if start_index < 0 or start_index > len(self): raise IndexError('start_index: %d' % (start_index)) if end_index < 1 or end_index > len(self) + 1: raise IndexError('end_index: %d' % (end_index)) start = self.get_index_data(start_index) if isinstance(start['elem'], tuple): # If {start} is "between" elements, we use the one on the "right" start['elem'] = start['elem'][-1] start['offset'] = start['offset'][-1] end = self.get_index_data(end_index) if isinstance(end['elem'], tuple): # If {end} is "between" elements, we use the one on the "left" end['elem'] = end['elem'][0] end['offset'] = end['offset'][0] assert start['elem'].isleaf() and end['elem'].isleaf() #logging.debug('FROM %s TO %s' % (start, end)) # Ranges can be one of 3 types: # 1) The entire string. # 2) An entire element. # 3) Restricted to a single element. # 4) Spans multiple elements (start- and ending elements are # not the same). # Case 1: Entire string # if start_index == 0 and end_index == len(self): #logging.debug('Case 1: [%s]' % (unicode(self))) removed = self.copy() self.sub = [] return removed, None, None # Case 2: An entire element # if (start['elem'] is end['elem'] and start['offset'] == 0 and end['offset'] == len(start['elem']) or (not start['elem'].iseditable and start['elem'].isfragile)): ##### FOR DEBUGGING ##### #s = '' #for e in self.flatten(): # if e is start['elem']: # s += '[' + unicode(e) + ']' # else: # s += unicode(e) #logging.debug('Case 2: %s' % (s)) ######################### if start['elem'] is self and self.__class__ is StringElem: removed = self.copy() self.sub = [] return removed, None, None removed = start['elem'].copy() parent = self.get_parent_elem(start['elem']) offset = parent.elem_offset(start['elem']) # Filter out start['elem'] below with a list comprehension in stead # of using parent.sub.remove(), becase list.remove() tests value # and not identity, which is what we want here. This ensures that # start['elem'] is removed and not the first element that is equal # to it. parent.sub = [i for i in parent.sub if i is not start['elem']] return removed, parent, offset # Case 3: Within a single element # if start['elem'] is end['elem'] and start['elem'].iseditable: ##### FOR DEBUGGING ##### #s = '' #for e in self.flatten(): # if e is start['elem']: # s += '%s[%s]%s' % ( # e[:start['offset']], # e[start['offset']:end['offset']], # e[end['offset']:] # ) # else: # s += unicode(e) #logging.debug('Case 3: %s' % (s)) ######################### # XXX: This might not have the expected result if start['elem'] # is a StringElem sub-class instance. 
newstr = u''.join(start['elem'].sub) removed = StringElem(newstr[start['offset']:end['offset']]) newstr = newstr[:start['offset']] + newstr[end['offset']:] parent = self.get_parent_elem(start['elem']) if parent is None and start['elem'] is self: parent = self start['elem'].sub = [newstr] self.prune() return removed, start['elem'], start['offset'] # Case 4: Across multiple elements # range_nodes = self.depth_first() startidx = 0 endidx = -1 for i in range(len(range_nodes)): if range_nodes[i] is start['elem']: startidx = i elif range_nodes[i] is end['elem']: endidx = i break range_nodes = range_nodes[startidx:endidx+1] #assert (range_nodes[0] is start['elem'] and # range_nodes[-1] is end['elem']) #logging.debug("Nodes in delete range: %s" % (str(range_nodes))) marked_nodes = [] # Contains nodes that have been marked for deletion (directly or inderectly (via parent)). for node in range_nodes[1:-1]: if [n for n in marked_nodes if n is node]: continue subtree = node.depth_first() if not [e for e in subtree if e is end['elem']]: #logging.debug("Marking node: %s" % (subtree)) marked_nodes.extend(subtree) # "subtree" does not include "node" ##### FOR DEBUGGING ##### #s = '' #for e in self.flatten(): # if e is start['elem']: # s += '%s[%s' % (e[:start['offset']], e[start['offset']:]) # elif e is end['elem']: # s += '%s]%s' % (e[:end['offset']], e[end['offset']:]) # else: # s += unicode(e) #logging.debug('Case 4: %s' % (s)) ######################### removed = self.copy() # Save offsets before we start changing the tree start_offset = self.elem_offset(start['elem']) end_offset = self.elem_offset(end['elem']) for node in marked_nodes: try: self.delete_elem(node) except ElementNotFoundError, e: pass if start['elem'] is not end['elem']: if (start_offset == start['index'] or (not start['elem'].iseditable and start['elem'].isfragile)): self.delete_elem(start['elem']) elif start['elem'].iseditable: start['elem'].sub = [u''.join(start['elem'].sub)[:start['offset']]] if (end_offset + len(end['elem']) == end['index'] or (not end['elem'].iseditable and end['elem'].isfragile)): self.delete_elem(end['elem']) elif end['elem'].iseditable: end['elem'].sub = [u''.join(end['elem'].sub)[end['offset']:]] self.prune() return removed, None, None def depth_first(self, filter=None): """Returns a list of the nodes in the tree in depth-first order.""" if filter is None or not callable(filter): filter = lambda e: True elems = [] if filter(self): elems.append(self) for sub in self.sub: if not isinstance(sub, StringElem): continue if sub.isleaf() and filter(sub): elems.append(sub) else: elems.extend(sub.depth_first()) return elems def encode(self, encoding=sys.getdefaultencoding()): """More ``unicode`` class emulation.""" return unicode(self).encode(encoding) def elem_offset(self, elem): """Find the offset of ``elem`` in the current tree. This cannot be reliably used if ``self.renderer`` is used and even less so if the rendering function renders the string differently upon different calls. In Virtaal the ``StringElemGUI.index()`` method is used as replacement for this one. 
:returns: The string index where element ``e`` starts, or -1 if ``e`` was not found.""" offset = 0 for e in self.iter_depth_first(): if e is elem: return offset if e.isleaf(): offset += len(e) # If we can't find the same instance element, settle for one that # looks like it offset = 0 for e in self.iter_depth_first(): if e.isleaf(): leafoffset = 0 for s in e.sub: if unicode(s) == unicode(elem): return offset + leafoffset else: leafoffset += len(unicode(s)) offset += len(e) return -1 def elem_at_offset(self, offset): """Get the ``StringElem`` in the tree that contains the string rendered at the given offset.""" if offset < 0 or offset > len(self): return None length = 0 elem = None for elem in self.flatten(): elem_len = len(elem) if length <= offset < length + elem_len: return elem length += elem_len return elem def find(self, x): """Find sub-string ``x`` in this string tree and return the position at which it starts.""" if isinstance(x, basestring): return unicode(self).find(x) if isinstance(x, StringElem): return unicode(self).find(unicode(x)) return None def find_elems_with(self, x): """Find all elements in the current sub-tree containing ``x``.""" return [elem for elem in self.flatten() if x in unicode(elem)] def flatten(self, filter=None): """Flatten the tree by returning a depth-first search over the tree's leaves.""" if filter is None or not callable(filter): filter = lambda e: True return [elem for elem in self.iter_depth_first(lambda e: e.isleaf() and filter(e))] def get_ancestor_where(self, child, criteria): parent = self.get_parent_elem(child) if parent is None or criteria(parent): return parent return self.get_ancestor_where(parent, criteria) def get_index_data(self, index): """Get info about the specified range in the tree. :returns: A dictionary with the following items: * *elem*: The element in which ``index`` resides. * *index*: Copy of the ``index`` parameter * *offset*: The offset of ``index`` into ``'elem'``. 
""" info = { 'elem': self.elem_at_offset(index), 'index': index, } info['offset'] = info['index'] - self.elem_offset(info['elem']) # Check if there "index" is actually between elements leftelem = self.elem_at_offset(index - 1) if leftelem is not None and leftelem is not info['elem']: info['elem'] = (leftelem, info['elem']) info['offset'] = (len(leftelem), 0) return info def get_parent_elem(self, child): """Searches the current sub-tree for and returns the parent of the ``child`` element.""" for elem in self.iter_depth_first(): if not isinstance(elem, StringElem): continue for sub in elem.sub: if sub is child: return elem return None def insert(self, offset, text, preferred_parent=None): """Insert the given text at the specified offset of this string-tree's string (Unicode) representation.""" if offset < 0 or offset > len(self) + 1: raise IndexError('Index out of range: %d' % (offset)) if isinstance(text, (str, unicode)): text = StringElem(text) if not isinstance(text, StringElem): raise ValueError('text must be of type StringElem') def checkleaf(elem, text): if elem.isleaf() and type(text) is StringElem and text.isleaf(): return unicode(text) return text # There are 4 general cases (including specific cases) where text can # be inserted: # 1) At the beginning of the string (self) # 1.1) self.sub[0] is editable # 1.2) self.sub[0] is not editable # 2) At the end of the string (self) # 3) In the middle of a node # 4) Between two nodes # 4.1) Neither of the nodes are editable # 4.2) Both nodes are editable # 4.3) Node at offset-1 is editable, node at offset is not # 4.4) Node at offset is editable, node at offset-1 is not oelem = self.elem_at_offset(offset) # Case 1 # if offset == 0: # 1.1 # if oelem.iseditable: #logging.debug('Case 1.1') oelem.sub.insert(0, checkleaf(oelem, text)) oelem.prune() return True # 1.2 # else: #logging.debug('Case 1.2') oparent = self.get_ancestor_where(oelem, lambda x: x.iseditable) if oparent is not None: oparent.sub.insert(0, checkleaf(oparent, text)) return True else: self.sub.insert(0, checkleaf(self, text)) return True return False # Case 2 # if offset >= len(self): #logging.debug('Case 2') last = self.flatten()[-1] parent = self.get_ancestor_where(last, lambda x: x.iseditable) if parent is None: parent = self parent.sub.append(checkleaf(parent, text)) return True before = self.elem_at_offset(offset - 1) # Case 3 # if oelem is before: if oelem.iseditable: #logging.debug('Case 3') eoffset = offset - self.elem_offset(oelem) if oelem.isleaf(): s = unicode(oelem) # Collapse all sibling strings into one head = s[:eoffset] tail = s[eoffset:] if type(text) is StringElem and text.isleaf(): oelem.sub = [head + unicode(text) + tail] else: oelem.sub = [StringElem(head), text, StringElem(tail)] return True else: return oelem.insert(eoffset, text) return False # And the only case left: Case 4 # # 4.1 # if not before.iseditable and not oelem.iseditable: #logging.debug('Case 4.1') # Neither are editable, so we add it as a sibling (to the right) # of before bparent = self.get_parent_elem(before) # bparent cannot be a leaf (because it has before as a child), so # we insert the text as StringElem(text) bindex = bparent.sub.index(before) bparent.sub.insert(bindex + 1, text) return True # 4.2 # elif before.iseditable and oelem.iseditable: #logging.debug('Case 4.2') # We can add to either, but we try hard to add to the correct one # so that we avoid inserting text in the wrong place on undo, for # example. 
preferred_type = type(preferred_parent) before_type = type(before) oelem_type = type(oelem) if preferred_parent is oelem: # The preferred parent is still in this StringElem return oelem.insert(0, text) elif oelem_type == preferred_type and not before_type == preferred_type: # oelem has the right type and before has the wrong type return oelem.insert(0, text) elif oelem_type != preferred_type and before_type != preferred_type: # Both are the wrong type, so we add it as if neither were # editable bparent = self.get_parent_elem(before) bindex = bparent.sub.index(before) bparent.sub.insert(bindex + 1, text) return True return before.insert(len(before) + 1, text) # Reinterpret as a case 2 # 4.3 # elif before.iseditable and not oelem.iseditable: #logging.debug('Case 4.3') return before.insert(len(before) + 1, text) # Reinterpret as a case 2 # 4.4 # elif not before.iseditable and oelem.iseditable: #logging.debug('Case 4.4') return oelem.insert(0, text) # Reinterpret as a case 1 return False def insert_between(self, left, right, text): """Insert the given text between the two parameter ``StringElem``\s.""" if not isinstance(left, StringElem) and left is not None: raise ValueError('"left" is not a StringElem or None') if not isinstance(right, StringElem) and right is not None: raise ValueError('"right" is not a StringElem or None') if left is right: if left.sub: # This is an error because the cursor cannot be inside an # element ("left is right"), if it has any other content. # If an element has content, it will be at least directly # left or directly right of the current cursor position. raise ValueError('"left" and "right" refer to the same element and is not empty.') if not left.iseditable: return False if isinstance(text, unicode): text = StringElem(text) if left is right: #logging.debug('left%s.sub.append(%s)' % (repr(left), repr(text))) left.sub.append(text) return True # XXX: The "in" keyword is *not* used below, because the "in" tests # with __eq__ and not "is", as we do below. Testing for identity is # intentional and required. if left is None: if self is right: #logging.debug('self%s.sub.insert(0, %s)' % # (repr(self), repr(text))) self.sub.insert(0, text) return True parent = self.get_parent_elem(right) if parent is not None: #logging.debug('parent%s.sub.insert(0, %s)' % # (repr(parent), repr(text))) parent.sub.insert(0, text) return True return False if right is None: if self is left: #logging.debug('self%s.sub.append(%s)' % # (repr(self), repr(text))) self.sub.append(text) return True parent = self.get_parent_elem(left) if parent is not None: #logging.debug('parent%s.sub.append(%s)' % # (repr(parent), repr(text))) parent.sub.append(text) return True return False # The following two blocks handle the cases where one element # "surrounds" another as its parent. In that way the parent would be # "left" of its first child, like in the first case. 
ischild = False for sub in left.sub: if right is sub: ischild = True break if ischild: #logging.debug('left%s.sub.insert(0, %s)' % # (repr(left), repr(text))) left.sub.insert(0, text) return True ischild = False for sub in right.sub: if left is sub: ischild = True break if ischild: #logging.debug('right%s.sub.append(%s)' % # (repr(right), repr(text))) right.sub.append(text) return True parent = self.get_parent_elem(left) if parent.iseditable: idx = 1 for child in parent.sub: if child is left: break idx += 1 #logging.debug('parent%s.sub.insert(%d, %s)' % # (repr(parent), idx, repr(text))) parent.sub.insert(idx, text) return True parent = self.get_parent_elem(right) if parent.iseditable: idx = 0 for child in parent.sub: if child is right: break idx += 1 #logging.debug('parent%s.sub.insert(%d, %s)' % # (repr(parent), idx, repr(text))) parent.sub.insert(0, text) return True logging.debug('Could not insert between %s and %s... odd.' % (repr(left), repr(right))) return False def isleaf(self): """ Whether or not this instance is a leaf node in the ``StringElem`` tree. A node is a leaf node if it is a ``StringElem`` (not a sub-class) and contains only sub-elements of type ``str`` or ``unicode``. :rtype: bool """ for e in self.sub: if not isinstance(e, (str, unicode)): return False return True def iter_depth_first(self, filter=None): """Iterate through the nodes in the tree in depth-first order.""" if filter is None or not callable(filter): filter = lambda e: True if filter(self): yield self for sub in self.sub: if not isinstance(sub, StringElem): continue if sub.isleaf() and filter(sub): yield sub else: for node in sub.iter_depth_first(filter): yield node def map(self, f, filter=None): """Apply ``f`` to all nodes for which ``filter`` returned ``True`` (optional).""" if filter is not None and not callable(filter): raise ValueError('filter is not callable or None') if filter is None: filter = lambda e: True for elem in self.depth_first(): if filter(elem): f(elem) @classmethod def parse(cls, pstr): """Parse an instance of this class from the start of the given string. This method should be implemented by any sub-class that wants to be parseable by :mod:`translate.storage.placeables.parse`. :type pstr: unicode :param pstr: The string to parse into an instance of this class.
:returns: An instance of the current class, or ``None`` if the string is not parseable by this class.""" return cls(pstr) def print_tree(self, indent=0, verbose=False): """Print the tree from the current instance's point in an indented manner.""" indent_prefix = " " * indent * 2 out = (u"%s%s [%s]" % (indent_prefix, self.__class__.__name__, unicode(self))).encode('utf-8') if verbose: out += u' ' + repr(self) print out for elem in self.sub: if isinstance(elem, StringElem): elem.print_tree(indent + 1, verbose=verbose) else: print (u'%s%s[%s]' % (indent_prefix, indent_prefix, elem)).encode('utf-8') def prune(self): """Remove unnecessary nodes to make the tree optimal.""" changed = False for elem in self.iter_depth_first(): if len(elem.sub) == 1: child = elem.sub[0] # Symbolically: X->StringElem(leaf) => X(leaf) # (where X is any sub-class of StringElem, # but not StringElem) if type(child) is StringElem and child.isleaf(): elem.sub = child.sub # Symbolically: # StringElem->StringElem2->(leaves) => StringElem->(leaves) if type(elem) is StringElem and type(child) is StringElem: elem.sub = child.sub changed = True # Symbolically: StringElem->X(leaf) => X(leaf) # (where X is any sub-class of StringElem, # but not StringElem) if (type(elem) is StringElem and isinstance(child, StringElem) and type(child) is not StringElem): parent = self.get_parent_elem(elem) if parent is not None: parent.sub[parent.sub.index(elem)] = child changed = True if type(elem) is StringElem and elem.isleaf(): # Collapse all strings in this leaf into one string. elem.sub = [u''.join(elem.sub)] for i in reversed(range(len(elem.sub))): # Remove empty strings or StringElem nodes # (but not StringElem sub-class instances, because they # might contain important (non-rendered) data.) if (type(elem.sub[i]) in (StringElem, str, unicode) and len(elem.sub[i]) == 0): del elem.sub[i] continue if type(elem.sub[i]) in (str, unicode) and not elem.isleaf(): elem.sub[i] = StringElem(elem.sub[i]) changed = True # Merge sibling StringElem leaves if not elem.isleaf(): leafchanged = True while leafchanged: leafchanged = False for i in range(len(elem.sub) - 1): lsub = elem.sub[i] rsub = elem.sub[i+1] if (type(lsub) is StringElem and type(rsub) is StringElem): changed = True lsub.sub.extend(rsub.sub) del elem.sub[i+1] leafchanged = True break # If any changes were made, call prune() again to make sure that # changes made later do not create situations fixed by earlier # checks. if changed: self.prune() # TODO: Write unit test for this method def remove_type(self, ptype): """Replace nodes with type ``ptype`` with base ``StringElem``\s, containing the same sub-elements. This is only applicable to elements below the element tree root node.""" for elem in self.iter_depth_first(): if type(elem) is ptype: parent = self.get_parent_elem(elem) pindex = parent.sub.index(elem) parent.sub[pindex] = StringElem( sub=elem.sub, id=elem.id, xid=elem.xid, rid=elem.rid, ) def translate(self): """Transform the sub-tree according to some class-specific needs. This method should be either overridden in implementing sub-classes or dynamically replaced by specific applications. :returns: The transformed Unicode string representing the sub-tree. """ return self.copy()
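# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module above). It illustrates
# the offset bookkeeping implemented by elem_offset(), elem_at_offset(),
# insert() and prune(). Assumptions: StringElem is importable from the
# translate.storage.placeables package named in parse()'s docstring,
# StringElem(u'text') builds a leaf and StringElem(sub=[...]) builds a branch
# (as insert() and remove_type() suggest), and iseditable defaults to True.
# Python 2 syntax, matching the module.
if __name__ == '__main__':
    from translate.storage.placeables import StringElem  # assumed import path

    # A two-leaf tree rendering as u'Hello world'.
    tree = StringElem(sub=[StringElem(u'Hello '), StringElem(u'world')])
    assert unicode(tree) == u'Hello world'

    # elem_at_offset() maps a character offset to the leaf that renders it;
    # elem_offset() is the inverse lookup.
    leaf = tree.elem_at_offset(7)          # offset 7 falls inside u'world'
    assert unicode(leaf) == u'world'
    assert tree.elem_offset(leaf) == 6

    # insert() splices text into the rendered string (case 3 above: in the
    # middle of an editable leaf); prune() then collapses the leaf strings.
    tree.insert(5, u',')
    tree.prune()
    assert unicode(tree) == u'Hello, world'
# ---------------------------------------------------------------------------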
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=W0212 import fcntl import logging import os import psutil from pylib import cmd_helper from pylib import constants from pylib import valgrind_tools # TODO(jbudorick) Remove once telemetry gets switched over. import pylib.android_commands import pylib.device.device_utils def _GetProcessStartTime(pid): return psutil.Process(pid).create_time class _FileLock(object): """With statement-aware implementation of a file lock. File locks are needed for cross-process synchronization when the multiprocessing Python module is used. """ def __init__(self, path): self._fd = -1 self._path = path def __enter__(self): self._fd = os.open(self._path, os.O_RDONLY | os.O_CREAT) if self._fd < 0: raise Exception('Could not open file %s for reading' % self._path) fcntl.flock(self._fd, fcntl.LOCK_EX) def __exit__(self, _exception_type, _exception_value, traceback): fcntl.flock(self._fd, fcntl.LOCK_UN) os.close(self._fd) class Forwarder(object): """Thread-safe class to manage port forwards from the device to the host.""" _DEVICE_FORWARDER_FOLDER = (constants.TEST_EXECUTABLE_DIR + '/forwarder/') _DEVICE_FORWARDER_PATH = (constants.TEST_EXECUTABLE_DIR + '/forwarder/device_forwarder') _LOCK_PATH = '/tmp/chrome.forwarder.lock' # Defined in host_forwarder_main.cc _HOST_FORWARDER_LOG = '/tmp/host_forwarder_log' _instance = None @staticmethod def Map(port_pairs, device, tool=None): """Runs the forwarder. Args: port_pairs: A list of tuples (device_port, host_port) to forward. Note that you can specify 0 as a device_port, in which case a port will by dynamically assigned on the device. You can get the number of the assigned port using the DevicePortForHostPort method. device: A DeviceUtils instance. tool: Tool class to use to get wrapper, if necessary, for executing the forwarder (see valgrind_tools.py). Raises: Exception on failure to forward the port. """ # TODO(jbudorick) Remove once telemetry gets switched over. assert not isinstance(device, pylib.android_commands.AndroidCommands) if not tool: tool = valgrind_tools.CreateTool(None, device) with _FileLock(Forwarder._LOCK_PATH): instance = Forwarder._GetInstanceLocked(tool) instance._InitDeviceLocked(device, tool) device_serial = str(device) redirection_commands = [ ['--adb=' + constants.GetAdbPath(), '--serial-id=' + device_serial, '--map', str(device_port), str(host_port)] for device_port, host_port in port_pairs] logging.info('Forwarding using commands: %s', redirection_commands) for redirection_command in redirection_commands: try: (exit_code, output) = cmd_helper.GetCmdStatusAndOutput( [instance._host_forwarder_path] + redirection_command) except OSError as e: if e.errno == 2: raise Exception('Unable to start host forwarder. 
Make sure you have' ' built host_forwarder.') else: raise if exit_code != 0: Forwarder._KillDeviceLocked(device, tool) raise Exception('%s exited with %d:\n%s' % ( instance._host_forwarder_path, exit_code, '\n'.join(output))) tokens = output.split(':') if len(tokens) != 2: raise Exception('Unexpected host forwarder output "%s", ' 'expected "device_port:host_port"' % output) device_port = int(tokens[0]) host_port = int(tokens[1]) serial_with_port = (device_serial, device_port) instance._device_to_host_port_map[serial_with_port] = host_port instance._host_to_device_port_map[host_port] = serial_with_port logging.info('Forwarding device port: %d to host port: %d.', device_port, host_port) @staticmethod def UnmapDevicePort(device_port, device): """Unmaps a previously forwarded device port. Args: device: A DeviceUtils instance. device_port: A previously forwarded port (through Map()). """ # TODO(jbudorick) Remove once telemetry gets switched over. assert not isinstance(device, pylib.android_commands.AndroidCommands) with _FileLock(Forwarder._LOCK_PATH): Forwarder._UnmapDevicePortLocked(device_port, device) @staticmethod def UnmapAllDevicePorts(device): """Unmaps all the previously forwarded ports for the provided device. Args: device: A DeviceUtils instance. port_pairs: A list of tuples (device_port, host_port) to unmap. """ # TODO(jbudorick) Remove once telemetry gets switched over. assert not isinstance(device, pylib.android_commands.AndroidCommands) with _FileLock(Forwarder._LOCK_PATH): if not Forwarder._instance: return adb_serial = str(device) if adb_serial not in Forwarder._instance._initialized_devices: return port_map = Forwarder._GetInstanceLocked( None)._device_to_host_port_map for (device_serial, device_port) in port_map.keys(): if adb_serial == device_serial: Forwarder._UnmapDevicePortLocked(device_port, device) # There are no more ports mapped, kill the device_forwarder. tool = valgrind_tools.CreateTool(None, device) Forwarder._KillDeviceLocked(device, tool) @staticmethod def DevicePortForHostPort(host_port): """Returns the device port that corresponds to a given host port.""" with _FileLock(Forwarder._LOCK_PATH): (_device_serial, device_port) = Forwarder._GetInstanceLocked( None)._host_to_device_port_map.get(host_port) return device_port @staticmethod def RemoveHostLog(): if os.path.exists(Forwarder._HOST_FORWARDER_LOG): os.unlink(Forwarder._HOST_FORWARDER_LOG) @staticmethod def GetHostLog(): if not os.path.exists(Forwarder._HOST_FORWARDER_LOG): return '' with file(Forwarder._HOST_FORWARDER_LOG, 'r') as f: return f.read() @staticmethod def _GetInstanceLocked(tool): """Returns the singleton instance. Note that the global lock must be acquired before calling this method. Args: tool: Tool class to use to get wrapper, if necessary, for executing the forwarder (see valgrind_tools.py). """ if not Forwarder._instance: Forwarder._instance = Forwarder(tool) return Forwarder._instance def __init__(self, tool): """Constructs a new instance of Forwarder. Note that Forwarder is a singleton therefore this constructor should be called only once. Args: tool: Tool class to use to get wrapper, if necessary, for executing the forwarder (see valgrind_tools.py). 
""" assert not Forwarder._instance self._tool = tool self._initialized_devices = set() self._device_to_host_port_map = dict() self._host_to_device_port_map = dict() self._host_forwarder_path = os.path.join( constants.GetOutDirectory(), 'host_forwarder') assert os.path.exists(self._host_forwarder_path), 'Please build forwarder2' self._device_forwarder_path_on_host = os.path.join( constants.GetOutDirectory(), 'forwarder_dist') self._InitHostLocked() @staticmethod def _UnmapDevicePortLocked(device_port, device): """Internal method used by UnmapDevicePort(). Note that the global lock must be acquired before calling this method. """ instance = Forwarder._GetInstanceLocked(None) serial = str(device) serial_with_port = (serial, device_port) if not serial_with_port in instance._device_to_host_port_map: logging.error('Trying to unmap non-forwarded port %d' % device_port) return redirection_command = ['--adb=' + constants.GetAdbPath(), '--serial-id=' + serial, '--unmap', str(device_port)] (exit_code, output) = cmd_helper.GetCmdStatusAndOutput( [instance._host_forwarder_path] + redirection_command) if exit_code != 0: logging.error('%s exited with %d:\n%s' % ( instance._host_forwarder_path, exit_code, '\n'.join(output))) host_port = instance._device_to_host_port_map[serial_with_port] del instance._device_to_host_port_map[serial_with_port] del instance._host_to_device_port_map[host_port] @staticmethod def _GetPidForLock(): """Returns the PID used for host_forwarder initialization. The PID of the "sharder" is used to handle multiprocessing. The "sharder" is the initial process that forks that is the parent process. """ return os.getpgrp() def _InitHostLocked(self): """Initializes the host forwarder daemon. Note that the global lock must be acquired before calling this method. This method kills any existing host_forwarder process that could be stale. """ # See if the host_forwarder daemon was already initialized by a concurrent # process or thread (in case multi-process sharding is not used). pid_for_lock = Forwarder._GetPidForLock() fd = os.open(Forwarder._LOCK_PATH, os.O_RDWR | os.O_CREAT) with os.fdopen(fd, 'r+') as pid_file: pid_with_start_time = pid_file.readline() if pid_with_start_time: (pid, process_start_time) = pid_with_start_time.split(':') if pid == str(pid_for_lock): if process_start_time == str(_GetProcessStartTime(pid_for_lock)): return self._KillHostLocked() pid_file.seek(0) pid_file.write( '%s:%s' % (pid_for_lock, str(_GetProcessStartTime(pid_for_lock)))) pid_file.truncate() def _InitDeviceLocked(self, device, tool): """Initializes the device_forwarder daemon for a specific device (once). Note that the global lock must be acquired before calling this method. This method kills any existing device_forwarder daemon on the device that could be stale, pushes the latest version of the daemon (to the device) and starts it. Args: device: A DeviceUtils instance. tool: Tool class to use to get wrapper, if necessary, for executing the forwarder (see valgrind_tools.py). """ device_serial = str(device) if device_serial in self._initialized_devices: return Forwarder._KillDeviceLocked(device, tool) device.PushChangedFiles([( self._device_forwarder_path_on_host, Forwarder._DEVICE_FORWARDER_FOLDER)]) cmd = '%s %s' % (tool.GetUtilWrapper(), Forwarder._DEVICE_FORWARDER_PATH) device.RunShellCommand( cmd, env={'LD_LIBRARY_PATH': Forwarder._DEVICE_FORWARDER_FOLDER}, check_return=True) self._initialized_devices.add(device_serial) def _KillHostLocked(self): """Kills the forwarder process running on the host. 
Note that the global lock must be acquired before calling this method. """ logging.info('Killing host_forwarder.') (exit_code, output) = cmd_helper.GetCmdStatusAndOutput( [self._host_forwarder_path, '--kill-server']) if exit_code != 0: (exit_code, output) = cmd_helper.GetCmdStatusAndOutput( ['pkill', '-9', 'host_forwarder']) if exit_code != 0: raise Exception('%s exited with %d:\n%s' % ( self._host_forwarder_path, exit_code, '\n'.join(output))) @staticmethod def _KillDeviceLocked(device, tool): """Kills the forwarder process running on the device. Note that the global lock must be acquired before calling this method. Args: device: Instance of DeviceUtils for talking to the device. tool: Wrapper tool (e.g. valgrind) that can be used to execute the device forwarder (see valgrind_tools.py). """ logging.info('Killing device_forwarder.') Forwarder._instance._initialized_devices.discard(str(device)) if not device.FileExists(Forwarder._DEVICE_FORWARDER_PATH): return cmd = '%s %s --kill-server' % (tool.GetUtilWrapper(), Forwarder._DEVICE_FORWARDER_PATH) device.RunShellCommand( cmd, env={'LD_LIBRARY_PATH': Forwarder._DEVICE_FORWARDER_FOLDER}, check_return=True)
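# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the module above). It shows the intended
# call pattern for Forwarder: map device port 0 (dynamically assigned) to a
# host port, look the assigned port up, then tear everything down.
# Assumptions: this module is importable as pylib.forwarder and DeviceUtils
# accepts a device serial string; both names may differ in your checkout.
def _forwarder_usage_sketch(device_serial, host_port=8000):
  import logging
  from pylib import forwarder                    # assumed module name
  from pylib.device import device_utils

  device = device_utils.DeviceUtils(device_serial)  # assumed constructor

  # Device port 0 asks device_forwarder to pick a free port on the device.
  forwarder.Forwarder.Map([(0, host_port)], device)
  try:
    device_port = forwarder.Forwarder.DevicePortForHostPort(host_port)
    logging.info('Forwarding device port %d to host port %d.',
                 device_port, host_port)
  finally:
    # Unmapping every port for the device also kills device_forwarder.
    forwarder.Forwarder.UnmapAllDevicePorts(device)
# ---------------------------------------------------------------------------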
# Lint as: python2, python3 # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for model_builder under TensorFlow 2.X.""" import os import unittest import tensorflow.compat.v1 as tf from google.protobuf import text_format from object_detection.builders import model_builder from object_detection.builders import model_builder_test from object_detection.core import losses from object_detection.models import center_net_resnet_feature_extractor from object_detection.protos import center_net_pb2 from object_detection.protos import model_pb2 from object_detection.utils import tf_version @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.') class ModelBuilderTF2Test(model_builder_test.ModelBuilderTest): def default_ssd_feature_extractor(self): return 'ssd_resnet50_v1_fpn_keras' def default_faster_rcnn_feature_extractor(self): return 'faster_rcnn_resnet101_keras' def ssd_feature_extractors(self): return model_builder.SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP def get_override_base_feature_extractor_hyperparams(self, extractor_type): return extractor_type in {} def faster_rcnn_feature_extractors(self): return model_builder.FASTER_RCNN_KERAS_FEATURE_EXTRACTOR_CLASS_MAP def get_fake_label_map_file_path(self): keypoint_spec_text = """ item { name: "/m/01g317" id: 1 display_name: "person" keypoints { id: 0 label: 'nose' } keypoints { id: 1 label: 'left_shoulder' } keypoints { id: 2 label: 'right_shoulder' } keypoints { id: 3 label: 'hip' } } """ keypoint_label_map_path = os.path.join( self.get_temp_dir(), 'keypoint_label_map') with tf.gfile.Open(keypoint_label_map_path, 'wb') as f: f.write(keypoint_spec_text) return keypoint_label_map_path def get_fake_keypoint_proto(self): task_proto_txt = """ task_name: "human_pose" task_loss_weight: 0.9 keypoint_regression_loss_weight: 1.0 keypoint_heatmap_loss_weight: 0.1 keypoint_offset_loss_weight: 0.5 heatmap_bias_init: 2.14 keypoint_class_name: "/m/01g317" loss { classification_loss { penalty_reduced_logistic_focal_loss { alpha: 3.0 beta: 4.0 } } localization_loss { l1_localization_loss { } } } keypoint_label_to_std { key: "nose" value: 0.3 } keypoint_label_to_std { key: "hip" value: 0.0 } keypoint_candidate_score_threshold: 0.3 num_candidates_per_keypoint: 12 peak_max_pool_kernel_size: 5 unmatched_keypoint_score: 0.05 box_scale: 1.7 candidate_search_scale: 0.2 candidate_ranking_mode: "score_distance_ratio" offset_peak_radius: 3 per_keypoint_offset: true """ config = text_format.Merge(task_proto_txt, center_net_pb2.CenterNet.KeypointEstimation()) return config def get_fake_object_center_proto(self): proto_txt = """ object_center_loss_weight: 0.5 heatmap_bias_init: 3.14 min_box_overlap_iou: 0.2 max_box_predictions: 15 classification_loss { penalty_reduced_logistic_focal_loss { alpha: 3.0 beta: 4.0 } } """ return text_format.Merge(proto_txt, center_net_pb2.CenterNet.ObjectCenterParams()) def 
get_fake_object_detection_proto(self): proto_txt = """ task_loss_weight: 0.5 offset_loss_weight: 0.1 scale_loss_weight: 0.2 localization_loss { l1_localization_loss { } } """ return text_format.Merge(proto_txt, center_net_pb2.CenterNet.ObjectDetection()) def get_fake_mask_proto(self): proto_txt = """ task_loss_weight: 0.7 classification_loss { weighted_softmax {} } mask_height: 8 mask_width: 8 score_threshold: 0.7 heatmap_bias_init: -2.0 """ return text_format.Merge(proto_txt, center_net_pb2.CenterNet.MaskEstimation()) def get_fake_densepose_proto(self): proto_txt = """ task_loss_weight: 0.5 class_id: 0 loss { classification_loss { weighted_softmax {} } localization_loss { l1_localization_loss { } } } num_parts: 24 part_loss_weight: 1.0 coordinate_loss_weight: 2.0 upsample_to_input_res: true heatmap_bias_init: -2.0 """ return text_format.Merge(proto_txt, center_net_pb2.CenterNet.DensePoseEstimation()) def test_create_center_net_model(self): """Test building a CenterNet model from proto txt.""" proto_txt = """ center_net { num_classes: 10 feature_extractor { type: "resnet_v2_101" channel_stds: [4, 5, 6] bgr_ordering: true } image_resizer { keep_aspect_ratio_resizer { min_dimension: 512 max_dimension: 512 pad_to_max_dimension: true } } } """ # Set up the configuration proto. config = text_format.Merge(proto_txt, model_pb2.DetectionModel()) config.center_net.object_center_params.CopyFrom( self.get_fake_object_center_proto()) config.center_net.object_detection_task.CopyFrom( self.get_fake_object_detection_proto()) config.center_net.keypoint_estimation_task.append( self.get_fake_keypoint_proto()) config.center_net.keypoint_label_map_path = ( self.get_fake_label_map_file_path()) config.center_net.mask_estimation_task.CopyFrom( self.get_fake_mask_proto()) config.center_net.densepose_estimation_task.CopyFrom( self.get_fake_densepose_proto()) # Build the model from the configuration. model = model_builder.build(config, is_training=True) # Check object center related parameters. self.assertEqual(model._num_classes, 10) self.assertIsInstance(model._center_params.classification_loss, losses.PenaltyReducedLogisticFocalLoss) self.assertEqual(model._center_params.classification_loss._alpha, 3.0) self.assertEqual(model._center_params.classification_loss._beta, 4.0) self.assertAlmostEqual(model._center_params.min_box_overlap_iou, 0.2) self.assertAlmostEqual( model._center_params.heatmap_bias_init, 3.14, places=4) self.assertEqual(model._center_params.max_box_predictions, 15) # Check object detection related parameters. self.assertAlmostEqual(model._od_params.offset_loss_weight, 0.1) self.assertAlmostEqual(model._od_params.scale_loss_weight, 0.2) self.assertAlmostEqual(model._od_params.task_loss_weight, 0.5) self.assertIsInstance(model._od_params.localization_loss, losses.L1LocalizationLoss) # Check keypoint estimation related parameters. 
kp_params = model._kp_params_dict['human_pose'] self.assertAlmostEqual(kp_params.task_loss_weight, 0.9) self.assertAlmostEqual(kp_params.keypoint_regression_loss_weight, 1.0) self.assertAlmostEqual(kp_params.keypoint_offset_loss_weight, 0.5) self.assertAlmostEqual(kp_params.heatmap_bias_init, 2.14, places=4) self.assertEqual(kp_params.classification_loss._alpha, 3.0) self.assertEqual(kp_params.keypoint_indices, [0, 1, 2, 3]) self.assertEqual(kp_params.keypoint_labels, ['nose', 'left_shoulder', 'right_shoulder', 'hip']) self.assertAllClose(kp_params.keypoint_std_dev, [0.3, 1.0, 1.0, 0.0]) self.assertEqual(kp_params.classification_loss._beta, 4.0) self.assertIsInstance(kp_params.localization_loss, losses.L1LocalizationLoss) self.assertAlmostEqual(kp_params.keypoint_candidate_score_threshold, 0.3) self.assertEqual(kp_params.num_candidates_per_keypoint, 12) self.assertEqual(kp_params.peak_max_pool_kernel_size, 5) self.assertAlmostEqual(kp_params.unmatched_keypoint_score, 0.05) self.assertAlmostEqual(kp_params.box_scale, 1.7) self.assertAlmostEqual(kp_params.candidate_search_scale, 0.2) self.assertEqual(kp_params.candidate_ranking_mode, 'score_distance_ratio') self.assertEqual(kp_params.offset_peak_radius, 3) self.assertEqual(kp_params.per_keypoint_offset, True) # Check mask related parameters. self.assertAlmostEqual(model._mask_params.task_loss_weight, 0.7) self.assertIsInstance(model._mask_params.classification_loss, losses.WeightedSoftmaxClassificationLoss) self.assertEqual(model._mask_params.mask_height, 8) self.assertEqual(model._mask_params.mask_width, 8) self.assertAlmostEqual(model._mask_params.score_threshold, 0.7) self.assertAlmostEqual( model._mask_params.heatmap_bias_init, -2.0, places=4) # Check DensePose related parameters. self.assertEqual(model._densepose_params.class_id, 0) self.assertIsInstance(model._densepose_params.classification_loss, losses.WeightedSoftmaxClassificationLoss) self.assertIsInstance(model._densepose_params.localization_loss, losses.L1LocalizationLoss) self.assertAlmostEqual(model._densepose_params.part_loss_weight, 1.0) self.assertAlmostEqual(model._densepose_params.coordinate_loss_weight, 2.0) self.assertEqual(model._densepose_params.num_parts, 24) self.assertAlmostEqual(model._densepose_params.task_loss_weight, 0.5) self.assertTrue(model._densepose_params.upsample_to_input_res) self.assertEqual(model._densepose_params.upsample_method, 'bilinear') self.assertAlmostEqual( model._densepose_params.heatmap_bias_init, -2.0, places=4) # Check feature extractor parameters. self.assertIsInstance( model._feature_extractor, center_net_resnet_feature_extractor.CenterNetResnetFeatureExtractor) self.assertAllClose(model._feature_extractor._channel_means, [0, 0, 0]) self.assertAllClose(model._feature_extractor._channel_stds, [4, 5, 6]) self.assertTrue(model._feature_extractor._bgr_ordering) if __name__ == '__main__': tf.test.main()
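# ---------------------------------------------------------------------------
# Hedged sketch (not part of the test above). It repeats, outside the test
# harness, the essential recipe that test_create_center_net_model()
# exercises: parse a CenterNet DetectionModel proto from text and hand it to
# model_builder.build(). Only fields already shown in the test are used;
# treat this as an illustrative minimum, not a recommended configuration.
def _build_minimal_center_net(is_training=False):
  from google.protobuf import text_format
  from object_detection.builders import model_builder
  from object_detection.protos import model_pb2

  proto_txt = """
    center_net {
      num_classes: 10
      feature_extractor {
        type: "resnet_v2_101"
      }
      image_resizer {
        keep_aspect_ratio_resizer {
          min_dimension: 512
          max_dimension: 512
          pad_to_max_dimension: true
        }
      }
      object_center_params {
        object_center_loss_weight: 0.5
        max_box_predictions: 15
        classification_loss {
          penalty_reduced_logistic_focal_loss { alpha: 3.0 beta: 4.0 }
        }
      }
      object_detection_task {
        task_loss_weight: 0.5
        localization_loss { l1_localization_loss {} }
      }
    }
  """
  config = text_format.Merge(proto_txt, model_pb2.DetectionModel())
  return model_builder.build(config, is_training=is_training)
# ---------------------------------------------------------------------------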
# # Copyright 2016 The BigDL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np from bigdl.dllib.optim.optimizer import MaxEpoch from bigdl.orca.tfpark.utils import evaluate_string_metrics from bigdl.dllib.utils.file_utils import enable_multi_fs_save, enable_multi_fs_load from bigdl.dllib.nncontext import getOrCreateSparkContext from bigdl.orca.tfpark.tf_dataset import TFNdarrayDataset, TFDataset, \ _standarize_feature_label_dataset, check_data_compatible from bigdl.orca.tfpark.tf_optimizer import TFOptimizer from bigdl.orca.tfpark.tf_predictor import TFPredictor class KerasModel(object): def __init__(self, model, model_dir=None, optimizer=None): """ :param model: a compiled keras model """ self.model = model self.model_dir = model_dir self.optimizer = optimizer import tensorflow as tf self.real_batch_size = tf.shape(self.model.inputs[0])[0] self.metric_tensors = {} def add_metric(self, tensor, name): self.metric_tensors[name] = tensor @property def metrics_names(self): return self.model.metrics_names def get_weights(self): return self.model.get_weights() def set_weights(self, weights): self.model.set_weights(weights) @enable_multi_fs_save def save_weights(self, filepath, overwrite=True, save_format=None): self.model.save_weights(filepath, overwrite=overwrite, save_format=save_format) @enable_multi_fs_load def load_weights(self, filepath, by_name=False): self.model.load_weights(filepath, by_name=by_name) @enable_multi_fs_save def save_model(self, path, overwrite=True): """ Save the model to a single HDF5 file. :param path: String. The path to save the model. :param overwrite: Boolean. Whether to silently overwrite any existing file at the target location """ self.model.save(path, overwrite=overwrite) @staticmethod def load_model(path): """ Load an existing keras model (with weights) from HDF5 file. :param path: String. The path to the pre-defined model. :return: KerasModel. """ from tensorflow.python.keras import models keras_model = models.load_model(path) return KerasModel(keras_model) def fit(self, x=None, y=None, batch_size=None, epochs=1, validation_data=None, distributed=False, **kwargs ): """ Train the model for a fixed num of epochs Arguments: :param x: Input data. It could be: - a TFDataset object - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. :param y: Target data. Like the input data `x`, It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a TFDataset, `y` should not be specified (since targets will be obtained from `x`). :param batch_size: Integer or `None`. Number of samples per gradient update. If `x` is a TFDataset, you do not need to specify batch_size. :param epochs: Integer. Number of epochs to train the model. An epoch is an iteration over the entire `x` and `y` data provided. 
:param validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch. The model will not be trained on this data. `validation_data` could be: - tuple `(x_val, y_val)` of Numpy arrays or tensors :param distributed: Boolean. Whether to do training in distributed mode or local mode. Default is False. In local mode, x must be a Numpy array. """ if isinstance(x, TFDataset): # todo check arguments assert validation_data is None, "validation_data must be None when " \ "using TFDataset as input, please " \ "set the validation data in TFDataset" if not x.has_batch: raise ValueError("The batch_size of TFDataset must be " + "specified when used in KerasModel fit.") self._fit_distributed(x, epochs, **kwargs) elif distributed: dataset = TFDataset.from_ndarrays((x, y), val_tensors=validation_data, batch_size=batch_size) self._fit_distributed(dataset, epochs, **kwargs) else: self.model.fit(x=x, y=y, batch_size=batch_size, epochs=epochs, validation_data=validation_data, **kwargs ) def _fit_distributed(self, dataset, epochs, **kwargs): self.tf_optimizer = TFOptimizer.from_keras(self.model, dataset, model_dir=self.model_dir, metrics=self.metric_tensors, optimizer=self.optimizer, **kwargs) self.tf_optimizer.optimize(MaxEpoch(epochs)) def evaluate(self, x=None, y=None, batch_per_thread=None, distributed=False ): """ Evaluate a model on a given dataset :param x: Input data. It could be: - a TFDataset object - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. :param y: Target data. Like the input data `x`, It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a TFDataset, `y` should not be specified (since targets will be obtained from `x`). :param batch_per_thread: The default value is 1. When distributed is True, the total batch size is batch_per_thread * rdd.getNumPartitions. When distributed is False the total batch size is batch_per_thread * numOfCores. :param distributed: Boolean. Whether to do evaluation in distributed mode or local mode. Default is False. In local mode, x must be a Numpy array. """ if isinstance(x, TFDataset): if not x.has_batch: raise ValueError("The batch_per_thread of TFDataset must be " + "specified when used in KerasModel evaluate.") if isinstance(x, TFNdarrayDataset): x = _standarize_feature_label_dataset(x, self.model) # todo check arguments check_data_compatible(x, self.model, mode="evaluate") return self._evaluate_distributed(x) else: if distributed: dataset = TFDataset.from_ndarrays((x, y), batch_per_thread=-1 if batch_per_thread is None else batch_per_thread ) dataset = _standarize_feature_label_dataset(dataset, self.model) return self._evaluate_distributed(dataset) else: results = self.model.evaluate(x=x, y=y, batch_size=batch_per_thread) results = dict(zip(self.metrics_names, results)) return results def _evaluate_distributed(self, dataset): import tensorflow.keras.backend as K if hasattr(self.model, "targets"): model_targets = self.model.targets else: model_targets = self.model._targets return evaluate_string_metrics(sess=K.get_session(), string_metrics=self.metrics_names, dataset=dataset, inputs=self.model.inputs + model_targets, targets=model_targets, outputs=self.model.outputs, loss=self.model.total_loss) def predict(self, x, batch_per_thread=None, distributed=False): """ Use a model to do prediction. :param x: Input data.
It could be: - a TFDataset object - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. :param batch_per_thread: The default value is 1. When distributed is True, the total batch size is batch_per_thread * rdd.getNumPartitions. When distributed is False the total batch size is batch_per_thread * numOfCores. :param distributed: Boolean. Whether to do prediction in distributed mode or local mode. Default is False. In local mode, x must be a Numpy array. """ if isinstance(x, TFDataset): # todo check arguments if not x.has_batch: raise ValueError("The batch_per_thread of TFDataset" + " must be specified when used in KerasModel predict.") return self._predict_distributed(x) else: if distributed: sc = getOrCreateSparkContext() rdd, types, shapes = _create_rdd_x(x, self.model._feed_input_names, sc) dataset = TFDataset.from_rdd(rdd, names=self.model._feed_input_names, types=types, shapes=shapes, batch_per_thread=-1 if batch_per_thread is None else batch_per_thread) results = self._predict_distributed(dataset).collect() output_num = len(self.model.outputs) if output_num == 1: return np.stack(results) else: predictions = [] for i in range(0, output_num): predictions.append(np.stack([res[i] for res in results])) return predictions else: return self.model.predict(x=x, batch_size=batch_per_thread) def _predict_distributed(self, x): predictor = TFPredictor.from_keras(self.model, x) return predictor.predict() def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None, reset_metrics=True): return self.model.train_on_batch(x=x, y=y, sample_weight=sample_weight, class_weight=class_weight, reset_metrics=reset_metrics) def test_on_batch(self, x, y=None, sample_weight=None, reset_metrics=True): return self.model.test_on_batch(x=x, y=y, sample_weight=sample_weight, reset_metrics=reset_metrics) def predict_on_batch(self, x): return self.model.predict_on_batch(x) def _create_rdd_x_y(x, y, input_names, output_names, sc): from tensorflow.python.keras.engine import training_utils x = training_utils.standardize_input_data(x, input_names, check_batch_axis=False, exception_prefix='input') y = training_utils.standardize_input_data(y, output_names, shapes=None, check_batch_axis=False, exception_prefix='target') num_samples = x[0].shape[0] num_inputs = len(x) num_targets = len(y) input_data = [] for i in range(num_samples): inputs = [] for j in range(num_inputs): inputs.append(x[j][i]) targets = [] for j in range(num_targets): if y[j][i].ndim == 0: targets.append(np.expand_dims(y[j][i], axis=1)) else: targets.append(y[j][i]) input_data.append((inputs, targets)) x_meta = dict([(input_names[i], (input_data[0][0][i].dtype, input_data[0][0][i].shape)) for i in range(len(input_names))]) y_meta = dict([(output_names[i], (input_data[0][1][i].dtype, input_data[0][1][i].shape)) for i in range(len(output_names))]) rdd = sc.parallelize(input_data) return rdd, x_meta, y_meta def _create_rdd_x(x, input_names, sc): from tensorflow.python.keras.engine import training_utils x = training_utils.standardize_input_data(x, input_names, check_batch_axis=False, exception_prefix='input') num_samples = x[0].shape[0] num_inputs = len(x) input_data = [] for i in range(num_samples): sample = [] for j in range(num_inputs): sample.append(x[j][i]) input_data.append(sample) types = [x.dtype for x in input_data[0]] shapes = [x.shape for x in input_data[0]] rdd = sc.parallelize(input_data) return rdd, types,
shapes
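# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the module above). It wires a compiled
# tf.keras model into KerasModel and runs fit/evaluate/predict on in-memory
# numpy data, either locally or distributed. Assumptions: a SparkContext /
# NNContext is already initialised when distributed=True (the methods above
# call getOrCreateSparkContext() internally) and KerasModel is importable
# from bigdl.orca.tfpark; adjust the import to your installation.
def _keras_model_usage_sketch(distributed=False):
    import numpy as np
    import tensorflow as tf
    from bigdl.orca.tfpark import KerasModel  # assumed import path

    net = tf.keras.Sequential([
        tf.keras.layers.Dense(8, activation='relu', input_shape=(4,)),
        tf.keras.layers.Dense(1),
    ])
    net.compile(optimizer='sgd', loss='mse', metrics=['mae'])

    model = KerasModel(net)

    x = np.random.rand(64, 4).astype(np.float32)
    y = np.random.rand(64, 1).astype(np.float32)

    # With distributed=True the (x, y) arrays are converted to a TFDataset
    # internally; with distributed=False these calls fall through to the
    # plain keras fit/evaluate/predict methods.
    model.fit(x, y, batch_size=16, epochs=2, distributed=distributed)
    metrics = model.evaluate(x, y, batch_per_thread=16,
                             distributed=distributed)
    predictions = model.predict(x, batch_per_thread=16,
                                distributed=distributed)
    return metrics, predictions
# ---------------------------------------------------------------------------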
"""Unit tests for the bytes and bytearray types. XXX This is a mess. Common tests should be unified with string_tests.py (and the latter should be modernized). """ import array import os import re import sys import copy import functools import pickle import tempfile import unittest import test.support import test.string_tests import test.list_tests from test.support import bigaddrspacetest, MAX_Py_ssize_t if sys.flags.bytes_warning: def check_bytes_warnings(func): @functools.wraps(func) def wrapper(*args, **kw): with test.support.check_warnings(('', BytesWarning)): return func(*args, **kw) return wrapper else: # no-op def check_bytes_warnings(func): return func class Indexable: def __init__(self, value=0): self.value = value def __index__(self): return self.value class BaseBytesTest: def test_basics(self): b = self.type2test() self.assertEqual(type(b), self.type2test) self.assertEqual(b.__class__, self.type2test) def test_copy(self): a = self.type2test(b"abcd") for copy_method in (copy.copy, copy.deepcopy): b = copy_method(a) self.assertEqual(a, b) self.assertEqual(type(a), type(b)) def test_empty_sequence(self): b = self.type2test() self.assertEqual(len(b), 0) self.assertRaises(IndexError, lambda: b[0]) self.assertRaises(IndexError, lambda: b[1]) self.assertRaises(IndexError, lambda: b[sys.maxsize]) self.assertRaises(IndexError, lambda: b[sys.maxsize+1]) self.assertRaises(IndexError, lambda: b[10**100]) self.assertRaises(IndexError, lambda: b[-1]) self.assertRaises(IndexError, lambda: b[-2]) self.assertRaises(IndexError, lambda: b[-sys.maxsize]) self.assertRaises(IndexError, lambda: b[-sys.maxsize-1]) self.assertRaises(IndexError, lambda: b[-sys.maxsize-2]) self.assertRaises(IndexError, lambda: b[-10**100]) def test_from_list(self): ints = list(range(256)) b = self.type2test(i for i in ints) self.assertEqual(len(b), 256) self.assertEqual(list(b), ints) def test_from_index(self): b = self.type2test([Indexable(), Indexable(1), Indexable(254), Indexable(255)]) self.assertEqual(list(b), [0, 1, 254, 255]) self.assertRaises(ValueError, self.type2test, [Indexable(-1)]) self.assertRaises(ValueError, self.type2test, [Indexable(256)]) def test_from_buffer(self): a = self.type2test(array.array('B', [1, 2, 3])) self.assertEqual(a, b"\x01\x02\x03") # http://bugs.python.org/issue29159 # Fallback when __index__ raises exception other than OverflowError class B(bytes): def __index__(self): raise TypeError self.assertEqual(self.type2test(B(b"foobar")), b"foobar") def test_from_ssize(self): self.assertEqual(self.type2test(0), b'') self.assertEqual(self.type2test(1), b'\x00') self.assertEqual(self.type2test(5), b'\x00\x00\x00\x00\x00') self.assertRaises(ValueError, self.type2test, -1) self.assertEqual(self.type2test('0', 'ascii'), b'0') self.assertEqual(self.type2test(b'0'), b'0') self.assertRaises(OverflowError, self.type2test, sys.maxsize + 1) def test_constructor_type_errors(self): self.assertRaises(TypeError, self.type2test, 0.0) class C: pass self.assertRaises(TypeError, self.type2test, ["0"]) self.assertRaises(TypeError, self.type2test, [0.0]) self.assertRaises(TypeError, self.type2test, [None]) self.assertRaises(TypeError, self.type2test, [C()]) self.assertRaises(TypeError, self.type2test, 0, 'ascii') self.assertRaises(TypeError, self.type2test, b'', 'ascii') self.assertRaises(TypeError, self.type2test, 0, errors='ignore') self.assertRaises(TypeError, self.type2test, b'', errors='ignore') self.assertRaises(TypeError, self.type2test, '') self.assertRaises(TypeError, self.type2test, '', 
errors='ignore') self.assertRaises(TypeError, self.type2test, '', b'ascii') self.assertRaises(TypeError, self.type2test, '', 'ascii', b'ignore') def test_constructor_value_errors(self): self.assertRaises(ValueError, self.type2test, [-1]) self.assertRaises(ValueError, self.type2test, [-sys.maxsize]) self.assertRaises(ValueError, self.type2test, [-sys.maxsize-1]) self.assertRaises(ValueError, self.type2test, [-sys.maxsize-2]) self.assertRaises(ValueError, self.type2test, [-10**100]) self.assertRaises(ValueError, self.type2test, [256]) self.assertRaises(ValueError, self.type2test, [257]) self.assertRaises(ValueError, self.type2test, [sys.maxsize]) self.assertRaises(ValueError, self.type2test, [sys.maxsize+1]) self.assertRaises(ValueError, self.type2test, [10**100]) @bigaddrspacetest def test_constructor_overflow(self): size = MAX_Py_ssize_t self.assertRaises((OverflowError, MemoryError), self.type2test, size) try: # Should either pass or raise an error (e.g. on debug builds with # additional malloc() overhead), but shouldn't crash. bytearray(size - 4) except (OverflowError, MemoryError): pass def test_compare(self): b1 = self.type2test([1, 2, 3]) b2 = self.type2test([1, 2, 3]) b3 = self.type2test([1, 3]) self.assertEqual(b1, b2) self.assertTrue(b2 != b3) self.assertTrue(b1 <= b2) self.assertTrue(b1 <= b3) self.assertTrue(b1 < b3) self.assertTrue(b1 >= b2) self.assertTrue(b3 >= b2) self.assertTrue(b3 > b2) self.assertFalse(b1 != b2) self.assertFalse(b2 == b3) self.assertFalse(b1 > b2) self.assertFalse(b1 > b3) self.assertFalse(b1 >= b3) self.assertFalse(b1 < b2) self.assertFalse(b3 < b2) self.assertFalse(b3 <= b2) @check_bytes_warnings def test_compare_to_str(self): # Byte comparisons with unicode should always fail! # Test this for all expected byte orders and Unicode character # sizes. self.assertEqual(self.type2test(b"\0a\0b\0c") == "abc", False) self.assertEqual(self.type2test(b"\0\0\0a\0\0\0b\0\0\0c") == "abc", False) self.assertEqual(self.type2test(b"a\0b\0c\0") == "abc", False) self.assertEqual(self.type2test(b"a\0\0\0b\0\0\0c\0\0\0") == "abc", False) self.assertEqual(self.type2test() == str(), False) self.assertEqual(self.type2test() != str(), True) def test_reversed(self): input = list(map(ord, "Hello")) b = self.type2test(input) output = list(reversed(b)) input.reverse() self.assertEqual(output, input) def test_getslice(self): def by(s): return self.type2test(map(ord, s)) b = by("Hello, world") self.assertEqual(b[:5], by("Hello")) self.assertEqual(b[1:5], by("ello")) self.assertEqual(b[5:7], by(", ")) self.assertEqual(b[7:], by("world")) self.assertEqual(b[7:12], by("world")) self.assertEqual(b[7:100], by("world")) self.assertEqual(b[:-7], by("Hello")) self.assertEqual(b[-11:-7], by("ello")) self.assertEqual(b[-7:-5], by(", ")) self.assertEqual(b[-5:], by("world")) self.assertEqual(b[-5:12], by("world")) self.assertEqual(b[-5:100], by("world")) self.assertEqual(b[-100:5], by("Hello")) def test_extended_getslice(self): # Test extended slicing by comparing with list slicing. 
L = list(range(255)) b = self.type2test(L) indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100) for start in indices: for stop in indices: # Skip step 0 (invalid) for step in indices[1:]: self.assertEqual(b[start:stop:step], self.type2test(L[start:stop:step])) def test_encoding(self): sample = "Hello world\n\u1234\u5678\u9abc" for enc in ("utf-8", "utf-16"): b = self.type2test(sample, enc) self.assertEqual(b, self.type2test(sample.encode(enc))) self.assertRaises(UnicodeEncodeError, self.type2test, sample, "latin-1") b = self.type2test(sample, "latin-1", "ignore") self.assertEqual(b, self.type2test(sample[:-3], "utf-8")) def test_decode(self): sample = "Hello world\n\u1234\u5678\u9abc" for enc in ("utf-8", "utf-16"): b = self.type2test(sample, enc) self.assertEqual(b.decode(enc), sample) sample = "Hello world\n\x80\x81\xfe\xff" b = self.type2test(sample, "latin-1") self.assertRaises(UnicodeDecodeError, b.decode, "utf-8") self.assertEqual(b.decode("utf-8", "ignore"), "Hello world\n") self.assertEqual(b.decode(errors="ignore", encoding="utf-8"), "Hello world\n") # Default encoding is utf-8 self.assertEqual(self.type2test(b'\xe2\x98\x83').decode(), '\u2603') def test_from_int(self): b = self.type2test(0) self.assertEqual(b, self.type2test()) b = self.type2test(10) self.assertEqual(b, self.type2test([0]*10)) b = self.type2test(10000) self.assertEqual(b, self.type2test([0]*10000)) def test_concat(self): b1 = self.type2test(b"abc") b2 = self.type2test(b"def") self.assertEqual(b1 + b2, b"abcdef") self.assertEqual(b1 + bytes(b"def"), b"abcdef") self.assertEqual(bytes(b"def") + b1, b"defabc") self.assertRaises(TypeError, lambda: b1 + "def") self.assertRaises(TypeError, lambda: "abc" + b2) def test_repeat(self): for b in b"abc", self.type2test(b"abc"): self.assertEqual(b * 3, b"abcabcabc") self.assertEqual(b * 0, b"") self.assertEqual(b * -1, b"") self.assertRaises(TypeError, lambda: b * 3.14) self.assertRaises(TypeError, lambda: 3.14 * b) # XXX Shouldn't bytes and bytearray agree on what to raise? 
with self.assertRaises((OverflowError, MemoryError)): c = b * sys.maxsize with self.assertRaises((OverflowError, MemoryError)): b *= sys.maxsize def test_repeat_1char(self): self.assertEqual(self.type2test(b'x')*100, self.type2test([ord('x')]*100)) def test_contains(self): b = self.type2test(b"abc") self.assertIn(ord('a'), b) self.assertIn(int(ord('a')), b) self.assertNotIn(200, b) self.assertRaises(ValueError, lambda: 300 in b) self.assertRaises(ValueError, lambda: -1 in b) self.assertRaises(ValueError, lambda: sys.maxsize+1 in b) self.assertRaises(TypeError, lambda: None in b) self.assertRaises(TypeError, lambda: float(ord('a')) in b) self.assertRaises(TypeError, lambda: "a" in b) for f in bytes, bytearray: self.assertIn(f(b""), b) self.assertIn(f(b"a"), b) self.assertIn(f(b"b"), b) self.assertIn(f(b"c"), b) self.assertIn(f(b"ab"), b) self.assertIn(f(b"bc"), b) self.assertIn(f(b"abc"), b) self.assertNotIn(f(b"ac"), b) self.assertNotIn(f(b"d"), b) self.assertNotIn(f(b"dab"), b) self.assertNotIn(f(b"abd"), b) def test_fromhex(self): self.assertRaises(TypeError, self.type2test.fromhex) self.assertRaises(TypeError, self.type2test.fromhex, 1) self.assertEqual(self.type2test.fromhex(''), self.type2test()) b = bytearray([0x1a, 0x2b, 0x30]) self.assertEqual(self.type2test.fromhex('1a2B30'), b) self.assertEqual(self.type2test.fromhex(' 1A 2B 30 '), b) self.assertEqual(self.type2test.fromhex('0000'), b'\0\0') self.assertRaises(TypeError, self.type2test.fromhex, b'1B') self.assertRaises(ValueError, self.type2test.fromhex, 'a') self.assertRaises(ValueError, self.type2test.fromhex, 'rt') self.assertRaises(ValueError, self.type2test.fromhex, '1a b cd') self.assertRaises(ValueError, self.type2test.fromhex, '\x00') self.assertRaises(ValueError, self.type2test.fromhex, '12 \x00 34') for data, pos in ( # invalid first hexadecimal character ('12 x4 56', 3), # invalid second hexadecimal character ('12 3x 56', 4), # two invalid hexadecimal characters ('12 xy 56', 3), # test non-ASCII string ('12 3\xff 56', 4), ): with self.assertRaises(ValueError) as cm: self.type2test.fromhex(data) self.assertIn('at position %s' % pos, str(cm.exception)) def test_hex(self): self.assertRaises(TypeError, self.type2test.hex) self.assertRaises(TypeError, self.type2test.hex, 1) self.assertEqual(self.type2test(b"").hex(), "") self.assertEqual(bytearray([0x1a, 0x2b, 0x30]).hex(), '1a2b30') self.assertEqual(self.type2test(b"\x1a\x2b\x30").hex(), '1a2b30') self.assertEqual(memoryview(b"\x1a\x2b\x30").hex(), '1a2b30') def test_join(self): self.assertEqual(self.type2test(b"").join([]), b"") self.assertEqual(self.type2test(b"").join([b""]), b"") for lst in [[b"abc"], [b"a", b"bc"], [b"ab", b"c"], [b"a", b"b", b"c"]]: lst = list(map(self.type2test, lst)) self.assertEqual(self.type2test(b"").join(lst), b"abc") self.assertEqual(self.type2test(b"").join(tuple(lst)), b"abc") self.assertEqual(self.type2test(b"").join(iter(lst)), b"abc") dot_join = self.type2test(b".:").join self.assertEqual(dot_join([b"ab", b"cd"]), b"ab.:cd") self.assertEqual(dot_join([memoryview(b"ab"), b"cd"]), b"ab.:cd") self.assertEqual(dot_join([b"ab", memoryview(b"cd")]), b"ab.:cd") self.assertEqual(dot_join([bytearray(b"ab"), b"cd"]), b"ab.:cd") self.assertEqual(dot_join([b"ab", bytearray(b"cd")]), b"ab.:cd") # Stress it with many items seq = [b"abc"] * 1000 expected = b"abc" + b".:abc" * 999 self.assertEqual(dot_join(seq), expected) self.assertRaises(TypeError, self.type2test(b" ").join, None) # Error handling and cleanup when some item in the middle of the # 
sequence has the wrong type. with self.assertRaises(TypeError): dot_join([bytearray(b"ab"), "cd", b"ef"]) with self.assertRaises(TypeError): dot_join([memoryview(b"ab"), "cd", b"ef"]) def test_count(self): b = self.type2test(b'mississippi') i = 105 p = 112 w = 119 self.assertEqual(b.count(b'i'), 4) self.assertEqual(b.count(b'ss'), 2) self.assertEqual(b.count(b'w'), 0) self.assertEqual(b.count(i), 4) self.assertEqual(b.count(w), 0) self.assertEqual(b.count(b'i', 6), 2) self.assertEqual(b.count(b'p', 6), 2) self.assertEqual(b.count(b'i', 1, 3), 1) self.assertEqual(b.count(b'p', 7, 9), 1) self.assertEqual(b.count(i, 6), 2) self.assertEqual(b.count(p, 6), 2) self.assertEqual(b.count(i, 1, 3), 1) self.assertEqual(b.count(p, 7, 9), 1) def test_startswith(self): b = self.type2test(b'hello') self.assertFalse(self.type2test().startswith(b"anything")) self.assertTrue(b.startswith(b"hello")) self.assertTrue(b.startswith(b"hel")) self.assertTrue(b.startswith(b"h")) self.assertFalse(b.startswith(b"hellow")) self.assertFalse(b.startswith(b"ha")) with self.assertRaises(TypeError) as cm: b.startswith([b'h']) exc = str(cm.exception) self.assertIn('bytes', exc) self.assertIn('tuple', exc) def test_endswith(self): b = self.type2test(b'hello') self.assertFalse(bytearray().endswith(b"anything")) self.assertTrue(b.endswith(b"hello")) self.assertTrue(b.endswith(b"llo")) self.assertTrue(b.endswith(b"o")) self.assertFalse(b.endswith(b"whello")) self.assertFalse(b.endswith(b"no")) with self.assertRaises(TypeError) as cm: b.endswith([b'o']) exc = str(cm.exception) self.assertIn('bytes', exc) self.assertIn('tuple', exc) def test_find(self): b = self.type2test(b'mississippi') i = 105 w = 119 self.assertEqual(b.find(b'ss'), 2) self.assertEqual(b.find(b'w'), -1) self.assertEqual(b.find(b'mississippian'), -1) self.assertEqual(b.find(i), 1) self.assertEqual(b.find(w), -1) self.assertEqual(b.find(b'ss', 3), 5) self.assertEqual(b.find(b'ss', 1, 7), 2) self.assertEqual(b.find(b'ss', 1, 3), -1) self.assertEqual(b.find(i, 6), 7) self.assertEqual(b.find(i, 1, 3), 1) self.assertEqual(b.find(w, 1, 3), -1) for index in (-1, 256, sys.maxsize + 1): self.assertRaisesRegex( ValueError, r'byte must be in range\(0, 256\)', b.find, index) def test_rfind(self): b = self.type2test(b'mississippi') i = 105 w = 119 self.assertEqual(b.rfind(b'ss'), 5) self.assertEqual(b.rfind(b'w'), -1) self.assertEqual(b.rfind(b'mississippian'), -1) self.assertEqual(b.rfind(i), 10) self.assertEqual(b.rfind(w), -1) self.assertEqual(b.rfind(b'ss', 3), 5) self.assertEqual(b.rfind(b'ss', 0, 6), 2) self.assertEqual(b.rfind(i, 1, 3), 1) self.assertEqual(b.rfind(i, 3, 9), 7) self.assertEqual(b.rfind(w, 1, 3), -1) def test_index(self): b = self.type2test(b'mississippi') i = 105 w = 119 self.assertEqual(b.index(b'ss'), 2) self.assertRaises(ValueError, b.index, b'w') self.assertRaises(ValueError, b.index, b'mississippian') self.assertEqual(b.index(i), 1) self.assertRaises(ValueError, b.index, w) self.assertEqual(b.index(b'ss', 3), 5) self.assertEqual(b.index(b'ss', 1, 7), 2) self.assertRaises(ValueError, b.index, b'ss', 1, 3) self.assertEqual(b.index(i, 6), 7) self.assertEqual(b.index(i, 1, 3), 1) self.assertRaises(ValueError, b.index, w, 1, 3) def test_rindex(self): b = self.type2test(b'mississippi') i = 105 w = 119 self.assertEqual(b.rindex(b'ss'), 5) self.assertRaises(ValueError, b.rindex, b'w') self.assertRaises(ValueError, b.rindex, b'mississippian') self.assertEqual(b.rindex(i), 10) self.assertRaises(ValueError, b.rindex, w) self.assertEqual(b.rindex(b'ss', 3), 
5) self.assertEqual(b.rindex(b'ss', 0, 6), 2) self.assertEqual(b.rindex(i, 1, 3), 1) self.assertEqual(b.rindex(i, 3, 9), 7) self.assertRaises(ValueError, b.rindex, w, 1, 3) def test_mod(self): b = self.type2test(b'hello, %b!') orig = b b = b % b'world' self.assertEqual(b, b'hello, world!') self.assertEqual(orig, b'hello, %b!') self.assertFalse(b is orig) b = self.type2test(b'%s / 100 = %d%%') a = b % (b'seventy-nine', 79) self.assertEqual(a, b'seventy-nine / 100 = 79%') self.assertIs(type(a), self.type2test) def test_imod(self): b = self.type2test(b'hello, %b!') orig = b b %= b'world' self.assertEqual(b, b'hello, world!') self.assertEqual(orig, b'hello, %b!') self.assertFalse(b is orig) b = self.type2test(b'%s / 100 = %d%%') b %= (b'seventy-nine', 79) self.assertEqual(b, b'seventy-nine / 100 = 79%') self.assertIs(type(b), self.type2test) def test_rmod(self): with self.assertRaises(TypeError): object() % self.type2test(b'abc') self.assertIs(self.type2test(b'abc').__rmod__('%r'), NotImplemented) def test_replace(self): b = self.type2test(b'mississippi') self.assertEqual(b.replace(b'i', b'a'), b'massassappa') self.assertEqual(b.replace(b'ss', b'x'), b'mixixippi') def test_split_string_error(self): self.assertRaises(TypeError, self.type2test(b'a b').split, ' ') def test_split_unicodewhitespace(self): for b in (b'a\x1Cb', b'a\x1Db', b'a\x1Eb', b'a\x1Fb'): b = self.type2test(b) self.assertEqual(b.split(), [b]) b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F") self.assertEqual(b.split(), [b'\x1c\x1d\x1e\x1f']) def test_rsplit_string_error(self): self.assertRaises(TypeError, self.type2test(b'a b').rsplit, ' ') def test_rsplit_unicodewhitespace(self): b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F") self.assertEqual(b.rsplit(), [b'\x1c\x1d\x1e\x1f']) def test_partition(self): b = self.type2test(b'mississippi') self.assertEqual(b.partition(b'ss'), (b'mi', b'ss', b'issippi')) self.assertEqual(b.partition(b'w'), (b'mississippi', b'', b'')) def test_rpartition(self): b = self.type2test(b'mississippi') self.assertEqual(b.rpartition(b'ss'), (b'missi', b'ss', b'ippi')) self.assertEqual(b.rpartition(b'i'), (b'mississipp', b'i', b'')) self.assertEqual(b.rpartition(b'w'), (b'', b'', b'mississippi')) def test_pickling(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0": b = self.type2test(b) ps = pickle.dumps(b, proto) q = pickle.loads(ps) self.assertEqual(b, q) def test_iterator_pickling(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0": it = itorg = iter(self.type2test(b)) data = list(self.type2test(b)) d = pickle.dumps(it, proto) it = pickle.loads(d) self.assertEqual(type(itorg), type(it)) self.assertEqual(list(it), data) it = pickle.loads(d) if not b: continue next(it) d = pickle.dumps(it, proto) it = pickle.loads(d) self.assertEqual(list(it), data[1:]) def test_strip_bytearray(self): self.assertEqual(self.type2test(b'abc').strip(memoryview(b'ac')), b'b') self.assertEqual(self.type2test(b'abc').lstrip(memoryview(b'ac')), b'bc') self.assertEqual(self.type2test(b'abc').rstrip(memoryview(b'ac')), b'ab') def test_strip_string_error(self): self.assertRaises(TypeError, self.type2test(b'abc').strip, 'b') self.assertRaises(TypeError, self.type2test(b'abc').lstrip, 'b') self.assertRaises(TypeError, self.type2test(b'abc').rstrip, 'b') def test_center(self): # Fill character can be either bytes or bytearray (issue 12380) b = self.type2test(b'abc') for fill_type in 
(bytes, bytearray): self.assertEqual(b.center(7, fill_type(b'-')), self.type2test(b'--abc--')) def test_ljust(self): # Fill character can be either bytes or bytearray (issue 12380) b = self.type2test(b'abc') for fill_type in (bytes, bytearray): self.assertEqual(b.ljust(7, fill_type(b'-')), self.type2test(b'abc----')) def test_rjust(self): # Fill character can be either bytes or bytearray (issue 12380) b = self.type2test(b'abc') for fill_type in (bytes, bytearray): self.assertEqual(b.rjust(7, fill_type(b'-')), self.type2test(b'----abc')) def test_ord(self): b = self.type2test(b'\0A\x7f\x80\xff') self.assertEqual([ord(b[i:i+1]) for i in range(len(b))], [0, 65, 127, 128, 255]) def test_maketrans(self): transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377' self.assertEqual(self.type2test.maketrans(b'abc', b'xyz'), transtable) transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374xyz' self.assertEqual(self.type2test.maketrans(b'\375\376\377', b'xyz'), transtable) self.assertRaises(ValueError, self.type2test.maketrans, b'abc', b'xyzq') self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 'def') def test_none_arguments(self): # issue 11828 b = self.type2test(b'hello') l = self.type2test(b'l') h = self.type2test(b'h') x = self.type2test(b'x') o = self.type2test(b'o') self.assertEqual(2, b.find(l, None)) self.assertEqual(3, b.find(l, -2, None)) self.assertEqual(2, b.find(l, None, -2)) self.assertEqual(0, b.find(h, None, None)) self.assertEqual(3, b.rfind(l, None)) self.assertEqual(3, b.rfind(l, -2, None)) self.assertEqual(2, b.rfind(l, None, -2)) self.assertEqual(0, b.rfind(h, None, None)) self.assertEqual(2, b.index(l, None)) self.assertEqual(3, b.index(l, -2, None)) self.assertEqual(2, b.index(l, None, -2)) self.assertEqual(0, b.index(h, None, None)) self.assertEqual(3, b.rindex(l, None)) self.assertEqual(3, b.rindex(l, -2, None)) self.assertEqual(2, b.rindex(l, None, -2)) self.assertEqual(0, b.rindex(h, None, None)) self.assertEqual(2, b.count(l, None)) self.assertEqual(1, b.count(l, -2, None)) self.assertEqual(1, b.count(l, None, -2)) self.assertEqual(0, b.count(x, None, None)) self.assertEqual(True, b.endswith(o, 
None)) self.assertEqual(True, b.endswith(o, -2, None)) self.assertEqual(True, b.endswith(l, None, -2)) self.assertEqual(False, b.endswith(x, None, None)) self.assertEqual(True, b.startswith(h, None)) self.assertEqual(True, b.startswith(l, -2, None)) self.assertEqual(True, b.startswith(h, None, -2)) self.assertEqual(False, b.startswith(x, None, None)) def test_integer_arguments_out_of_byte_range(self): b = self.type2test(b'hello') for method in (b.count, b.find, b.index, b.rfind, b.rindex): self.assertRaises(ValueError, method, -1) self.assertRaises(ValueError, method, 256) self.assertRaises(ValueError, method, 9999) def test_find_etc_raise_correct_error_messages(self): # issue 11828 b = self.type2test(b'hello') x = self.type2test(b'x') self.assertRaisesRegex(TypeError, r'\bfind\b', b.find, x, None, None, None) self.assertRaisesRegex(TypeError, r'\brfind\b', b.rfind, x, None, None, None) self.assertRaisesRegex(TypeError, r'\bindex\b', b.index, x, None, None, None) self.assertRaisesRegex(TypeError, r'\brindex\b', b.rindex, x, None, None, None) self.assertRaisesRegex(TypeError, r'\bcount\b', b.count, x, None, None, None) self.assertRaisesRegex(TypeError, r'\bstartswith\b', b.startswith, x, None, None, None) self.assertRaisesRegex(TypeError, r'\bendswith\b', b.endswith, x, None, None, None) def test_free_after_iterating(self): test.support.check_free_after_iterating(self, iter, self.type2test) test.support.check_free_after_iterating(self, reversed, self.type2test) def test_translate(self): b = self.type2test(b'hello') rosetta = bytearray(range(256)) rosetta[ord('o')] = ord('e') self.assertRaises(TypeError, b.translate) self.assertRaises(TypeError, b.translate, None, None) self.assertRaises(ValueError, b.translate, bytes(range(255))) c = b.translate(rosetta, b'hello') self.assertEqual(b, b'hello') self.assertIsInstance(c, self.type2test) c = b.translate(rosetta) d = b.translate(rosetta, b'') self.assertEqual(c, d) self.assertEqual(c, b'helle') c = b.translate(rosetta, b'l') self.assertEqual(c, b'hee') c = b.translate(None, b'e') self.assertEqual(c, b'hllo') # test delete as a keyword argument c = b.translate(rosetta, delete=b'') self.assertEqual(c, b'helle') c = b.translate(rosetta, delete=b'l') self.assertEqual(c, b'hee') c = b.translate(None, delete=b'e') self.assertEqual(c, b'hllo') class BytesTest(BaseBytesTest, unittest.TestCase): type2test = bytes def test_getitem_error(self): msg = "byte indices must be integers or slices" with self.assertRaisesRegex(TypeError, msg): b'python'['a'] def test_buffer_is_readonly(self): fd = os.open(__file__, os.O_RDONLY) with open(fd, "rb", buffering=0) as f: self.assertRaises(TypeError, f.readinto, b"") def test_custom(self): class A: def __bytes__(self): return b'abc' self.assertEqual(bytes(A()), b'abc') class A: pass self.assertRaises(TypeError, bytes, A()) class A: def __bytes__(self): return None self.assertRaises(TypeError, bytes, A()) class A: def __bytes__(self): return b'a' def __index__(self): return 42 self.assertEqual(bytes(A()), b'a') # Issue #25766 class A(str): def __bytes__(self): return b'abc' self.assertEqual(bytes(A('\u20ac')), b'abc') self.assertEqual(bytes(A('\u20ac'), 'iso8859-15'), b'\xa4') # Issue #24731 class A: def __bytes__(self): return OtherBytesSubclass(b'abc') self.assertEqual(bytes(A()), b'abc') self.assertIs(type(bytes(A())), OtherBytesSubclass) self.assertEqual(BytesSubclass(A()), b'abc') self.assertIs(type(BytesSubclass(A())), BytesSubclass) # Test PyBytes_FromFormat() def test_from_format(self): ctypes = 
test.support.import_module('ctypes') _testcapi = test.support.import_module('_testcapi') from ctypes import pythonapi, py_object from ctypes import ( c_int, c_uint, c_long, c_ulong, c_size_t, c_ssize_t, c_char_p) PyBytes_FromFormat = pythonapi.PyBytes_FromFormat PyBytes_FromFormat.restype = py_object # basic tests self.assertEqual(PyBytes_FromFormat(b'format'), b'format') self.assertEqual(PyBytes_FromFormat(b'Hello %s !', b'world'), b'Hello world !') # test formatters self.assertEqual(PyBytes_FromFormat(b'c=%c', c_int(0)), b'c=\0') self.assertEqual(PyBytes_FromFormat(b'c=%c', c_int(ord('@'))), b'c=@') self.assertEqual(PyBytes_FromFormat(b'c=%c', c_int(255)), b'c=\xff') self.assertEqual(PyBytes_FromFormat(b'd=%d ld=%ld zd=%zd', c_int(1), c_long(2), c_size_t(3)), b'd=1 ld=2 zd=3') self.assertEqual(PyBytes_FromFormat(b'd=%d ld=%ld zd=%zd', c_int(-1), c_long(-2), c_size_t(-3)), b'd=-1 ld=-2 zd=-3') self.assertEqual(PyBytes_FromFormat(b'u=%u lu=%lu zu=%zu', c_uint(123), c_ulong(456), c_size_t(789)), b'u=123 lu=456 zu=789') self.assertEqual(PyBytes_FromFormat(b'i=%i', c_int(123)), b'i=123') self.assertEqual(PyBytes_FromFormat(b'i=%i', c_int(-123)), b'i=-123') self.assertEqual(PyBytes_FromFormat(b'x=%x', c_int(0xabc)), b'x=abc') sizeof_ptr = ctypes.sizeof(c_char_p) if os.name == 'nt': # Windows (MSCRT) ptr_format = '0x%0{}X'.format(2 * sizeof_ptr) def ptr_formatter(ptr): return (ptr_format % ptr) else: # UNIX (glibc) def ptr_formatter(ptr): return '%#x' % ptr ptr = 0xabcdef self.assertEqual(PyBytes_FromFormat(b'ptr=%p', c_char_p(ptr)), ('ptr=' + ptr_formatter(ptr)).encode('ascii')) self.assertEqual(PyBytes_FromFormat(b's=%s', c_char_p(b'cstr')), b's=cstr') # test minimum and maximum integer values size_max = c_size_t(-1).value for formatstr, ctypes_type, value, py_formatter in ( (b'%d', c_int, _testcapi.INT_MIN, str), (b'%d', c_int, _testcapi.INT_MAX, str), (b'%ld', c_long, _testcapi.LONG_MIN, str), (b'%ld', c_long, _testcapi.LONG_MAX, str), (b'%lu', c_ulong, _testcapi.ULONG_MAX, str), (b'%zd', c_ssize_t, _testcapi.PY_SSIZE_T_MIN, str), (b'%zd', c_ssize_t, _testcapi.PY_SSIZE_T_MAX, str), (b'%zu', c_size_t, size_max, str), (b'%p', c_char_p, size_max, ptr_formatter), ): self.assertEqual(PyBytes_FromFormat(formatstr, ctypes_type(value)), py_formatter(value).encode('ascii')), # width and precision (width is currently ignored) self.assertEqual(PyBytes_FromFormat(b'%5s', b'a'), b'a') self.assertEqual(PyBytes_FromFormat(b'%.3s', b'abcdef'), b'abc') # '%%' formatter self.assertEqual(PyBytes_FromFormat(b'%%'), b'%') self.assertEqual(PyBytes_FromFormat(b'[%%]'), b'[%]') self.assertEqual(PyBytes_FromFormat(b'%%%c', c_int(ord('_'))), b'%_') self.assertEqual(PyBytes_FromFormat(b'%%s'), b'%s') # Invalid formats and partial formatting self.assertEqual(PyBytes_FromFormat(b'%'), b'%') self.assertEqual(PyBytes_FromFormat(b'x=%i y=%', c_int(2), c_int(3)), b'x=2 y=%') # Issue #19969: %c must raise OverflowError for values # not in the range [0; 255] self.assertRaises(OverflowError, PyBytes_FromFormat, b'%c', c_int(-1)) self.assertRaises(OverflowError, PyBytes_FromFormat, b'%c', c_int(256)) def test_bytes_blocking(self): class IterationBlocked(list): __bytes__ = None i = [0, 1, 2, 3] self.assertEqual(bytes(i), b'\x00\x01\x02\x03') self.assertRaises(TypeError, bytes, IterationBlocked(i)) # At least in CPython, because bytes.__new__ and the C API # PyBytes_FromObject have different fallback rules, integer # fallback is handled specially, so test separately. 
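        # bytes(3) takes the integer path and returns a zero-filled buffer
        # (b'\x00\x00\x00'); the subclass below sets __bytes__ = None to check
        # that an explicitly blocked __bytes__ wins over that fallback and
        # raises TypeError instead.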
class IntBlocked(int): __bytes__ = None self.assertEqual(bytes(3), b'\0\0\0') self.assertRaises(TypeError, bytes, IntBlocked(3)) # While there is no separately-defined rule for handling bytes # subclasses differently from other buffer-interface classes, # an implementation may well special-case them (as CPython 2.x # str did), so test them separately. class BytesSubclassBlocked(bytes): __bytes__ = None self.assertEqual(bytes(b'ab'), b'ab') self.assertRaises(TypeError, bytes, BytesSubclassBlocked(b'ab')) class BufferBlocked(bytearray): __bytes__ = None ba, bb = bytearray(b'ab'), BufferBlocked(b'ab') self.assertEqual(bytes(ba), b'ab') self.assertRaises(TypeError, bytes, bb) class ByteArrayTest(BaseBytesTest, unittest.TestCase): type2test = bytearray def test_getitem_error(self): msg = "bytearray indices must be integers or slices" with self.assertRaisesRegex(TypeError, msg): bytearray(b'python')['a'] def test_setitem_error(self): msg = "bytearray indices must be integers or slices" with self.assertRaisesRegex(TypeError, msg): b = bytearray(b'python') b['a'] = "python" def test_nohash(self): self.assertRaises(TypeError, hash, bytearray()) def test_bytearray_api(self): short_sample = b"Hello world\n" sample = short_sample + b"\0"*(20 - len(short_sample)) tfn = tempfile.mktemp() try: # Prepare with open(tfn, "wb") as f: f.write(short_sample) # Test readinto with open(tfn, "rb") as f: b = bytearray(20) n = f.readinto(b) self.assertEqual(n, len(short_sample)) self.assertEqual(list(b), list(sample)) # Test writing in binary mode with open(tfn, "wb") as f: f.write(b) with open(tfn, "rb") as f: self.assertEqual(f.read(), sample) # Text mode is ambiguous; don't test finally: try: os.remove(tfn) except OSError: pass def test_reverse(self): b = bytearray(b'hello') self.assertEqual(b.reverse(), None) self.assertEqual(b, b'olleh') b = bytearray(b'hello1') # test even number of items b.reverse() self.assertEqual(b, b'1olleh') b = bytearray() b.reverse() self.assertFalse(b) def test_clear(self): b = bytearray(b'python') b.clear() self.assertEqual(b, b'') b = bytearray(b'') b.clear() self.assertEqual(b, b'') b = bytearray(b'') b.append(ord('r')) b.clear() b.append(ord('p')) self.assertEqual(b, b'p') def test_copy(self): b = bytearray(b'abc') bb = b.copy() self.assertEqual(bb, b'abc') b = bytearray(b'') bb = b.copy() self.assertEqual(bb, b'') # test that it's indeed a copy and not a reference b = bytearray(b'abc') bb = b.copy() self.assertEqual(b, bb) self.assertIsNot(b, bb) bb.append(ord('d')) self.assertEqual(bb, b'abcd') self.assertEqual(b, b'abc') def test_regexps(self): def by(s): return bytearray(map(ord, s)) b = by("Hello, world") self.assertEqual(re.findall(br"\w+", b), [by("Hello"), by("world")]) def test_setitem(self): b = bytearray([1, 2, 3]) b[1] = 100 self.assertEqual(b, bytearray([1, 100, 3])) b[-1] = 200 self.assertEqual(b, bytearray([1, 100, 200])) b[0] = Indexable(10) self.assertEqual(b, bytearray([10, 100, 200])) try: b[3] = 0 self.fail("Didn't raise IndexError") except IndexError: pass try: b[-10] = 0 self.fail("Didn't raise IndexError") except IndexError: pass try: b[0] = 256 self.fail("Didn't raise ValueError") except ValueError: pass try: b[0] = Indexable(-1) self.fail("Didn't raise ValueError") except ValueError: pass try: b[0] = None self.fail("Didn't raise TypeError") except TypeError: pass def test_delitem(self): b = bytearray(range(10)) del b[0] self.assertEqual(b, bytearray(range(1, 10))) del b[-1] self.assertEqual(b, bytearray(range(1, 9))) del b[4] self.assertEqual(b, 
bytearray([1, 2, 3, 4, 6, 7, 8])) def test_setslice(self): b = bytearray(range(10)) self.assertEqual(list(b), list(range(10))) b[0:5] = bytearray([1, 1, 1, 1, 1]) self.assertEqual(b, bytearray([1, 1, 1, 1, 1, 5, 6, 7, 8, 9])) del b[0:-5] self.assertEqual(b, bytearray([5, 6, 7, 8, 9])) b[0:0] = bytearray([0, 1, 2, 3, 4]) self.assertEqual(b, bytearray(range(10))) b[-7:-3] = bytearray([100, 101]) self.assertEqual(b, bytearray([0, 1, 2, 100, 101, 7, 8, 9])) b[3:5] = [3, 4, 5, 6] self.assertEqual(b, bytearray(range(10))) b[3:0] = [42, 42, 42] self.assertEqual(b, bytearray([0, 1, 2, 42, 42, 42, 3, 4, 5, 6, 7, 8, 9])) b[3:] = b'foo' self.assertEqual(b, bytearray([0, 1, 2, 102, 111, 111])) b[:3] = memoryview(b'foo') self.assertEqual(b, bytearray([102, 111, 111, 102, 111, 111])) b[3:4] = [] self.assertEqual(b, bytearray([102, 111, 111, 111, 111])) for elem in [5, -5, 0, int(10e20), 'str', 2.3, ['a', 'b'], [b'a', b'b'], [[]]]: with self.assertRaises(TypeError): b[3:4] = elem for elem in [[254, 255, 256], [-256, 9000]]: with self.assertRaises(ValueError): b[3:4] = elem def test_setslice_extend(self): # Exercise the resizing logic (see issue #19087) b = bytearray(range(100)) self.assertEqual(list(b), list(range(100))) del b[:10] self.assertEqual(list(b), list(range(10, 100))) b.extend(range(100, 110)) self.assertEqual(list(b), list(range(10, 110))) def test_fifo_overrun(self): # Test for issue #23985, a buffer overrun when implementing a FIFO # Build Python in pydebug mode for best results. b = bytearray(10) b.pop() # Defeat expanding buffer off-by-one quirk del b[:1] # Advance start pointer without reallocating b += bytes(2) # Append exactly the number of deleted bytes del b # Free memory buffer, allowing pydebug verification def test_del_expand(self): # Reducing the size should not expand the buffer (issue #23985) b = bytearray(10) try: size = sys.getsizeof(b) except TypeError: pass # e.g. on pypy else: del b[:1] self.assertLessEqual(sys.getsizeof(b), size) def test_extended_set_del_slice(self): indices = (0, None, 1, 3, 19, 300, 1<<333, -1, -2, -31, -300) for start in indices: for stop in indices: # Skip invalid step 0 for step in indices[1:]: L = list(range(255)) b = bytearray(L) # Make sure we have a slice of exactly the right length, # but with different data. data = L[start:stop:step] data.reverse() L[start:stop:step] = data b[start:stop:step] = data self.assertEqual(b, bytearray(L)) del L[start:stop:step] del b[start:stop:step] self.assertEqual(b, bytearray(L)) def test_setslice_trap(self): # This test verifies that we correctly handle assigning self # to a slice of self (the old Lambert Meertens trap). 
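        # b[8:] = b must read the whole source before resizing and overwriting
        # the destination: source and destination are the same object here, so
        # a naive in-place copy would read bytes it has already clobbered.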
b = bytearray(range(256)) b[8:] = b self.assertEqual(b, bytearray(list(range(8)) + list(range(256)))) def test_iconcat(self): b = bytearray(b"abc") b1 = b b += b"def" self.assertEqual(b, b"abcdef") self.assertEqual(b, b1) self.assertTrue(b is b1) b += b"xyz" self.assertEqual(b, b"abcdefxyz") try: b += "" except TypeError: pass else: self.fail("bytes += unicode didn't raise TypeError") def test_irepeat(self): b = bytearray(b"abc") b1 = b b *= 3 self.assertEqual(b, b"abcabcabc") self.assertEqual(b, b1) self.assertTrue(b is b1) def test_irepeat_1char(self): b = bytearray(b"x") b1 = b b *= 100 self.assertEqual(b, b"x"*100) self.assertEqual(b, b1) self.assertTrue(b is b1) def test_alloc(self): b = bytearray() alloc = b.__alloc__() self.assertTrue(alloc >= 0) seq = [alloc] for i in range(100): b += b"x" alloc = b.__alloc__() self.assertGreater(alloc, len(b)) # including trailing null byte if alloc not in seq: seq.append(alloc) def test_init_alloc(self): b = bytearray() def g(): for i in range(1, 100): yield i a = list(b) self.assertEqual(a, list(range(1, len(a)+1))) self.assertEqual(len(b), len(a)) self.assertLessEqual(len(b), i) alloc = b.__alloc__() self.assertGreater(alloc, len(b)) # including trailing null byte b.__init__(g()) self.assertEqual(list(b), list(range(1, 100))) self.assertEqual(len(b), 99) alloc = b.__alloc__() self.assertGreater(alloc, len(b)) def test_extend(self): orig = b'hello' a = bytearray(orig) a.extend(a) self.assertEqual(a, orig + orig) self.assertEqual(a[5:], orig) a = bytearray(b'') # Test iterators that don't have a __length_hint__ a.extend(map(int, orig * 25)) a.extend(int(x) for x in orig * 25) self.assertEqual(a, orig * 50) self.assertEqual(a[-5:], orig) a = bytearray(b'') a.extend(iter(map(int, orig * 50))) self.assertEqual(a, orig * 50) self.assertEqual(a[-5:], orig) a = bytearray(b'') a.extend(list(map(int, orig * 50))) self.assertEqual(a, orig * 50) self.assertEqual(a[-5:], orig) a = bytearray(b'') self.assertRaises(ValueError, a.extend, [0, 1, 2, 256]) self.assertRaises(ValueError, a.extend, [0, 1, 2, -1]) self.assertEqual(len(a), 0) a = bytearray(b'') a.extend([Indexable(ord('a'))]) self.assertEqual(a, b'a') def test_remove(self): b = bytearray(b'hello') b.remove(ord('l')) self.assertEqual(b, b'helo') b.remove(ord('l')) self.assertEqual(b, b'heo') self.assertRaises(ValueError, lambda: b.remove(ord('l'))) self.assertRaises(ValueError, lambda: b.remove(400)) self.assertRaises(TypeError, lambda: b.remove('e')) # remove first and last b.remove(ord('o')) b.remove(ord('h')) self.assertEqual(b, b'e') self.assertRaises(TypeError, lambda: b.remove(b'e')) b.remove(Indexable(ord('e'))) self.assertEqual(b, b'') # test values outside of the ascii range: (0, 127) c = bytearray([126, 127, 128, 129]) c.remove(127) self.assertEqual(c, bytes([126, 128, 129])) c.remove(129) self.assertEqual(c, bytes([126, 128])) def test_pop(self): b = bytearray(b'world') self.assertEqual(b.pop(), ord('d')) self.assertEqual(b.pop(0), ord('w')) self.assertEqual(b.pop(-2), ord('r')) self.assertRaises(IndexError, lambda: b.pop(10)) self.assertRaises(IndexError, lambda: bytearray().pop()) # test for issue #6846 self.assertEqual(bytearray(b'\xff').pop(), 0xff) def test_nosort(self): self.assertRaises(AttributeError, lambda: bytearray().sort()) def test_append(self): b = bytearray(b'hell') b.append(ord('o')) self.assertEqual(b, b'hello') self.assertEqual(b.append(100), None) b = bytearray() b.append(ord('A')) self.assertEqual(len(b), 1) self.assertRaises(TypeError, lambda: b.append(b'o')) b = 
bytearray() b.append(Indexable(ord('A'))) self.assertEqual(b, b'A') def test_insert(self): b = bytearray(b'msssspp') b.insert(1, ord('i')) b.insert(4, ord('i')) b.insert(-2, ord('i')) b.insert(1000, ord('i')) self.assertEqual(b, b'mississippi') self.assertRaises(TypeError, lambda: b.insert(0, b'1')) b = bytearray() b.insert(0, Indexable(ord('A'))) self.assertEqual(b, b'A') def test_copied(self): # Issue 4348. Make sure that operations that don't mutate the array # copy the bytes. b = bytearray(b'abc') self.assertFalse(b is b.replace(b'abc', b'cde', 0)) t = bytearray([i for i in range(256)]) x = bytearray(b'') self.assertFalse(x is x.translate(t)) def test_partition_bytearray_doesnt_share_nullstring(self): a, b, c = bytearray(b"x").partition(b"y") self.assertEqual(b, b"") self.assertEqual(c, b"") self.assertTrue(b is not c) b += b"!" self.assertEqual(c, b"") a, b, c = bytearray(b"x").partition(b"y") self.assertEqual(b, b"") self.assertEqual(c, b"") # Same for rpartition b, c, a = bytearray(b"x").rpartition(b"y") self.assertEqual(b, b"") self.assertEqual(c, b"") self.assertTrue(b is not c) b += b"!" self.assertEqual(c, b"") c, b, a = bytearray(b"x").rpartition(b"y") self.assertEqual(b, b"") self.assertEqual(c, b"") @test.support.impl_detail( "resizing semantics of CPython rely on refcounting") def test_resize_forbidden(self): # #4509: can't resize a bytearray when there are buffer exports, even # if it wouldn't reallocate the underlying buffer. # Furthermore, no destructive changes to the buffer may be applied # before raising the error. b = bytearray(range(10)) v = memoryview(b) def resize(n): b[1:-1] = range(n + 1, 2*n - 1) resize(10) orig = b[:] self.assertRaises(BufferError, resize, 11) self.assertEqual(b, orig) self.assertRaises(BufferError, resize, 9) self.assertEqual(b, orig) self.assertRaises(BufferError, resize, 0) self.assertEqual(b, orig) # Other operations implying resize self.assertRaises(BufferError, b.pop, 0) self.assertEqual(b, orig) self.assertRaises(BufferError, b.remove, b[1]) self.assertEqual(b, orig) def delitem(): del b[1] self.assertRaises(BufferError, delitem) self.assertEqual(b, orig) # deleting a non-contiguous slice def delslice(): b[1:-1:2] = b"" self.assertRaises(BufferError, delslice) self.assertEqual(b, orig) @test.support.impl_detail("resizing semantics", cpython=False) def test_resize_forbidden_non_cpython(self): # on non-CPython implementations, we cannot prevent changes to # bytearrays just because there are buffers around. Instead, # we get (on PyPy) a buffer that follows the changes and resizes. 
b = bytearray(range(10)) v = memoryview(b) b[5] = 99 self.assertIn(v[5], (99, bytes([99]))) b[5] = 100 b += b b += b b += b self.assertEquals(len(v), 80) self.assertIn(v[5], (100, bytes([100]))) self.assertIn(v[79], (9, bytes([9]))) del b[10:] self.assertRaises(IndexError, lambda: v[10]) self.assertEquals(len(v), 10) @test.support.cpython_only def test_obsolete_write_lock(self): from _testcapi import getbuffer_with_null_view self.assertRaises(BufferError, getbuffer_with_null_view, bytearray()) def test_iterator_pickling2(self): orig = bytearray(b'abc') data = list(b'qwerty') for proto in range(pickle.HIGHEST_PROTOCOL + 1): # initial iterator itorig = iter(orig) d = pickle.dumps((itorig, orig), proto) it, b = pickle.loads(d) b[:] = data self.assertEqual(type(it), type(itorig)) self.assertEqual(list(it), data) # running iterator next(itorig) d = pickle.dumps((itorig, orig), proto) it, b = pickle.loads(d) b[:] = data self.assertEqual(type(it), type(itorig)) self.assertEqual(list(it), data[1:]) # empty iterator for i in range(1, len(orig)): next(itorig) d = pickle.dumps((itorig, orig), proto) it, b = pickle.loads(d) b[:] = data self.assertEqual(type(it), type(itorig)) self.assertEqual(list(it), data[len(orig):]) # exhausted iterator self.assertRaises(StopIteration, next, itorig) d = pickle.dumps((itorig, orig), proto) it, b = pickle.loads(d) b[:] = data self.assertEqual(list(it), []) test_exhausted_iterator = test.list_tests.CommonTest.test_exhausted_iterator def test_iterator_length_hint(self): # Issue 27443: __length_hint__ can return negative integer ba = bytearray(b'ab') it = iter(ba) next(it) ba.clear() # Shouldn't raise an error self.assertEqual(list(it), []) class AssortedBytesTest(unittest.TestCase): # # Test various combinations of bytes and bytearray # @check_bytes_warnings def test_repr_str(self): for f in str, repr: self.assertEqual(f(bytearray()), "bytearray(b'')") self.assertEqual(f(bytearray([0])), "bytearray(b'\\x00')") self.assertEqual(f(bytearray([0, 1, 254, 255])), "bytearray(b'\\x00\\x01\\xfe\\xff')") self.assertEqual(f(b"abc"), "b'abc'") self.assertEqual(f(b"'"), '''b"'"''') # ''' self.assertEqual(f(b"'\""), r"""b'\'"'""") # ' @check_bytes_warnings def test_format(self): for b in b'abc', bytearray(b'abc'): self.assertEqual(format(b), str(b)) self.assertEqual(format(b, ''), str(b)) with self.assertRaisesRegex(TypeError, r'\b%s\b' % re.escape(type(b).__name__)): format(b, 's') def test_compare_bytes_to_bytearray(self): self.assertEqual(b"abc" == bytes(b"abc"), True) self.assertEqual(b"ab" != bytes(b"abc"), True) self.assertEqual(b"ab" <= bytes(b"abc"), True) self.assertEqual(b"ab" < bytes(b"abc"), True) self.assertEqual(b"abc" >= bytes(b"ab"), True) self.assertEqual(b"abc" > bytes(b"ab"), True) self.assertEqual(b"abc" != bytes(b"abc"), False) self.assertEqual(b"ab" == bytes(b"abc"), False) self.assertEqual(b"ab" > bytes(b"abc"), False) self.assertEqual(b"ab" >= bytes(b"abc"), False) self.assertEqual(b"abc" < bytes(b"ab"), False) self.assertEqual(b"abc" <= bytes(b"ab"), False) self.assertEqual(bytes(b"abc") == b"abc", True) self.assertEqual(bytes(b"ab") != b"abc", True) self.assertEqual(bytes(b"ab") <= b"abc", True) self.assertEqual(bytes(b"ab") < b"abc", True) self.assertEqual(bytes(b"abc") >= b"ab", True) self.assertEqual(bytes(b"abc") > b"ab", True) self.assertEqual(bytes(b"abc") != b"abc", False) self.assertEqual(bytes(b"ab") == b"abc", False) self.assertEqual(bytes(b"ab") > b"abc", False) self.assertEqual(bytes(b"ab") >= b"abc", False) self.assertEqual(bytes(b"abc") < 
b"ab", False) self.assertEqual(bytes(b"abc") <= b"ab", False) @test.support.requires_docstrings def test_doc(self): self.assertIsNotNone(bytearray.__doc__) self.assertTrue(bytearray.__doc__.startswith("bytearray("), bytearray.__doc__) self.assertIsNotNone(bytes.__doc__) self.assertTrue(bytes.__doc__.startswith("bytes("), bytes.__doc__) def test_from_bytearray(self): sample = bytes(b"Hello world\n\x80\x81\xfe\xff") buf = memoryview(sample) b = bytearray(buf) self.assertEqual(b, bytearray(sample)) @check_bytes_warnings def test_to_str(self): self.assertEqual(str(b''), "b''") self.assertEqual(str(b'x'), "b'x'") self.assertEqual(str(b'\x80'), "b'\\x80'") self.assertEqual(str(bytearray(b'')), "bytearray(b'')") self.assertEqual(str(bytearray(b'x')), "bytearray(b'x')") self.assertEqual(str(bytearray(b'\x80')), "bytearray(b'\\x80')") def test_literal(self): tests = [ (b"Wonderful spam", "Wonderful spam"), (br"Wonderful spam too", "Wonderful spam too"), (b"\xaa\x00\000\200", "\xaa\x00\000\200"), (br"\xaa\x00\000\200", r"\xaa\x00\000\200"), ] for b, s in tests: self.assertEqual(b, bytearray(s, 'latin-1')) for c in range(128, 256): self.assertRaises(SyntaxError, eval, 'b"%s"' % chr(c)) def test_split_bytearray(self): self.assertEqual(b'a b'.split(memoryview(b' ')), [b'a', b'b']) def test_rsplit_bytearray(self): self.assertEqual(b'a b'.rsplit(memoryview(b' ')), [b'a', b'b']) def test_return_self(self): # bytearray.replace must always return a new bytearray b = bytearray() self.assertFalse(b.replace(b'', b'') is b) @unittest.skipUnless(sys.flags.bytes_warning, "BytesWarning is needed for this test: use -bb option") def test_compare(self): def bytes_warning(): return test.support.check_warnings(('', BytesWarning)) with bytes_warning(): b'' == '' with bytes_warning(): '' == b'' with bytes_warning(): b'' != '' with bytes_warning(): '' != b'' with bytes_warning(): bytearray(b'') == '' with bytes_warning(): '' == bytearray(b'') with bytes_warning(): bytearray(b'') != '' with bytes_warning(): '' != bytearray(b'') with bytes_warning(): b'\0' == 0 with bytes_warning(): 0 == b'\0' with bytes_warning(): b'\0' != 0 with bytes_warning(): 0 != b'\0' # Optimizations: # __iter__? (optimization) # __reversed__? (optimization) # XXX More string methods? (Those that don't use character properties) # There are tests in string_tests.py that are more # comprehensive for things like partition, etc. # Unfortunately they are all bundled with tests that # are not appropriate for bytes # I've started porting some of those into bytearray_tests.py, we should port # the rest that make sense (the code can be cleaned up to use modern # unittest methods at the same time). class BytearrayPEP3137Test(unittest.TestCase): def marshal(self, x): return bytearray(x) def test_returns_new_copy(self): val = self.marshal(b'1234') # On immutable types these MAY return a reference to themselves # but on mutable types like bytearray they MUST return a new copy. 
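        # e.g. a width-3 call such as val.zfill(3) leaves b'1234' unchanged, so
        # an immutable bytes object could legally be returned as-is, but the
        # mutable types exercised here must still hand back a fresh object.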
for methname in ('zfill', 'rjust', 'ljust', 'center'): method = getattr(val, methname) newval = method(3) self.assertEqual(val, newval) self.assertTrue(val is not newval, methname+' returned self on a mutable object') for expr in ('val.split()[0]', 'val.rsplit()[0]', 'val.partition(b".")[0]', 'val.rpartition(b".")[2]', 'val.splitlines()[0]', 'val.replace(b"", b"")'): newval = eval(expr) self.assertEqual(val, newval) self.assertTrue(val is not newval, expr+' returned val on a mutable object') sep = self.marshal(b'') newval = sep.join([val]) self.assertEqual(val, newval) self.assertIsNot(val, newval) class FixedStringTest(test.string_tests.BaseTest): def fixtype(self, obj): if isinstance(obj, str): return self.type2test(obj.encode("utf-8")) return super().fixtype(obj) contains_bytes = True class ByteArrayAsStringTest(FixedStringTest, unittest.TestCase): type2test = bytearray class BytesAsStringTest(FixedStringTest, unittest.TestCase): type2test = bytes class SubclassTest: def test_basic(self): self.assertTrue(issubclass(self.type2test, self.basetype)) self.assertIsInstance(self.type2test(), self.basetype) a, b = b"abcd", b"efgh" _a, _b = self.type2test(a), self.type2test(b) # test comparison operators with subclass instances self.assertTrue(_a == _a) self.assertTrue(_a != _b) self.assertTrue(_a < _b) self.assertTrue(_a <= _b) self.assertTrue(_b >= _a) self.assertTrue(_b > _a) self.assertTrue(_a is not a) # test concat of subclass instances self.assertEqual(a + b, _a + _b) self.assertEqual(a + b, a + _b) self.assertEqual(a + b, _a + b) # test repeat self.assertTrue(a*5 == _a*5) def test_join(self): # Make sure join returns a NEW object for single item sequences # involving a subclass. # Make sure that it is of the appropriate type. s1 = self.type2test(b"abcd") s2 = self.basetype().join([s1]) self.assertTrue(s1 is not s2) self.assertTrue(type(s2) is self.basetype, type(s2)) # Test reverse, calling join on subclass s3 = s1.join([b"abcd"]) self.assertTrue(type(s3) is self.basetype) def test_pickle(self): a = self.type2test(b"abcd") a.x = 10 a.y = self.type2test(b"efgh") for proto in range(pickle.HIGHEST_PROTOCOL + 1): b = pickle.loads(pickle.dumps(a, proto)) self.assertNotEqual(id(a), id(b)) self.assertEqual(a, b) self.assertEqual(a.x, b.x) self.assertEqual(a.y, b.y) self.assertEqual(type(a), type(b)) self.assertEqual(type(a.y), type(b.y)) def test_copy(self): a = self.type2test(b"abcd") a.x = 10 a.y = self.type2test(b"efgh") for copy_method in (copy.copy, copy.deepcopy): b = copy_method(a) self.assertNotEqual(id(a), id(b)) self.assertEqual(a, b) self.assertEqual(a.x, b.x) self.assertEqual(a.y, b.y) self.assertEqual(type(a), type(b)) self.assertEqual(type(a.y), type(b.y)) def test_fromhex(self): b = self.type2test.fromhex('1a2B30') self.assertEqual(b, b'\x1a\x2b\x30') self.assertIs(type(b), self.type2test) class B1(self.basetype): def __new__(cls, value): me = self.basetype.__new__(cls, value) me.foo = 'bar' return me b = B1.fromhex('1a2B30') self.assertEqual(b, b'\x1a\x2b\x30') self.assertIs(type(b), B1) self.assertEqual(b.foo, 'bar') class B2(self.basetype): def __init__(me, *args, **kwargs): if self.basetype is not bytes: self.basetype.__init__(me, *args, **kwargs) me.foo = 'bar' b = B2.fromhex('1a2B30') self.assertEqual(b, b'\x1a\x2b\x30') self.assertIs(type(b), B2) self.assertEqual(b.foo, 'bar') class ByteArraySubclass(bytearray): pass class BytesSubclass(bytes): pass class OtherBytesSubclass(bytes): pass class ByteArraySubclassTest(SubclassTest, unittest.TestCase): basetype = bytearray 
type2test = ByteArraySubclass def test_init_override(self): class subclass(bytearray): def __init__(me, newarg=1, *args, **kwargs): bytearray.__init__(me, *args, **kwargs) x = subclass(4, b"abcd") x = subclass(4, source=b"abcd") self.assertEqual(x, b"abcd") x = subclass(newarg=4, source=b"abcd") self.assertEqual(x, b"abcd") class BytesSubclassTest(SubclassTest, unittest.TestCase): basetype = bytes type2test = BytesSubclass if __name__ == "__main__": unittest.main()
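# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test classes above): the maketrans
# tests compare against 256-byte literal tables.  Relying only on the
# documented behaviour of bytes.maketrans, the same tables can be rebuilt
# programmatically, which makes the literals easier to audit.  The helper
# name expected_maketrans is made up for this sketch.
def expected_maketrans(frm, to):
    """Start from the identity table and overwrite the remapped positions."""
    table = bytearray(range(256))
    for src, dst in zip(frm, to):
        table[src] = dst
    return bytes(table)

# The first literal in test_maketrans corresponds to:
assert bytes.maketrans(b'abc', b'xyz') == expected_maketrans(b'abc', b'xyz')
# and the second one to:
assert bytes.maketrans(b'\375\376\377', b'xyz') == expected_maketrans(b'\375\376\377', b'xyz')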
#!/usr/bin/env python2 # Copyright (c) 2014-2015 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from pprint import pprint from time import sleep # Create one-input, one-output, no-fee transaction: class RawTransactionsTest(BitcoinTestFramework): def setup_chain(self): print("Initializing test directory "+self.options.tmpdir) initialize_chain_clean(self.options.tmpdir, 4) def setup_network(self, split=False): self.nodes = start_nodes(4, self.options.tmpdir) connect_nodes_bi(self.nodes,0,1) connect_nodes_bi(self.nodes,1,2) connect_nodes_bi(self.nodes,0,2) connect_nodes_bi(self.nodes,0,3) self.is_network_split=False self.sync_all() def run_test(self): print "Mining blocks..." min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee'] # if the fee's positive delta is higher than this value the tests will fail; # a negative delta always fails the tests. # The size of the signature of every input may be at most 2 bytes larger # than a minimum sized signature. # = 2 bytes * minRelayTxFeePerByte feeTolerance = 2 * min_relay_tx_fee/1000 self.nodes[2].generate(1) self.sync_all() self.nodes[0].generate(121) self.sync_all() watchonly_address = self.nodes[0].getnewaddress() watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"] watchonly_amount = 200 self.nodes[3].importpubkey(watchonly_pubkey, "", True) watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount) self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10); self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5); self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0); self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0); self.sync_all() self.nodes[0].generate(1) self.sync_all() ############### # simple test # ############### inputs = [ ] outputs = { self.nodes[0].getnewaddress() : 1.0 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert_equal(len(dec_tx['vin']) > 0, True) #test if we have enough inputs ############################## # simple test with two coins # ############################## inputs = [ ] outputs = { self.nodes[0].getnewaddress() : 2.2 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert_equal(len(dec_tx['vin']) > 0, True) #test if we have enough inputs ############################## # simple test with two coins # ############################## inputs = [ ] outputs = { self.nodes[0].getnewaddress() : 2.6 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert_equal(len(dec_tx['vin']) > 0, True) assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') ################################ # simple test with two outputs # ################################ inputs = [ ] outputs = { self.nodes[0].getnewaddress() : 2.6, 
self.nodes[1].getnewaddress() : 2.5 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 for out in dec_tx['vout']: totalOut += out['value'] assert_equal(len(dec_tx['vin']) > 0, True) assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') ######################################################################### # test a fundrawtransaction with a VIN greater than the required amount # ######################################################################### utx = False listunspent = self.nodes[2].listunspent() for aUtx in listunspent: if aUtx['amount'] == 5.0: utx = aUtx break; assert_equal(utx!=False, True) inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] outputs = { self.nodes[0].getnewaddress() : 1.0 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 for out in dec_tx['vout']: totalOut += out['value'] assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee ##################################################################### # test a fundrawtransaction with which will not get a change output # ##################################################################### utx = False listunspent = self.nodes[2].listunspent() for aUtx in listunspent: if aUtx['amount'] == 5.0: utx = aUtx break; assert_equal(utx!=False, True) inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 for out in dec_tx['vout']: totalOut += out['value'] assert_equal(rawtxfund['changepos'], -1) assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee ######################################################################### # test a fundrawtransaction with a VIN smaller than the required amount # ######################################################################### utx = False listunspent = self.nodes[2].listunspent() for aUtx in listunspent: if aUtx['amount'] == 1.0: utx = aUtx break; assert_equal(utx!=False, True) inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}] outputs = { self.nodes[0].getnewaddress() : 1.0 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) # 4-byte version + 1-byte vin count + 36-byte prevout then script_len rawtx = rawtx[:82] + "0100" + rawtx[84:] dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 for i, out in enumerate(dec_tx['vout']): totalOut += out['value'] if outputs.has_key(out['scriptPubKey']['addresses'][0]): matchingOuts+=1 else: assert_equal(i, rawtxfund['changepos']) 
assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex']) assert_equal(matchingOuts, 1) assert_equal(len(dec_tx['vout']), 2) ########################################### # test a fundrawtransaction with two VINs # ########################################### utx = False utx2 = False listunspent = self.nodes[2].listunspent() for aUtx in listunspent: if aUtx['amount'] == 1.0: utx = aUtx if aUtx['amount'] == 5.0: utx2 = aUtx assert_equal(utx!=False, True) inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ] outputs = { self.nodes[0].getnewaddress() : 6.0 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 for out in dec_tx['vout']: totalOut += out['value'] if outputs.has_key(out['scriptPubKey']['addresses'][0]): matchingOuts+=1 assert_equal(matchingOuts, 1) assert_equal(len(dec_tx['vout']), 2) matchingIns = 0 for vinOut in dec_tx['vin']: for vinIn in inputs: if vinIn['txid'] == vinOut['txid']: matchingIns+=1 assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params ######################################################### # test a fundrawtransaction with two VINs and two vOUTs # ######################################################### utx = False utx2 = False listunspent = self.nodes[2].listunspent() for aUtx in listunspent: if aUtx['amount'] == 1.0: utx = aUtx if aUtx['amount'] == 5.0: utx2 = aUtx assert_equal(utx!=False, True) inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ] outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 for out in dec_tx['vout']: totalOut += out['value'] if outputs.has_key(out['scriptPubKey']['addresses'][0]): matchingOuts+=1 assert_equal(matchingOuts, 2) assert_equal(len(dec_tx['vout']), 3) ############################################## # test a fundrawtransaction with invalid vin # ############################################## listunspent = self.nodes[2].listunspent() inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin! 
outputs = { self.nodes[0].getnewaddress() : 1.0} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) errorString = "" try: rawtxfund = self.nodes[2].fundrawtransaction(rawtx) except JSONRPCException,e: errorString = e.error['message'] assert_equal("Insufficient" in errorString, True); ############################################################ #compare fee of a standard pubkeyhash transaction inputs = [] outputs = {self.nodes[1].getnewaddress():1.1} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawTx) #create same transaction over sendtoaddress txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1); signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee); assert(feeDelta >= 0 and feeDelta <= feeTolerance) ############################################################ ############################################################ #compare fee of a standard pubkeyhash transaction with multiple outputs inputs = [] outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawTx) #create same transaction over sendtoaddress txId = self.nodes[0].sendmany("", outputs); signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee); assert(feeDelta >= 0 and feeDelta <= feeTolerance) ############################################################ ############################################################ #compare fee of a 2of2 multisig p2sh transaction # create 2of2 addr addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[1].getnewaddress() addr1Obj = self.nodes[1].validateaddress(addr1) addr2Obj = self.nodes[1].validateaddress(addr2) mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) inputs = [] outputs = {mSigObj:1.1} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawTx) #create same transaction over sendtoaddress txId = self.nodes[0].sendtoaddress(mSigObj, 1.1); signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee); assert(feeDelta >= 0 and feeDelta <= feeTolerance) ############################################################ ############################################################ #compare fee of a 4of5 multisig p2sh transaction # create 4of5 addr addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[1].getnewaddress() addr3 = self.nodes[1].getnewaddress() addr4 = self.nodes[1].getnewaddress() addr5 = self.nodes[1].getnewaddress() addr1Obj = self.nodes[1].validateaddress(addr1) addr2Obj = self.nodes[1].validateaddress(addr2) addr3Obj = self.nodes[1].validateaddress(addr3) addr4Obj = self.nodes[1].validateaddress(addr4) addr5Obj = self.nodes[1].validateaddress(addr5) mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']]) inputs = [] outputs = {mSigObj:1.1} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawTx) #create same transaction over 
sendtoaddress txId = self.nodes[0].sendtoaddress(mSigObj, 1.1); signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee); assert(feeDelta >= 0 and feeDelta <= feeTolerance) ############################################################ ############################################################ # spend a 2of2 multisig transaction over fundraw # create 2of2 addr addr1 = self.nodes[2].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[2].validateaddress(addr1) addr2Obj = self.nodes[2].validateaddress(addr2) mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # send 1.2 BTC to msig addr txId = self.nodes[0].sendtoaddress(mSigObj, 1.2); self.sync_all() self.nodes[1].generate(1) self.sync_all() oldBalance = self.nodes[1].getbalance() inputs = [] outputs = {self.nodes[1].getnewaddress():1.1} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) fundedTx = self.nodes[2].fundrawtransaction(rawTx) signedTx = self.nodes[2].signrawtransaction(fundedTx['hex']) txId = self.nodes[2].sendrawtransaction(signedTx['hex']) self.sync_all() self.nodes[1].generate(1) self.sync_all() # make sure funds are received at node1 assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance()) ############################################################ # locked wallet test self.nodes[1].encryptwallet("test") self.nodes.pop(1) stop_nodes(self.nodes) wait_bitcoinds() self.nodes = start_nodes(4, self.options.tmpdir) connect_nodes_bi(self.nodes,0,1) connect_nodes_bi(self.nodes,1,2) connect_nodes_bi(self.nodes,0,2) connect_nodes_bi(self.nodes,0,3) self.is_network_split=False self.sync_all() error = False try: self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.2); except: error = True assert(error) oldBalance = self.nodes[0].getbalance() inputs = [] outputs = {self.nodes[0].getnewaddress():1.1} rawTx = self.nodes[1].createrawtransaction(inputs, outputs) fundedTx = self.nodes[1].fundrawtransaction(rawTx) #now we need to unlock self.nodes[1].walletpassphrase("test", 100) signedTx = self.nodes[1].signrawtransaction(fundedTx['hex']) txId = self.nodes[1].sendrawtransaction(signedTx['hex']) self.sync_all() self.nodes[1].generate(1) self.sync_all() # make sure funds are received at node0 assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance()) ############################################### # multiple (~19) inputs tx test | Compare fee # ############################################### #empty node1, send some small coins from node0 to node1 self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True); self.sync_all() self.nodes[0].generate(1) self.sync_all() for i in range(0,20): self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01); self.sync_all() self.nodes[0].generate(1) self.sync_all() #fund a tx with ~20 small inputs inputs = [] outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04} rawTx = self.nodes[1].createrawtransaction(inputs, outputs) fundedTx = self.nodes[1].fundrawtransaction(rawTx) #create same transaction over sendtoaddress txId = self.nodes[1].sendmany("", outputs); signedFee = self.nodes[1].getrawmempool(True)[txId]['fee'] #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee); assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs ############################################# # multiple (~19) inputs tx test | sign/send # 
############################################# #again, empty node1, send some small coins from node0 to node1 self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True); self.sync_all() self.nodes[0].generate(1) self.sync_all() for i in range(0,20): self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01); self.sync_all() self.nodes[0].generate(1) self.sync_all() #fund a tx with ~20 small inputs oldBalance = self.nodes[0].getbalance() inputs = [] outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04} rawTx = self.nodes[1].createrawtransaction(inputs, outputs) fundedTx = self.nodes[1].fundrawtransaction(rawTx) fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex']) txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex']) self.sync_all() self.nodes[0].generate(1) self.sync_all() assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward ##################################################### # test fundrawtransaction with OP_RETURN and no vin # ##################################################### rawtx = "0100000000010000000000000000066a047465737400000000" dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(len(dec_tx['vin']), 0) assert_equal(len(dec_tx['vout']), 1) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert_greater_than(len(dec_tx['vin']), 0) # at least one vin assert_equal(len(dec_tx['vout']), 2) # one change output added ################################################## # test a fundrawtransaction using only watchonly # ################################################## inputs = [] outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2} rawtx = self.nodes[3].createrawtransaction(inputs, outputs) result = self.nodes[3].fundrawtransaction(rawtx, True) res_dec = self.nodes[0].decoderawtransaction(result["hex"]) assert_equal(len(res_dec["vin"]), 1) assert_equal(res_dec["vin"][0]["txid"], watchonly_txid) assert_equal("fee" in result.keys(), True) assert_greater_than(result["changepos"], -1) ############################################################### # test fundrawtransaction using the entirety of watched funds # ############################################################### inputs = [] outputs = {self.nodes[2].getnewaddress() : watchonly_amount} rawtx = self.nodes[3].createrawtransaction(inputs, outputs) result = self.nodes[3].fundrawtransaction(rawtx, True) res_dec = self.nodes[0].decoderawtransaction(result["hex"]) assert_equal(len(res_dec["vin"]), 2) assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid) assert_greater_than(result["fee"], 0) assert_greater_than(result["changepos"], -1) assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10) signedtx = self.nodes[3].signrawtransaction(result["hex"]) assert(not signedtx["complete"]) signedtx = self.nodes[0].signrawtransaction(signedtx["hex"]) assert(signedtx["complete"]) self.nodes[0].sendrawtransaction(signedtx["hex"]) if __name__ == '__main__': RawTransactionsTest().main()
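# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test above): each "compare fee" block
# repeats the same check, namely that the fee chosen by fundrawtransaction is
# at least the fee of the equivalent wallet-built transaction and exceeds it
# by at most the tolerance.  A helper along these lines could factor that out;
# the name assert_fee_within_tolerance is made up for this sketch.
from decimal import Decimal

def assert_fee_within_tolerance(funded_fee, signed_fee, tolerance):
    # A positive delta means fundrawtransaction paid more than the wallet did.
    delta = Decimal(str(funded_fee)) - Decimal(str(signed_fee))
    assert delta >= 0, "fundrawtransaction fee is below the wallet fee"
    assert delta <= tolerance, "fundrawtransaction fee exceeds the tolerance"

# Usage, mirroring the in-line checks above:
#   assert_fee_within_tolerance(fundedTx['fee'], signedFee, feeTolerance)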
import numpy as np import pytest from sklearn_extra.robust import ( RobustWeightedClassifier, RobustWeightedRegressor, RobustWeightedKMeans, ) from sklearn.datasets import make_blobs from sklearn.linear_model import SGDClassifier, SGDRegressor, HuberRegressor from sklearn.cluster import KMeans from sklearn.utils import shuffle from sklearn.metrics import r2_score from sklearn.utils._testing import ( assert_array_almost_equal, assert_almost_equal, ) # Test version of sklearn, in version older than v1.0 squared_loss must be used import sklearn if sklearn.__version__[0] == "0": SQ_LOSS = "squared_loss" else: SQ_LOSS = "squared_error" k_values = [None, 10] # values of k for test robust c_values = [None, 1e-3] # values of c for test robust # Classification test with outliers rng = np.random.RandomState(42) X_cc, y_cc = make_blobs( n_samples=100, centers=np.array([[-1, -1], [1, 1]]), random_state=rng, ) for f in range(3): X_cc[f] = [10, 5] + rng.normal(size=2) * 0.1 y_cc[f] = 0 classif_losses = ["log", "hinge"] weightings = ["huber", "mom"] multi_class = ["ovr", "ovo"] def test_robust_estimator_max_iter(): """Test that warning message is thrown when max_iter is reached.""" model = RobustWeightedClassifier(max_iter=1) msg = "Maximum number of iteration reached before" with pytest.warns(UserWarning, match=msg): model.fit(X_cc, y_cc) def test_robust_estimator_unsupported_loss(): """Test that warning message is thrown when unsupported loss.""" model = RobustWeightedClassifier(loss="invalid") msg = "The loss invalid is not supported. " with pytest.raises(ValueError, match=msg): model.fit(X_cc, y_cc) def test_robust_estimator_unsupported_weighting(): """Test that warning message is thrown when unsupported weighting.""" model = RobustWeightedClassifier(weighting="invalid") msg = "No such weighting scheme" with pytest.raises(ValueError, match=msg): model.fit(X_cc, y_cc) def test_robust_estimator_unsupported_multiclass(): """Test that warning message is thrown when unsupported weighting.""" model = RobustWeightedClassifier(multi_class="invalid") msg = "No such multiclass method implemented." with pytest.raises(ValueError, match=msg): model.fit(X_cc, y_cc) def test_robust_estimator_input_validation_and_fit_check(): # Invalid parameters msg = "max_iter must be > 0, got 0." with pytest.raises(ValueError, match=msg): RobustWeightedKMeans(max_iter=0).fit(X_cc) msg = "c must be > 0, got 0." with pytest.raises(ValueError, match=msg): RobustWeightedKMeans(c=0).fit(X_cc) msg = "burn_in must be >= 0, got -1." with pytest.raises(ValueError, match=msg): RobustWeightedClassifier(burn_in=-1).fit(X_cc, y_cc) msg = "eta0 must be > 0, got 0." 
with pytest.raises(ValueError, match=msg): RobustWeightedClassifier(burn_in=1, eta0=0).fit(X_cc, y_cc) msg = "k must be integer >= 0, and smaller than floor" with pytest.raises(ValueError, match=msg): RobustWeightedKMeans(k=-1).fit(X_cc) @pytest.mark.parametrize("loss", classif_losses) @pytest.mark.parametrize("weighting", weightings) @pytest.mark.parametrize("k", k_values) @pytest.mark.parametrize("c", c_values) @pytest.mark.parametrize("multi_class", multi_class) def test_corrupted_classif(loss, weighting, k, c, multi_class): clf = RobustWeightedClassifier( loss=loss, max_iter=100, weighting=weighting, k=k, c=c, multi_class=multi_class, random_state=rng, ) clf.fit(X_cc, y_cc) score = clf.score(X_cc, y_cc) assert score > 0.8 # Classification test without outliers rng = np.random.RandomState(42) X_c, y_c = make_blobs( n_samples=100, centers=np.array([[-1, -1], [1, 1], [3, -1]]), random_state=rng, ) # check that multi_class="binary" raises an error when y is not binary def test_robust_estimator_binary_requires_binary_y(): model = RobustWeightedClassifier(multi_class="binary") msg = "y must be binary." with pytest.raises(ValueError, match=msg): model.fit(X_c, y_c) # Check that the fit is close to SGD when in extremal parameter cases @pytest.mark.parametrize("loss", classif_losses) @pytest.mark.parametrize("weighting", weightings) @pytest.mark.parametrize("multi_class", multi_class) def test_not_robust_classif(loss, weighting, multi_class): clf = RobustWeightedClassifier( loss=loss, max_iter=100, weighting=weighting, k=0, c=1e7, burn_in=0, multi_class=multi_class, random_state=rng, ) clf_not_rob = SGDClassifier(loss=loss, random_state=rng) clf.fit(X_c, y_c) clf_not_rob.fit(X_c, y_c) pred1 = clf.predict(X_c) pred2 = clf_not_rob.predict(X_c) assert np.mean((pred1 > 0) == (pred2 > 0)) > 0.8 assert clf.score(X_c, y_c) == np.mean(pred1 == y_c) # Make binary uncorrupted dataset X_cb, y_cb = make_blobs( n_samples=100, centers=np.array([[-1, -1], [1, 1]]), random_state=rng ) @pytest.mark.parametrize("weighting", weightings) def test_classif_binary(weighting): clf = RobustWeightedClassifier( max_iter=100, weighting=weighting, k=0, c=1e7, burn_in=0, multi_class="binary", random_state=rng, ) clf_not_rob = SGDClassifier(loss="log", random_state=rng) clf.fit(X_cb, y_cb) clf_not_rob.fit(X_cb, y_cb) norm_coef1 = np.linalg.norm(np.hstack([clf.coef_.ravel(), clf.intercept_])) norm_coef2 = np.linalg.norm( np.hstack([clf_not_rob.coef_.ravel(), clf_not_rob.intercept_]) ) coef1 = clf.coef_ / norm_coef1 coef2 = clf_not_rob.coef_ / norm_coef2 intercept1 = clf.intercept_ / norm_coef1 intercept2 = clf_not_rob.intercept_ / norm_coef2 assert np.linalg.norm(coef1 - coef2) < 0.5 assert np.linalg.norm(intercept1 - intercept2) < 0.5 assert len(clf.weights_) == len(X_cb) # Check that weights_ parameter can be used as outlier score. 
@pytest.mark.parametrize("weighting", weightings) def test_classif_corrupted_weights(weighting): clf = RobustWeightedClassifier( max_iter=100, weighting=weighting, k=5, c=1, burn_in=0, multi_class="binary", random_state=rng, ) clf.fit(X_cc, y_cc) assert np.mean(clf.weights_[:3]) < np.mean(clf.weights_[3:]) # Case "log" loss, test predict_proba @pytest.mark.parametrize("weighting", weightings) def test_predict_proba(weighting): clf = RobustWeightedClassifier( max_iter=100, weighting=weighting, k=0, c=1e7, burn_in=0, random_state=rng, ) clf_not_rob = SGDClassifier(loss="log", random_state=rng) clf.fit(X_c, y_c) clf_not_rob.fit(X_c, y_c) pred1 = clf.base_estimator_.predict_proba(X_c)[:, 1] pred2 = clf_not_rob.predict_proba(X_c)[:, 1] assert np.mean((pred1 > 1 / 2) == (pred2 > 1 / 2)) > 0.8 # check that classifier with another loss than log raises an error def test_robust_no_proba(): est = RobustWeightedClassifier(loss="hinge").fit(X_c, y_c) msg = "Probability estimates are not available for loss='hinge'" with pytest.raises(AttributeError, match=msg): est.predict_proba(X_c) # Regression test with outliers X_rc = rng.uniform(-1, 1, size=[200]) y_rc = X_rc + 0.1 * rng.normal(size=200) X_rc[0] = 10 X_rc = X_rc.reshape(-1, 1) y_rc[0] = -1 regression_losses = [SQ_LOSS, "huber"] @pytest.mark.parametrize("loss", regression_losses) @pytest.mark.parametrize("weighting", weightings) @pytest.mark.parametrize("k", k_values) @pytest.mark.parametrize("c", c_values) def test_corrupted_regression(loss, weighting, k, c): reg = RobustWeightedRegressor( loss=loss, max_iter=50, weighting=weighting, k=k, c=c, random_state=rng, n_iter_no_change=20, ) reg.fit(X_rc, y_rc) assert np.abs(reg.coef_[0] - 1) < 0.1 assert np.abs(reg.intercept_[0]) < 0.1 # Check that weights_ parameter can be used as outlier score. 
@pytest.mark.parametrize("weighting", weightings) def test_regression_corrupted_weights(weighting): reg = RobustWeightedRegressor( max_iter=100, weighting=weighting, k=5, c=1, burn_in=0, random_state=rng, ) reg.fit(X_rc, y_rc) assert reg.weights_[0] < np.mean(reg.weights_[1:]) X_r = rng.uniform(-1, 1, size=[1000]) y_r = X_r + 0.1 * rng.normal(size=1000) X_r = X_r.reshape(-1, 1) # Check that the fit is close to SGD when in extremal parameter cases @pytest.mark.parametrize("loss", regression_losses) @pytest.mark.parametrize("weighting", weightings) def test_not_robust_regression(loss, weighting): reg = RobustWeightedRegressor( loss=loss, max_iter=100, weighting=weighting, k=0, c=1e7, burn_in=0, random_state=rng, ) reg_not_rob = SGDRegressor(loss=loss, random_state=rng) reg.fit(X_r, y_r) reg_not_rob.fit(X_r, y_r) pred1 = reg.predict(X_r) pred2 = reg_not_rob.predict(X_r) difference = [ np.linalg.norm(pred1[i] - pred2[i]) for i in range(len(pred1)) ] assert np.mean(difference) < 1 assert_almost_equal(reg.score(X_r, y_r), r2_score(y_r, reg.predict(X_r))) # Compare with HuberRegressor on dataset corrupted in y X_rcy = rng.uniform(-1, 1, size=[200]) y_rcy = X_rcy + 0.1 * rng.normal(size=200) X_rcy = X_rcy.reshape(-1, 1) y_rcy[0] = -1 def test_vs_huber(): reg1 = RobustWeightedRegressor( max_iter=100, weighting="huber", k=5, c=1, burn_in=0, sgd_args={"learning_rate": "adaptive"}, # test sgd_args random_state=rng, ) reg2 = HuberRegressor() reg1.fit(X_rcy, y_rcy) reg2.fit(X_rcy, y_rcy) assert np.abs(reg1.coef_[0] - reg2.coef_[0]) < 1e-2 # Clustering test with outliers rng = np.random.RandomState(42) X_clusterc, y_clusterc = make_blobs( n_samples=100, centers=np.array([[-1, -1], [1, 1]]), random_state=rng ) for f in range(3): X_clusterc[f] = [20, 5] + rng.normal(size=2) * 0.1 y_clusterc[f] = 0 X_cluster, y_cluster = shuffle(X_clusterc, y_clusterc, random_state=rng) weightings = ["huber", "mom"] @pytest.mark.parametrize("weighting", weightings) @pytest.mark.parametrize("k", k_values) @pytest.mark.parametrize("c", c_values) def test_corrupted_cluster(weighting, k, c): km = RobustWeightedKMeans( n_clusters=2, max_iter=50, weighting=weighting, k=5, c=None, random_state=rng, ) km.fit(X_clusterc) error = np.mean((km.predict(X_clusterc) - y_clusterc) ** 2) assert error < 100 # Clustering test without outliers rng = np.random.RandomState(42) X_cluster, y_cluster = make_blobs( n_samples=100, centers=np.array([[-1, -1], [1, 1]]), random_state=rng ) # Check that the fit is close to KMeans when in extremal parameter cases @pytest.mark.parametrize("weighting", weightings) def test_not_robust_cluster(weighting): clf = RobustWeightedKMeans( n_clusters=2, max_iter=100, weighting=weighting, k=0, c=1e7, random_state=rng, ) clf_not_rob = KMeans(2, random_state=rng) clf.fit(X_cluster) clf_not_rob.fit(X_cluster) pred1 = [clf.cluster_centers_[i] for i in clf.predict(X_cluster)] pred2 = [ clf_not_rob.cluster_centers_[i] for i in clf_not_rob.predict(X_cluster) ] difference = [ np.linalg.norm(pred1[i] - pred2[i]) for i in range(len(pred1)) ] assert np.mean(difference) < 1 def test_transform(): n_clusters = 2 km = RobustWeightedKMeans(n_clusters=n_clusters, random_state=rng) km.fit(X_cluster) X_new = km.transform(km.cluster_centers_) for c in range(n_clusters): assert X_new[c, c] == 0 for c2 in range(n_clusters): if c != c2: assert X_new[c, c2] > 0 def test_fit_transform(): X1 = ( RobustWeightedKMeans(n_clusters=2, random_state=42) .fit(X_cluster) .transform(X_cluster) ) X2 = RobustWeightedKMeans(n_clusters=2, 
random_state=42).fit_transform( X_cluster ) assert_array_almost_equal(X1, X2)
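

# Illustrative sketch (not part of the original test suite): how the fitted
# weights_ attribute can serve as a rough outlier score on the corrupted
# classification data defined above. The 0.5 * median threshold is an
# arbitrary choice for this demo, not an API recommendation.
if __name__ == "__main__":
    demo_clf = RobustWeightedClassifier(
        max_iter=100,
        weighting="huber",
        k=5,
        c=1,
        burn_in=0,
        multi_class="binary",
        random_state=rng,
    )
    demo_clf.fit(X_cc, y_cc)
    # The robust estimator down-weights suspicious samples; the first three
    # points of X_cc were corrupted on purpose, so they should rank low.
    suspected = np.where(
        demo_clf.weights_ < 0.5 * np.median(demo_clf.weights_)
    )[0]
    print("suspected outlier indices:", suspected)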
# Copyright 2014 OpenCore LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import copy import ferry.install from ferry.install import Installer from ferry.config.system.info import System from heatclient import client as heat_client from heatclient.exc import HTTPUnauthorized, HTTPNotFound, HTTPBadRequest import json import logging import math from neutronclient.neutron import client as neutron_client from novaclient import client as nova_client import os from pymongo import MongoClient import sys import time import uuid import yaml class SingleLauncher(object): """ Launches new Ferry containers on an OpenStack cluster. Unlike the multi-launcher, containers use a single pre-assigned network for all communication. This makes it suitable for OpenStack environments that only support a single network (i.e., HP Cloud). """ def __init__(self, controller): self.name = "OpenStack launcher" self.docker_registry = None self.docker_user = None self.heat_server = None self.openstack_key = None self.system = System() self.installer = Installer() self.controller = controller self._init_open_stack() self._init_app_db() def support_proxy(self): """ The OpenStack backend supports proxy mode by assigning all the machines a floating IP. """ return True def _init_app_db(self): self.mongo = MongoClient(os.environ['MONGODB'], 27017, connectTimeoutMS=6000) self.apps = self.mongo['cloud']['openstack'] def _init_open_stack(self): conf = ferry.install.read_ferry_config() # First we need to know the deployment system # we are using. self.data_device = conf['system']['network'] provider = conf['system']['provider'] # Now get some basic OpenStack information params = conf[provider]['params'] self.default_dc = params['dc'] self.default_zone = params['zone'] # Some OpenStack login credentials. if self._check_openstack_credentials(): self.openstack_user = os.environ['OS_USERNAME'] self.openstack_pass = os.environ['OS_PASSWORD'] self.tenant_id = os.environ['OS_TENANT_ID'] self.tenant_name = os.environ['OS_TENANT_NAME'] else: logging.error("Missing OpenStack credentials") raise ValueError("Missing OpenStack credentials") # Some information regarding OpenStack # networking. Necessary for servers = conf[provider][self.default_dc] self.manage_network = servers['network'] self.external_network = servers['extnet'] # OpenStack API endpoints. self.region = servers['region'] self.keystone_server = servers['keystone'] self.nova_server = servers['nova'] self.neutron_server = servers['neutron'] # Check if the user has provided a Heat # server. Not all OpenStack clusters provide # Heat. If not, we'll need to start a local instance. self.heatuuid = None if 'HEAT_URL' in os.environ: self.heat_server = os.environ['HEAT_URL'] elif 'heat' in servers: self.heat_server = servers['heat'] else: self.heat_server = self._check_and_start_heat(self.tenant_id) logging.warning("using heat server " + str(self.heat_server)) # This gives us information about the image to use # for the supplied provider. 
deploy = conf[provider]['deploy'] self.default_image = deploy['image'] self.default_personality = deploy['personality'] self.default_user = deploy['default-user'] self.ssh_key = deploy['ssh'] self.ssh_user = deploy['ssh-user'] # Make sure that the ssh key is actually present. keypath = self._get_host_key() if not os.path.exists(keypath): logging.error("could not find ssh key (%s)" % self.ssh_key) raise ValueError("Missing ssh keys") # Initialize the OpenStack clients and also # download some networking information (subnet ID, # cidr, gateway, etc.) self._init_openstack_clients() self._collect_subnet_info() def _get_host_key(self): """ Get the location of the private ssh key. """ p = self.ssh_key.split("/") if len(p) == 1: return "/ferry/keys/" + self.ssh_key + ".pem" else: return self.ssh_key + ".pem" def _check_and_start_heat(self, tenant_id): """ Check and start the Ferry Heat image. """ # Check if the image is downloaded locally. # If not, it will automatically pull it. logging.info("Check for Heat image") self.installer._check_and_pull_image("ferry/heatserver") # Check if the Heat log directory exists yet. If not # go ahead and create it. heatlogs = ferry.install.DOCKER_DIR + "/heatlog" try: if not os.path.isdir(heatlogs): os.makedirs(heatlogs) self.installer._change_permission(heatlogs) except OSError as e: logging.error(e.strerror) sys.exit(1) # Start the Heat image and capture the IP address. We can # then hand over this IP to the rest of the configuration. volumes = { heatlogs : "/var/log/heat" } heatplan = {'image':'ferry/heatserver', 'type':'ferry/heatserver', 'keydir': {}, 'keyname': None, 'privatekey': None, 'volumes':volumes, 'volume_user':ferry.install.DEFAULT_FERRY_OWNER, 'ports':[], 'exposed':["8004","8000"], 'internal':[], 'hostname':'heatserver', 'netenable':True, 'default_cmd' : '', 'args': 'trust' } self.heatuuid = 'fht-' + str(uuid.uuid4()).split('-')[0] self.heatbox = self.installer.fabric.alloc(self.heatuuid, self.heatuuid, [heatplan], "HEAT")[0] if not self.heatbox: logging.error("Could not start Heat server") sys.exit(1) else: return "http://%s:8004/v1/%s" % (str(self.heatbox.internal_ip), tenant_id) def _check_openstack_credentials(self): envs = ['OS_USERNAME', 'OS_PASSWORD', 'OS_TENANT_ID', 'OS_TENANT_NAME'] for e in envs: if not e in os.environ: return False return True def _init_openstack_clients(self): # Instantiate the Heat client. if 'HEAT_API_VERSION' in os.environ: heat_api_version = os.environ['HEAT_API_VERSION'] else: heat_api_version = '1' kwargs = { 'username' : self.openstack_user, 'password' : self.openstack_pass, 'include_pass' : True, 'tenant_id': self.tenant_id, 'tenant_name': self.tenant_name, 'auth_url' : self.keystone_server } self.heat = heat_client.Client(heat_api_version, self.heat_server, **kwargs) # Check to make sure that the Heat client can actually # connect to the Heat server. This is because we may # have just started the Heat server, so it can a while to refresh. for i in range(0, 10): try: stacks = self.heat.stacks.list() for s in stacks: logging.warning("found Heat stack: " + str(s)) connected = True break except: time.sleep(12) connected = False if not connected: raise ValueError("Could not connect to Heat") # Instantiate the Neutron client. # There should be a better way of figuring out the API version. neutron_api_version = "2.0" kwargs['endpoint_url'] = self.neutron_server self.neutron = neutron_client.Client(neutron_api_version, **kwargs) # Instantiate the Nova client. The Nova client is used # to stop/restart instances. 
        nova_api_version = "1.1"
        kwargs = { 'username' : self.openstack_user,
                   'api_key' : self.openstack_pass,
                   'tenant_id': self.tenant_id,
                   'auth_url' : self.keystone_server,
                   'service_type' : 'compute',
                   'region_name' : self.region }
        self.nova = nova_client.Client(nova_api_version, **kwargs)

    def _create_floating_ip(self, name, port):
        """
        Create and attach a floating IP to the supplied port.
        """
        plan = { name : { "Type": "OS::Neutron::FloatingIP",
                          "Properties": { "floating_network_id": self.external_network }},
                 name + "_assoc" : { "Type": "OS::Neutron::FloatingIPAssociation",
                                     "Properties": { "floatingip_id": { "Ref" : name },
                                                     "port_id": { "Ref" : port }}}}
        desc = { "type" : "OS::Neutron::FloatingIP" }
        return plan, desc

    def _create_security_group(self, group_name, ports, internal):
        """
        Create and assign a security group to the supplied server.
        """
        # Create the basic security group.
        # This only includes SSH. We can later update the group
        # to include additional ports.
        desc = { group_name : { "Type" : "OS::Neutron::SecurityGroup",
                                "Properties" : { "name" : group_name,
                                                 "description" : "Ferry firewall rules",
                                                 "rules" : [ { "protocol" : "icmp",
                                                               "remote_ip_prefix": "0.0.0.0/0" },
                                                             { "protocol" : "tcp",
                                                               "remote_ip_prefix": "0.0.0.0/0",
                                                               "port_range_min" : 22,
                                                               "port_range_max" : 22 }]}}}

        # Additional ports for the security group. These
        # port values can be accessible from anywhere.
        for p in ports:
            min_port = p[0]
            max_port = p[1]
            desc[group_name]["Properties"]["rules"].append({ "protocol" : "tcp",
                                                             "remote_ip_prefix": "0.0.0.0/0",
                                                             "port_range_min" : min_port,
                                                             "port_range_max" : max_port })

        # Additional ports for the security group. These
        # port values can only be accessed from within the same network.
        for p in internal:
            min_port = p[0]
            max_port = p[1]
            desc[group_name]["Properties"]["rules"].append({ "protocol" : "tcp",
                                                             "remote_ip_prefix": self.subnet["cidr"],
                                                             "port_range_min" : min_port,
                                                             "port_range_max" : max_port })
        return desc

    def _create_storage_volume(self, volume_name, server_name, size_gb):
        """
        Create and attach a storage volume to the supplied server.
        """
        desc = { volume_name : { "Type" : "OS::Cinder::Volume",
                                 "Properties": { "size" : size_gb,
                                                 "availability_zone": self.default_zone }},
                 volume_name + "_attachment" : { "Type" : "OS::Cinder::VolumeAttachment",
                                                 "Properties": { "volume_id" : { "Ref" : volume_name },
                                                                 "instance_uuid": { "Ref" : server_name },
                                                                 "mount_point": "/dev/vdc" }}}
        return desc

    def _create_port(self, name, network, sec_group, ref=True):
        desc = { name : { "Type" : "OS::Neutron::Port",
                          "Properties" : { "name" : name,
                                           "security_groups" : [{ "Ref" : sec_group }]}}}
        if ref:
            desc[name]["Properties"]["network"] = { "Ref" : network }
        else:
            desc[name]["Properties"]["network"] = network
        return desc

    def _create_server_init(self):
        """
        Create the server init process. These commands are run on the
        host after the host has booted up.
""" user_data = { "Fn::Base64": { "Fn::Join": [ "", [ "#!/bin/bash -v\n", "umount /mnt\n", "parted --script /dev/vdb mklabel gpt\n", "parted --script /dev/vdb mkpart primary xfs 0% 100%\n", "mkfs.xfs /dev/vdb1\n", "mkdir /ferry/data\n", "mkdir /ferry/keys\n", "mkdir /ferry/containers\n", "mount -o noatime /dev/vdb1 /ferry/data\n", "export FERRY_SCRATCH=/ferry/data\n", "export FERRY_DIR=/ferry/master\n", "echo export FERRY_SCRATCH=/ferry/data >> /etc/profile\n", "echo export FERRY_DIR=/ferry/master >> /etc/profile\n", "export HOME=/root\n", "export USER=root\n", "mkdir /home/ferry/.ssh\n", "cp /home/%s/.ssh/authorized_keys /home/ferry/.ssh/\n" % self.default_user, "cp /home/%s/.ssh/authorized_keys /root/.ssh/\n" % self.default_user, "chown -R ferry:ferry /home/ferry/.ssh\n", "chown -R ferry:ferry /ferry/data\n", "chown -R ferry:ferry /ferry/keys\n", "chown -R ferry:ferry /ferry/containers\n", "ferry server -n\n", "sleep 3\n" ] ] }} return user_data def _create_volume_attachment(self, iface, instance, volume_id): plan = { iface: { "Type": "OS::Cinder::VolumeAttachment", "Properties": { "instance_uuid": { "Ref" : instance }, "mountpoint": "/dev/vdc", "volume_id": volume_id}}} desc = { "type" : "OS::Cinder::VolumeAttachment" } return plan, desc def _create_instance(self, name, image, size, manage_network, sec_group): """ Create a new instance """ plan = { name : { "Type" : "OS::Nova::Server", "Properties" : { "name" : name, "image" : image, "key_name" : self.ssh_key, "flavor" : size, "availability_zone" : self.default_zone, "networks" : []}}} desc = { name : { "type" : "OS::Nova::Server", "name" : name, "ports" : [], "volumes" : [] }} # Create a port for the manage network. port_descs = [] port_name = "ferry-port-%s" % name port_descs.append(self._create_port(port_name, manage_network, sec_group, ref=False)) plan[name]["Properties"]["networks"].append({ "port" : { "Ref" : port_name }, "network" : manage_network}) desc[name]["ports"].append(port_name) desc[port_name] = { "type" : "OS::Neutron::Port", "role" : "manage" } # Combine all the port descriptions. for d in port_descs: plan = dict(plan.items() + d.items()) # Now add the user script. user_data = self._create_server_init() plan[name]["Properties"]["user_data"] = user_data return plan, desc def _create_floatingip_plan(self, cluster_uuid, ifaces): """ Assign floating IPs to the supplied interfaces/ports. """ plan = { "AWSTemplateFormatVersion" : "2010-09-09", "Description" : "Ferry generated Heat plan", "Resources" : {} } desc = {} for i in range(0, len(ifaces)): ip_name = "ferry-ip-%s-%d" % (cluster_uuid, i) ip_plan, desc[ip_name] = self._create_floating_ip(ip_name, ifaces[i]) plan["Resources"] = dict(plan["Resources"].items() + ip_plan.items()) return plan, desc def _create_security_plan(self, cluster_uuid, ports, internal, ctype): """ Update the security group. """ sec_group_name = "ferry-sec-%s-%s" % (cluster_uuid, ctype) plan = { "AWSTemplateFormatVersion" : "2010-09-09", "Description" : "Ferry generated Heat plan", "Resources" : self._create_security_group(sec_group_name, ports, internal)} desc = { sec_group_name : { "type" : "OS::Neutron::SecurityGroup" }} return plan, desc def _create_instance_plan(self, cluster_uuid, num_instances, image, size, sec_group_name, ctype): plan = { "AWSTemplateFormatVersion" : "2010-09-09", "Description" : "Ferry generated Heat plan", "Resources" : {}, "Outputs" : {} } desc = {} for i in range(0, num_instances): # Create the actual instances. 
instance_name = "ferry-instance-%s-%s-%d" % (cluster_uuid, ctype, i) instance_plan, instance_desc = self._create_instance(instance_name, image, size, self.manage_network, sec_group_name) plan["Resources"] = dict(plan["Resources"].items() + instance_plan.items()) desc = dict(desc.items() + instance_desc.items()) return plan, desc def _launch_heat_plan(self, stack_name, heat_plan, stack_desc): """ Launch the cluster plan. """ logging.info("launching heat plan: " + str(heat_plan)) try: # Try to create the application stack. resp = self.heat.stacks.create(stack_name=stack_name, template=heat_plan) except HTTPBadRequest as e: logging.error(e.strerror) return None except: # We could not create the stack. This probably # means that either the Heat server is down or the # OpenStack cluster is down. logging.error("could not create Heat stack") return None # Now wait for the stack to be in a completed state # before returning. That way we'll know if the stack creation # has failed or not. if not self._wait_for_stack(resp["stack"]["id"]): logging.warning("Heat plan %s CREATE_FAILED" % resp["stack"]["id"]) return None # Now find the physical IDs of all the resources. resources = self._collect_resources(resp["stack"]["id"]) for r in resources: if r["logical_resource_id"] in stack_desc: stack_desc[r["logical_resource_id"]]["id"] = r["physical_resource_id"] # Record the Stack ID in the description so that # we can refer back to it later. stack_desc[stack_name] = { "id" : resp["stack"]["id"], "type": "OS::Heat::Stack" } return stack_desc def _wait_for_stack(self, stack_id): """ Wait for stack completion. """ while(True): try: stack = self.heat.stacks.get(stack_id) if stack.status == "COMPLETE": return True elif stack.status == "FAILED": return False else: time.sleep(4) except: logging.error("could not fetch stack status (%s)" % str(stack_id)) def _collect_resources(self, stack_id): """ Collect all the stack resources so that we can create additional plans and use IDs. """ try: resources = self.heat.resources.list(stack_id) descs = [r.to_dict() for r in resources] return descs except: return [] def _collect_subnet_info(self): """ Collect the data network subnet info (ID, CIDR, and gateway). """ subnets = self.neutron.list_subnets() for s in subnets['subnets']: if s['network_id'] == self.manage_network: self.subnet = { "id" : s['id'], "cidr" : s['cidr'], "gateway" : s['gateway_ip'] } def _collect_network_info(self, stack_desc): """ Collect all the networking information. """ # First get the floating IP information. ip_map = {} floatingips = self.neutron.list_floatingips() for f in floatingips['floatingips']: if f['fixed_ip_address']: ip_map[f['fixed_ip_address']] = f['floating_ip_address'] # Now fill in the various networking information, including # subnet, IP address, and floating address. We should also # probably collect MAC addresseses.. ports = self.neutron.list_ports() for p in ports['ports']: if p['name'] != "" and p['name'] in stack_desc: port_desc = stack_desc[p['name']] port_desc["subnet"] = p['fixed_ips'][0]['subnet_id'] port_desc["ip_address"] = p['fixed_ips'][0]['ip_address'] # Not all ports are associated with a floating IP, so # we need to check first. if port_desc["ip_address"] in ip_map: port_desc["floating_ip"] = ip_map[port_desc["ip_address"]] return stack_desc def _collect_instance_info(self, stack_desc): """ Collect all the instance information. 
""" servers = self.nova.servers.list() for s in servers: if s.name != "" and s.name in stack_desc: instance_desc = stack_desc[s.name] instance_desc["id"] = s.id return stack_desc def _create_app_stack(self, cluster_uuid, num_instances, security_group_ports, internal_ports, assign_floating_ip, ctype): """ Create an empty application stack. This includes the instances, security groups, and floating IPs. """ logging.info("creating security group for %s" % cluster_uuid) sec_group_plan, sec_group_desc = self._create_security_plan(cluster_uuid = cluster_uuid, ports = security_group_ports, internal = internal_ports, ctype = ctype) logging.info("creating instances for %s" % cluster_uuid) stack_plan, stack_desc = self._create_instance_plan(cluster_uuid = cluster_uuid, num_instances = num_instances, image = self.default_image, size = self.default_personality, sec_group_name = sec_group_desc.keys()[0], ctype = ctype) # See if we need to assign any floating IPs # for this stack. We need the references to the neutron # port which is contained in the description. if assign_floating_ip: logging.info("creating floating IPs for %s" % cluster_uuid) ifaces = [] for k in stack_desc.keys(): if stack_desc[k]["type"] == "OS::Neutron::Port" and stack_desc[k]["role"] == "manage": ifaces.append(k) ip_plan, ip_desc = self._create_floatingip_plan(cluster_uuid = cluster_uuid, ifaces = ifaces) else: ip_plan = { "Resources" : {}} ip_desc = {} # Now we need to combine all these plans and # launch the cluster. stack_plan["Resources"] = dict(sec_group_plan["Resources"].items() + ip_plan["Resources"].items() + stack_plan["Resources"].items()) stack_desc = dict(stack_desc.items() + sec_group_desc.items() + ip_desc.items()) stack_desc = self._launch_heat_plan("ferry-app-%s-%s" % (ctype.upper(), cluster_uuid), stack_plan, stack_desc) # Now find all the IP addresses of the various machines. if stack_desc: stack_desc = self._collect_instance_info(stack_desc) return self._collect_network_info(stack_desc) else: return None def _get_private_ip(self, server, subnet_id, resources): """ Get the IP address associated with the supplied server. """ for port_name in server["ports"]: port_desc = resources[port_name] if port_desc["subnet"] == subnet_id: return port_desc["ip_address"] def _get_public_ip(self, server, resources): """ Get the IP address associated with the supplied server. """ for port_name in server["ports"]: port_desc = resources[port_name] if "floating_ip" in port_desc: return port_desc["floating_ip"] def _get_servers(self, resources): servers = [] for r in resources.values(): if type(r) is dict and r["type"] == "OS::Nova::Server": servers.append(r) return servers def _get_net_info(self, server_info, subnet, resources): """ Look up the IP address, gateway, and subnet range. """ cidr = subnet["cidr"].split("/")[1] ip = self._get_private_ip(server_info, subnet["id"], resources) # We want to use the host NIC, so modify LXC to use phys networking, and # then start the docker containers on the server. lxc_opts = ["lxc.network.type = phys", "lxc.network.ipv4 = %s/%s" % (ip, cidr), "lxc.network.ipv4.gateway = %s" % subnet["gateway"], "lxc.network.link = %s" % self.data_device, "lxc.network.name = eth0", "lxc.network.flags = up"] return lxc_opts, ip def _update_app_db(self, cluster_uuid, service_uuid, heat_plan): # Make a copy of the plan before inserting into # mongo, otherwise the "_id" field will be added # silently. 
heat_plan["_cluster_uuid"] = cluster_uuid heat_plan["_service_uuid"] = service_uuid self.apps.insert(copy.deepcopy(heat_plan)) def alloc(self, cluster_uuid, service_uuid, container_info, ctype, proxy): """ Allocate a new cluster. """ # Now take the cluster and create the security group # to expose all the right ports. sec_group_ports = [] internal_ports = [] if ctype == "connector": # Since this is a connector, we need to expose # the public ports. For now, we ignore the host port. floating_ip = True for c in container_info: for p in c['ports']: s = str(p).split(":") if len(s) > 1: sec_group_ports.append( (s[1], s[1]) ) else: sec_group_ports.append( (s[0], s[0]) ) else: if proxy: # Otherwise, the backend should also get floating IPs # so that the controller can access it. floating_ip = True else: # If the controller is acting as a proxy, then it has # direct access to the VMs, so the backend shouldn't # get any floating IPs. floating_ip = False # We need to create a range tuple, so check if # the exposed port is a range. for p in container_info[0]['exposed']: s = p.split("-") if len(s) == 1: sec_group_ports.append( (s[0], s[0]) ) else: sec_group_ports.append( (s[0], s[1]) ) # Also see if there are any ports that should be # open within the cluster (but not outside). Typically # used for IPC (where ports may be assigned within a random range). for p in container_info[0]['internal']: s = p.split("-") if len(s) == 1: internal_ports.append( (s[0], s[0]) ) else: internal_ports.append( (s[0], s[1]) ) # Tell OpenStack to allocate the cluster. resources = self._create_app_stack(cluster_uuid = cluster_uuid, num_instances = len(container_info), security_group_ports = sec_group_ports, internal_ports = internal_ports, assign_floating_ip = floating_ip, ctype = ctype) # Now we need to ask the cluster to start the # Docker containers. containers = [] mounts = {} if resources: # Store the resources cluster ID. self._update_app_db(cluster_uuid, service_uuid, resources) servers = self._get_servers(resources) for i in range(0, len(container_info)): # Fetch a server to run the Docker commands. server = servers[i] # Get the LXC networking options lxc_opts, private_ip = self._get_net_info(server, self.subnet, resources) # Now get an addressable IP address. If we're acting as a proxy within # the same cluster, we can just use the private address. Otherwise # we'll need to route via the public IP address. if proxy: server_ip = private_ip else: server_ip = self._get_public_ip(server, resources) # Verify that the user_data processes all started properly # and that the docker daemon is actually running. If it is # not running, try re-executing. if not self.controller._verify_ferry_server(server_ip): self.controller._execute_server_init(server_ip) # Copy over the public keys, but also verify that it does # get copied over properly. self.controller._copy_public_keys(container_info[i], server_ip) if self.controller._verify_public_keys(server_ip): container, cmounts = self.controller.execute_docker_containers(container_info[i], lxc_opts, private_ip, server_ip) if container: mounts = dict(mounts.items() + cmounts.items()) containers.append(container) else: logging.error("could not copy over ssh key!") return None # Check if we need to set the file permissions # for the mounted volumes. for c, i in mounts.items(): for _, v in i['vols']: self.controller.cmd([c], 'chown -R %s %s' % (i['user'], v)) return containers else: # OpenStack failed to launch the application stack. 
# This can be caused by improper OpenStack credentials # or if the OpenStack cluster is under heavy load (i.e., # requests are getting timed out). return None def _delete_stack(self, cluster_uuid, service_uuid): # Find the relevant stack information. ips = [] stacks = self.apps.find( { "_cluster_uuid" : cluster_uuid, "_service_uuid" : service_uuid } ) logging.warning("Deleting cluster %s" % str(cluster_uuid)) for stack in stacks: for s in stack.values(): if type(s) is dict and s["type"] == "OS::Heat::Stack": stack_id = s["id"] # To delete the stack properly, we first need to disassociate # the floating IPs. resources = self._collect_resources(stack_id) for r in resources: if r["resource_type"] == "OS::Neutron::FloatingIP": self.neutron.update_floatingip(r["physical_resource_id"], {'floatingip': {'port_id': None}}) # Now try to delete the stack. Wrap this in a try-block so that # we don't completely fail even if the stack doesn't exist. try: logging.warning("Deleting stack %s" % str(stack_id)) self.heat.stacks.delete(stack_id) except HTTPNotFound as e: logging.warning(e) self.apps.remove( { "_cluster_uuid" : cluster_uuid, "_service_uuid" : service_uuid } ) def _stop_stack(self, cluster_uuid, service_uuid): stacks = self.apps.find( { "_cluster_uuid" : cluster_uuid, "_service_uuid" : service_uuid } ) for stack in stacks: servers = self._get_servers(stack) for s in servers: self.nova.servers.stop(s["id"]) def _restart_stack(self, cluster_uuid, service_uuid): ips = [] stacks = self.apps.find( { "_cluster_uuid" : cluster_uuid, "_service_uuid" : service_uuid } ) for stack in stacks: # Find the set of servers and restart them # one by one. It would be nicer if Heat had a way to # restart them all at once, but not sure how to do that... servers = self._get_servers(stack) for s in servers: self.nova.servers.start(s["id"]) ips.append(self._get_public_ip(s, stack)) # Wait for the servers to actually be in the # "running" status before returning. for s in servers: while(True): found = self.nova.servers.list(search_opts = { "name" : s["name"] }) for f in found["servers"]: if f["status"] == "ACTIVE": break time.sleep(4) return ips def quit(self): """ Check if the Heat server is running, and if so go ahead and stop it. """ if self.heatuuid: self.installer.fabric.stop(self.heatuuid, self.heatuuid, [self.heatbox])
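

# Minimal illustration (not called by Ferry itself) of the kind of Heat template
# the helper methods above assemble: security-group, port, and server resources
# are merged into one "Resources" map before being handed to _launch_heat_plan().
# All names, IDs, images, and flavors below are made up for the example.
if __name__ == "__main__":
    example_plan = { "AWSTemplateFormatVersion" : "2010-09-09",
                     "Description" : "Ferry generated Heat plan",
                     "Resources" : {
                         "ferry-sec-demo" : {
                             "Type" : "OS::Neutron::SecurityGroup",
                             "Properties" : {
                                 "name" : "ferry-sec-demo",
                                 "description" : "Ferry firewall rules",
                                 "rules" : [ { "protocol" : "tcp",
                                               "remote_ip_prefix" : "0.0.0.0/0",
                                               "port_range_min" : 22,
                                               "port_range_max" : 22 } ]}},
                         "ferry-port-demo-0" : {
                             "Type" : "OS::Neutron::Port",
                             "Properties" : {
                                 "name" : "ferry-port-demo-0",
                                 "network" : "11111111-2222-3333-4444-555555555555",
                                 "security_groups" : [{ "Ref" : "ferry-sec-demo" }]}},
                         "ferry-instance-demo-0" : {
                             "Type" : "OS::Nova::Server",
                             "Properties" : {
                                 "name" : "ferry-instance-demo-0",
                                 "image" : "ferry-server-image",
                                 "key_name" : "ferry-keypair",
                                 "flavor" : "standard.small",
                                 "availability_zone" : "az1",
                                 "networks" : [{ "port" : { "Ref" : "ferry-port-demo-0" }}]}}}}
    print(json.dumps(example_plan, indent=2))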
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file> # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> # Created By: [email protected] # Maintained By: [email protected] scope = "System" description = """ This role grants a user basic object creation and editing permission. """ owner_base = [ "Categorization", "Category", "ControlCategory", "ControlAssertion", { "type": "Issue", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "Assessment", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "Control", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "DataAsset", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "AccessGroup", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "Directive", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "Contract", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "Policy", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "Regulation", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "Standard", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "Facility", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "Market", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "Objective", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, "ObjectDocument", "ObjectOwner", "ObjectPerson", { "type": "Option", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "OrgGroup", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "Vendor", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "Product", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "Section", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "Clause", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "SystemOrProcess", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "System", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "Process", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "Project", "terms": { "list_property": "owners", "value": "$current_user" }, "condition": "contains" }, { "type": "BackgroundTask", "terms": { "property_name": "modified_by", "value": "$current_user" }, "condition": "is" }, "CustomAttributeDefinition", "CustomAttributeValue", ] owner_read = owner_base + [ { "type": "Relationship", "terms": { "property_name": "source,destination", "action": "read" }, "condition": "relationship", }, "Role", "UserRole", "Context", "Person", ] owner_update = owner_base + [ { "type": "Relationship", "terms": { 
"property_name": "source,destination", "action": "update" }, "condition": "relationship", }, { "type": "Comment", "terms": { "property_name": "modified_by", "value": "$current_user" }, "condition": "is" }, ] permissions = { "read": owner_read, "create": [ "Workflow" "Categorization", "Category", "ControlCategory", "ControlAssertion", "Control", "Comment", "Assessment", "Issue", "DataAsset", "AccessGroup", "Directive", "Contract", "Policy", "Regulation", "Standard", "Document", "Facility", "Help", "Market", "Objective", "ObjectDocument", "ObjectPerson", "Option", "OrgGroup", "Vendor", "PopulationSample", "Product", "Project", { "type": "Relationship", "terms": { "property_name": "source,destination", "action": "update" }, "condition": "relationship", }, "Section", "Clause", "SystemOrProcess", "System", "Process", { "type": "ObjectOwner", "terms": { "property_name": "ownable.modified_by", "value": "$current_user" }, "condition": "is" }, "Program", "Context", { "type": "BackgroundTask", "terms": { "property_name": "modified_by", "value": "$current_user" }, "condition": "is" }, ], "view_object_page": owner_read, "update": owner_update, "delete": owner_update, }
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils from jacket.db import compute as db from jacket.objects import compute as objects from jacket.objects.compute import base from jacket.objects.compute import fields # TODO(berrange): Remove NovaObjectDictCompat @base.NovaObjectRegistry.register class InstanceAction(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode VERSION = '1.1' fields = { 'id': fields.IntegerField(), 'action': fields.StringField(nullable=True), 'instance_uuid': fields.UUIDField(nullable=True), 'request_id': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'start_time': fields.DateTimeField(nullable=True), 'finish_time': fields.DateTimeField(nullable=True), 'message': fields.StringField(nullable=True), } @staticmethod def _from_db_object(context, action, db_action): for field in action.fields: action[field] = db_action[field] action._context = context action.obj_reset_changes() return action @staticmethod def pack_action_start(context, instance_uuid, action_name): values = {'request_id': context.request_id, 'instance_uuid': instance_uuid, 'user_id': context.user_id, 'project_id': context.project_id, 'action': action_name, 'start_time': context.timestamp} return values @staticmethod def pack_action_finish(context, instance_uuid): values = {'request_id': context.request_id, 'instance_uuid': instance_uuid, 'finish_time': timeutils.utcnow()} return values @base.remotable_classmethod def get_by_request_id(cls, context, instance_uuid, request_id): db_action = db.action_get_by_request_id(context, instance_uuid, request_id) if db_action: return cls._from_db_object(context, cls(), db_action) @base.remotable_classmethod def action_start(cls, context, instance_uuid, action_name, want_result=True): values = cls.pack_action_start(context, instance_uuid, action_name) db_action = db.action_start(context, values) if want_result: return cls._from_db_object(context, cls(), db_action) @base.remotable_classmethod def action_finish(cls, context, instance_uuid, want_result=True): values = cls.pack_action_finish(context, instance_uuid) db_action = db.action_finish(context, values) if want_result: return cls._from_db_object(context, cls(), db_action) @base.remotable def finish(self): values = self.pack_action_finish(self._context, self.instance_uuid) db_action = db.action_finish(self._context, values) self._from_db_object(self._context, self, db_action) @base.NovaObjectRegistry.register class InstanceActionList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # InstanceAction <= version 1.1 VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('InstanceAction'), } @base.remotable_classmethod def get_by_instance_uuid(cls, context, instance_uuid): db_actions = db.actions_get(context, instance_uuid) return base.obj_make_list(context, 
                                  cls(), InstanceAction, db_actions)


# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceActionEvent(base.NovaPersistentObject, base.NovaObject,
                          base.NovaObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: event_finish_with_failure decorated with serialize_args
    VERSION = '1.1'
    fields = {
        'id': fields.IntegerField(),
        'event': fields.StringField(nullable=True),
        'action_id': fields.IntegerField(nullable=True),
        'start_time': fields.DateTimeField(nullable=True),
        'finish_time': fields.DateTimeField(nullable=True),
        'result': fields.StringField(nullable=True),
        'traceback': fields.StringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(context, event, db_event):
        for field in event.fields:
            event[field] = db_event[field]
        event._context = context
        event.obj_reset_changes()
        return event

    @staticmethod
    def pack_action_event_start(context, instance_uuid, event_name):
        values = {'event': event_name,
                  'instance_uuid': instance_uuid,
                  'request_id': context.request_id,
                  'start_time': timeutils.utcnow()}
        return values

    @staticmethod
    def pack_action_event_finish(context, instance_uuid, event_name,
                                 exc_val=None, exc_tb=None):
        values = {'event': event_name,
                  'instance_uuid': instance_uuid,
                  'request_id': context.request_id,
                  'finish_time': timeutils.utcnow()}
        if exc_tb is None:
            values['result'] = 'Success'
        else:
            values['result'] = 'Error'
            values['message'] = exc_val
            values['traceback'] = exc_tb
        return values

    @base.remotable_classmethod
    def get_by_id(cls, context, action_id, event_id):
        db_event = db.action_event_get_by_id(context, action_id, event_id)
        return cls._from_db_object(context, cls(), db_event)

    @base.remotable_classmethod
    def event_start(cls, context, instance_uuid, event_name,
                    want_result=True):
        values = cls.pack_action_event_start(context, instance_uuid,
                                             event_name)
        db_event = db.action_event_start(context, values)
        if want_result:
            return cls._from_db_object(context, cls(), db_event)

    @base.serialize_args
    @base.remotable_classmethod
    def event_finish_with_failure(cls, context, instance_uuid, event_name,
                                  exc_val=None, exc_tb=None,
                                  want_result=None):
        values = cls.pack_action_event_finish(context, instance_uuid,
                                              event_name, exc_val=exc_val,
                                              exc_tb=exc_tb)
        db_event = db.action_event_finish(context, values)
        if want_result:
            return cls._from_db_object(context, cls(), db_event)

    @base.remotable_classmethod
    def event_finish(cls, context, instance_uuid, event_name,
                     want_result=True):
        return cls.event_finish_with_failure(context, instance_uuid,
                                             event_name, exc_val=None,
                                             exc_tb=None,
                                             want_result=want_result)

    @base.remotable
    def finish_with_failure(self, exc_val, exc_tb):
        values = self.pack_action_event_finish(self._context,
                                               self.instance_uuid,
                                               self.event, exc_val=exc_val,
                                               exc_tb=exc_tb)
        db_event = db.action_event_finish(self._context, values)
        self._from_db_object(self._context, self, db_event)

    @base.remotable
    def finish(self):
        # finish_with_failure() reads self._context itself and takes no
        # explicit context argument.
        self.finish_with_failure(exc_val=None, exc_tb=None)


@base.NovaObjectRegistry.register
class InstanceActionEventList(base.ObjectListBase, base.NovaObject):
    VERSION = '1.1'
    fields = {
        'objects': fields.ListOfObjectsField('InstanceActionEvent'),
    }

    @base.remotable_classmethod
    def get_by_action(cls, context, action_id):
        db_events = db.action_events_get(context, action_id)
        return base.obj_make_list(context, cls(context),
                                  objects.InstanceActionEvent, db_events)
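

# Illustrative sketch (not part of the object model): a typical lifecycle packs
# a start record when an API request begins and finish/event records as work
# completes. The stub context below only carries the attributes the pack_*
# helpers read; in practice a real RequestContext is passed in, and the action
# and event names here are just examples.
if __name__ == '__main__':
    class _StubContext(object):
        request_id = 'req-demo'
        user_id = 'demo-user'
        project_id = 'demo-project'
        timestamp = timeutils.utcnow()

    ctxt = _StubContext()
    demo_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
    print(InstanceAction.pack_action_start(ctxt, demo_uuid, 'reboot'))
    print(InstanceActionEvent.pack_action_event_start(
        ctxt, demo_uuid, 'compute_reboot_instance'))
    print(InstanceActionEvent.pack_action_event_finish(
        ctxt, demo_uuid, 'compute_reboot_instance'))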
#*********************************************************************** # This code is part of pyCMPL # # Copyright (C) 2013 - 2016 # Mike Steglich - Technical University of Applied Sciences # Wildau, Germany # # pyCMPL is a project of the Technical University of # Applied Sciences Wildau and the Institute for Operations Research # and Business Management at the Martin Luther University # Halle-Wittenberg. # Please visit the project homepage <www.coliop.org> # # pyCMPL is free software; you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pyCMPL is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public # License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. # #********************************************************************** #!/usr/bin/python from __future__ import division "pyCmpl - CMPL's python API " __author__ = "Mike Steglich" __copyright__ = "Copyright (C) 2013,2014,2015, 2016 Mike Steglich" __license__ = "LGPLv3" __version__ = "1.4.6" from math import * import os import threading import random import subprocess import sys import xmlrpclib import time import socket import time import tempfile import cStringIO from CmplDefs import * from CmplException import * from CmplMsg import * from CmplTools import * from CmplSolution import * from CmplSet import * from CmplParameter import * from CmplInstance import * from CmplInfo import * ################################################################################# # # Cmpl # ################################################################################# class Cmpl(threading.Thread): #******************************************************************************** # Constructor and destructor #******************************************************************************** #*********** constructor ********** def __init__(self, model): threading.Thread.__init__(self) self.__compatibility = COMPATIBILITY self.__problem = "" self.__cmplDataStr = cStringIO.StringIO() self.__setList = [] self.__parameterList = [] self.__optionsList = {} self.__status = None self.__solutions = None self.__solutionString = "" self.__cmplMessageString="" self.__cmplInfos = None self.__cmplInfoString = "" self.__cmplFileList = [] self.__dataFileList = [] self.__remoteMode = False self.__remoteStatus = CMPL_UNKNOWN self.__remoteStatusMessage = "" self.__jobId = "" self.__instStr = "" self.__solver="cbc" self.__cmplServer = None self.__cmplUrl = "" self.__cmplGridScheduler = None self.__cmplGridSchedulerUrl = "" self.__serverMode = SERVER_UNKNOWN self.__cmplServerRunning = False self.__maxCmplServerTries = 10 self.__maxCmplServerQueuingTime = 5*60 #in seconds self.__cmplDataFile = None self.__cmplMsgFile = None self.__cmplSolFile = None self.__cmplProc = None self.__refreshTime = 0.5 self.__printOutput = False self.__stringOutput = False self.__outputString = cStringIO.StringIO() self.__isCleaned = False self.__debug = False if type(model) != str: raise CmplException( str(model) + " is not a valid file name for a Cmpl file" ) else: self.__model = model self.__modelDef = None #if not os.path.isfile(model): # raise 
CmplException("CMPL file " + model + " does not exist." ) self.__id = str(random.randint(100000, 999999)) self.__outputLeadString = os.path.basename(os.path.splitext(self.__model)[0]) + "> " #*********** end constructor ******** #*********** destructor ************* def __del__(self): try: self.__cleanUp() self.__outputString.close() self.__cmplDataStr.close() except: pass #*********** end destructor ********* #******************************************************************************** # public methods #******************************************************************************** #*********** model (getter) ********* @property def modelName(self): return self.__model #*********** end model ************** #*********** problem (getter) ********* @property def problem(self): return self.__problem #*********** end model ************** #*********** refreshTime ************ @property def refreshTime(self): return self.__refreshTime #*********** end refreshTime ******** #*********** output ***************** @property def output(self): return self.__outputString.getvalue() #*********** end output ************ #*********** cmplMessages ************ @property def cmplMessages(self): return self.__status.cmplMessageList #*********** end cmplMessages ******** #*********** cmplMessageString ************ @property def cmplMessageString(self): return self.__cmplMessageString #*********** end cmplMessageString ******** #*********** solutions *************** @property def solutionPool(self): if self.__solutions==None: raise CmplException("No Solution found so far") elif self.__solutions.nrOfSolutions>0: return self.__solutions.solutions else: raise CmplException("No Solution found so far") #*********** end solutions *********** #*********** solutions *************** @property def solution(self): if self.__solutions==None: raise CmplException("No Solution found so far") elif self.__solutions.nrOfSolutions>0: return self.__solutions.solution else: raise CmplException("No Solution found so far") #*********** end solutions *********** #*********** nrOfVariables *************** @property def nrOfVariables(self): if self.__solutions==None: raise CmplException("The model isn't generated yet.") else: return self.__solutions.nrOfVariables #*********** end nrOfVariables *********** #*********** nrOfConstraints *************** @property def nrOfConstraints(self): if self.__solutions==None: raise CmplException("The model isn't generated yet.") else: return self.__solutions.nrOfConstraints #*********** end nrOfConstraints *********** #*********** isIntegerProgram *************** @property def isIntegerProgram(self): if self.__solutions==None: raise CmplException("The model isn't generated yet.") else: return self.__solutions.isIntegerProgram #*********** end isIntegerProgram *********** #*********** objectiveName *************** @property def objectiveName(self): if self.__solutions==None: raise CmplException("No Solution found so far") else: return self.__solutions.objectiveName #*********** end objectiveName *********** #*********** objectiveSense *************** @property def objectiveSense(self): if self.__solutions==None: raise CmplException("No Solution found so far") else: return self.__solutions.objectiveSense #*********** end objectiveSense *********** #*********** nrOfSolutions **************** @property def nrOfSolutions(self): if self.__solutions==None: raise CmplException("No Solution found so far") else: return self.__solutions.nrOfSolutions #*********** end nrOfSolutions ************* 
#*********** solver ************************ @property def solver(self): if self.__solutions==None: raise CmplException("Since the model isn't solved the solver is not known.") else: return self.__solutions.solver #*********** end solver ******************** #*********** solverMessage ***************** @property def solverMessage(self): if self.__solutions==None: raise CmplException("Since the model isn't solved the solver message is not known.") else: return self.__solutions.solverMessage #*********** end nrOfSolutions ************* #*********** varDisplayOptions ************* @property def varDisplayOptions(self): if self.__solutions==None: raise CmplException("Since the model isn't solved this option isn't known.") else: return self.__solutions.varDisplayOptions #*********** end varDisplayOptions ********* #*********** conDisplayOptions ************* @property def conDisplayOptions(self): if self.__solutions==None: raise CmplException("Since the model isn't solved this option isn't known.") else: return self.__solutions.conDisplayOptions #*********** end conDisplayOptions ********* #*********** cmplStatus ************** @property def cmplStatus(self): if self.__remoteMode and self.__remoteStatus!=CMPL_UNKNOWN: return self.__remoteStatus else: return self.__status.cmplStatus #*********** end cmplStatus *********** #*********** cmplStatusText *********** @property def cmplStatusText(self): status = 0 if self.__remoteMode and self.__remoteStatus!=CMPL_UNKNOWN: status = self.__remoteStatus else: status = self.__status.cmplStatus return CMPL_STATUS_TXT[status] #*********** end cmplStatusText ****** #*********** solverStatus ************ @property def solverStatus(self): if self.__solutions.nrOfSolutions==0: return SOLVER_FAILED else: return SOLVER_OK #*********** end solverStatus ******** #*********** solverStatusText ******** @property def solverStatusText(self): if self.__solutions.nrOfSolutions==0: return "SOLVER_FAILED" else: return "SOLVER_OK" #*********** end solverStatusText **** #*********** cmplSolFile ************* @property def cmplSolFile(self): return self.__solutions.cmplSolFile #********* end cmplSolFile *********** #*********** csvSolFile ************** @property def csvSolFile(self): return self.__solutions.csvSolFile #********* end csvSolFile ************ #*********** asciiSolFile ************ @property def asciiSolFile(self): return self.__solutions.asciiSolFile #********* end asciiSolFile ********** #*********** jobId ******************* @property def jobId(self): return self.__jobId #*********** end jobId ************** #*********** setModel ********** def setModel(self, modelDef=''): self.__modelDef=modelDef #*********** end setModel ****** #*********** model ******************* def model(self, line=None): if line==None: return self.__modelDef else: modList = self.__modelDef.split('\n') if line < len(modList): return self.__modelDef.split('\n')[line] else: return ' ' #*********** end model ************** #*********** setOutputPipe ********** def setOutput(self, ok=False, lStr=None): self.__printOutput = ok if lStr != None: self.__outputLeadString = lStr #*********** end setOutputPipe ****** #*********** setOutputPipe ********** def setOutputToString(self, ok=False, lStr=None): self.__stringOutput = ok if lStr != None: self.__outputLeadString = lStr #*********** end setOutputPipe ****** #*********** setRefreshTime ********* def setRefreshTime(self, rTime): self.__refreshTime = rTime #*********** end setRefreshTime ***** #*********** setSet ***************** def 
setSet(self, set): if type(set) != CmplSet: raise CmplException(str(set)+ " is not a CMPL set ") else: if len(set.valueList) != 0: self.__setList.append(set) else: raise CmplException("set " + set.name()+ " contains no elements ") #*********** end setSet ************** #*********** setSets ***************** def setSets(self, *set): for s in set: self.setSet(s) #*********** end setSets ************* #*********** setParameter ************ def setParameter(self, param): if type(param) != CmplParameter: raise CmplException("Cmpl.setParameter: " + str(param)+ " is not a CMPL parameter ") else: if len(param.values) != 0: self.__parameterList.append(param) else: raise CmplException("parameter " + param.name()+ " contains no elements ") #*********** end setParameter ******** #*********** setSets ***************** def setParameters(self, *params): for p in params: self.setParameter(p) #*********** end setSets ************* #*********** setOption *************** def setOption(self, option): if type(option) != str: raise CmplException(str(option)+ " is not a valid CMPL option ") else: pos = len(self.__optionsList) self.__optionsList.update({pos:option}) return pos #*********** end setOption *********** #*********** setOption *************** def setOptionsList(self, optionList): if type(optionList) != dict: raise CmplException("Wrong option list ") else: self.__optionsList=optionList #*********** end setOption *********** #*********** delOption *************** def delOption(self, pos): if pos not in self.__optionsList: raise CmplException(str(pos)+" is not a valid CMPL option ") else: del self.__optionsList[pos] #*********** end delOption *********** #*********** delOptions *************** def delOptions(self): self.__optionsList = {} #*********** end delOptions *********** #**** setMaxServerQueuingTime ********* def setMaxServerQueuingTime(self, qTime): self.__maxCmplServerQueuingTime = qTime #** end setMaxServerQueuingTime ******* #**** maxServerQueuingTime ************ @property def maxServerQueuingTime(self): return self.__maxCmplServerQueuingTime #** end maxServerQueuingTime ********** #**** setMaxServerTries *************** def setMaxServerTries(self, tries): self.__maxCmplServerTries = tries #** end setMaxServerTries ************* #**** maxServerTries ****************** @property def maxServerTries(self): return self.__maxCmplServerTries #** end maxServerTries **************** #*********** debug ******************* def debug(self, mode=True): self.__debug = mode #*********** end debug *************** #*********** getVarByName ************ def getVarByName(self, name, solNr=0): if solNr<0 or solNr>self.__solutions.nrOfSolutions-1: raise CmplException("Solution with index " + str(solNr) + " doesn't exist.") s = self.__solByNr(solNr) return self.__getElementByName(name,s.variables) #*********** end getVarByName ******** #*********** getConByName ************ def getConByName(self, name, solNr=0): if solNr<0 or solNr>self.__solutions.nrOfSolutions-1: raise CmplException("Solution with index " + str(solNr) + " doesn't exist.") s = self.__solByNr(solNr) return self.__getElementByName(name,s.constraints) #*********** end getConByName ******** #*********** varByName *************** def varByName(self, solNr=0): if solNr<0 or solNr>self.__solutions.nrOfSolutions-1: raise CmplException("Solution with index " + str(solNr) + " doesn't exist.") s = self.__solByNr(solNr) self.__elementByName(s.variables) #*********** end varByName *********** #*********** conByName *************** def conByName(self, 
solNr=0): if solNr<0 or solNr>self.__solutions.nrOfSolutions-1: raise CmplException("Solution with index " + str(solNr) + " doesn't exist.") s = self.__solByNr(solNr) self.__elementByName(s.constraints) #*********** end conByName *********** #*********** solve ******************* def solve(self): if self.__modelDef != None: self.__writeCmplFile() if self.__remoteMode: if not self.__cmplServerRunning: raise CmplException("Model is not connected to a CmplServer" ) self.__status = CmplMessages() self.__solutions = CmplSolutions() tries=0 while True: #loop is intended for CMPLGrid self.__isCleaned=False try: if self.__remoteStatus == CMPLSERVER_CLEANED: self.connect(self.__cmplUrl) if self.__serverMode == CMPL_GRID and self.__remoteStatus == CMPLSERVER_ERROR: self.connect(self.__cmplUrl) if self.__remoteStatus == CMPLGRID_SCHEDULER_BUSY: startTime=time.time() while self.__remoteStatus != CMPLGRID_SCHEDULER_OK: time.sleep(self.__refreshTime) self.__knockScheduler() if (time.time()-startTime)>=self.__maxCmplServerQueuingTime: self.__cleanUp() raise CmplException("maximum CmplServer queuing time is exceeded.") self.send() if self.__debug: instFile = self.__problem+".cinst" try: f = open(instFile, 'w') f.write(self.__instStr) f.close() except IOError, e: raise CmplException("IO error for file " + instFile + ": "+e) if self.__remoteStatus == CMPLSERVER_BUSY: startTime=time.time() while self.__remoteStatus!=PROBLEM_FINISHED: self.knock() time.sleep(self.__refreshTime) if self.__remoteStatus == CMPLSERVER_BUSY and (time.time()-startTime)>=self.__maxCmplServerQueuingTime: raise CmplException("maximum CmplServer queuing time is exceeded.") self.retrieve() break except CmplException, e: if self.__serverMode == CMPL_GRID and self.__remoteStatus == CMPLSERVER_ERROR and self.__cmplGridSchedulerUrl!="": try: self.__cmplServerExecute("cmplServerFailed") self.__handleOutput("CmplServer failed <"+self.__cmplUrl+">: Problem will be newly connected to CmplGridScheduler and commited to another CmplServer." ) self.__cmplUrl = self.__cmplGridSchedulerUrl self.__cmplGridSchedulerUrl = "" self.__cmplGridScheduler=None self.__cmplServer=None except CmplException, e: raise CmplException("CmplGridScheduler failed: " + e.msg ) tries+=1 if tries==self.__maxCmplServerTries: raise CmplException(e.msg ) continue else: raise CmplException(e.msg) else: if not os.path.isfile(self.__model): raise CmplException("CMPL file " + self.__model + " does not exist." 
) self.__problem = os.path.splitext(self.__model)[0] self.__cmplDataFile = self.__problem + "_" + self.__id+".cdat" self.__cmplMsgFile = self.__problem + "_" + self.__id+".cmsg" self.__cmplSolFile = self.__problem + "_" + self.__id+".csol" self.__cmplDataElements() self.__status = CmplMessages(self.__cmplMsgFile) self.__solutions = CmplSolutions(self.__cmplSolFile) tmpAlias = os.path.basename(self.__problem + "_" + self.__id) try: cmplBin=os.environ['CMPLBINARY'] if not os.path.exists(cmplBin): raise CmplException("Can't find Cmpl binary: " + cmplBin ) except: raise CmplException("Can't find Cmpl binary" ) cmdList = [cmplBin, self.__model, "-solution", "-e", "-alias", tmpAlias ] if len(self.__optionsList) != 0: for opt in self.__optionsList: cmdList.append("-headerOpt") cmdList.append(self.__optionsList[opt].replace(" ", "#") ) self.__cmplProc = subprocess.Popen(cmdList, stdout=subprocess.PIPE, stderr=subprocess.PIPE) #for line in iter(self.__cmplProc.stdout.readline, ''): # self.__handleOutput(line) #while self.__cmplProc.poll() is None: # self.__handleOutput(self.__cmplProc.stdout.readline()) while True: line = self.__cmplProc.stdout.readline() if len(line)>0: self.__handleOutput(line) else: break if self.__cmplProc.wait() != 0: raise CmplException(self.__cmplProc.stderr.read() ) self.__status.readCmplMessages() if self.__status.cmplStatus==CMPL_FAILED: raise CmplException("Cmpl finished with errors", self.__status.cmplMessageList ) self.__solutions.readSolution() self.__writeSolFiles() self.__cleanUp() #*********** end solve *************** #*********** run ******************** def run(self): try: self.solve() except CmplException, e: sys.stderr.write(e.msg+"\n") #*********** end run ***************** #*********** connect **************** def connect(self, cmplUrl): if self.__remoteStatus != CMPL_UNKNOWN and self.__remoteStatus != CMPLSERVER_CLEANED and self.__serverMode != CMPL_GRID: raise CmplException("Problem is still connected with CMPLServer: at " + cmplUrl + " with jobId " + self.__jobId) tries=0 while True: self.__remoteMode = True self.__remoteStatus = CMPL_UNKNOWN self.__serverMode = STANDALONE_SERVER self.__cmplServer=None self.__cmplUrl=cmplUrl self.__cmplGridSchedulerUrl = "" self.__cmplGridScheduler=None try: self.__cmplServer = xmlrpclib.ServerProxy(cmplUrl) self.__getSolver() ret = self.__cmplServer.getJobId(os.path.basename(escape(self.__model)), self.__solver, self.__compatibility) self.__remoteStatus=ret[0] if self.__remoteStatus == CMPLSERVER_OK or self.__remoteStatus == CMPLGRID_SCHEDULER_OK or self.__remoteStatus == CMPLGRID_SCHEDULER_BUSY: self.__jobId = ret[2] if self.__remoteStatus == CMPLSERVER_OK or self.__remoteStatus == CMPLSERVER_ERROR: self.__serverMode = STANDALONE_SERVER elif self.__remoteStatus==CMPLGRID_SCHEDULER_OK or self.__remoteStatus==CMPLGRID_SCHEDULER_ERROR or self.__remoteStatus==CMPLGRID_SCHEDULER_BUSY: self.__serverMode = CMPL_GRID self.__handleOutput("Connected with CmplGridScheduler at " + self.__cmplUrl + " with jobId " + self.__jobId ) if self.__remoteStatus==CMPLGRID_SCHEDULER_OK: self.__connectServerViaScheduler(ret[1]) self.__serverMode = CMPL_GRID except: tries+=1 ret = str(sys.exc_info()[1]) self.__cmplServer=None #only relevant for pyCmpl 1.0 if ret.find("getJobId()")> -1: raise CmplException("Incompatible CmplServer - please install a newer CMPLServer" ) if self.__remoteStatus != CMPL_UNKNOWN: if tries==self.__maxCmplServerTries: raise CmplException("CmplServer error: " + ret ) else: continue else: raise CmplException("CmplServer 
error: " + ret ) break if self.__remoteStatus == CMPLSERVER_ERROR or self.__remoteStatus==CMPLGRID_SCHEDULER_ERROR : self.__cleanUp() raise CmplException(ret[1]) self.__cmplServerRunning =True #if self.__remoteStatus != CMPLGRID_SCHEDULER_BUSY : if self.__serverMode != CMPL_GRID: self.__handleOutput("Connected with CmplServer at " + self.__cmplUrl + " with jobId " + self.__jobId + \ " >> maxServerTries <" + str(self.__maxCmplServerTries)+"> maxQueuingTime <"+str(self.__maxCmplServerQueuingTime)+">") if self.__remoteStatus == CMPLGRID_SCHEDULER_BUSY: self.__handleOutput(ret[1]) #*********** end connect ************* #*********** connect **************** def disconnect(self): self.__cleanUp() self.__remoteStatus = CMPL_UNKNOWN self.__cmplServer=None self.__cmplUrl="" self.__remoteMode=False self.__serverMode = STANDALONE_SERVER #*********** end connect ************* #*********** send ******************** def send(self): if self.__remoteMode: if not self.__cmplServerRunning: raise CmplException("Model is not connected to a CmplServer" ) if self.__remoteStatus == CMPLSERVER_CLEANED: self.connect(self.__cmplUrl) self.knock() if self.__remoteStatus==PROBLEM_RUNNING: raise CmplException("Don't send the problem again before the CmplServer finished the previous one" ) self.__problem = os.path.splitext(self.__model)[0] #self.__cmplSolFile = self.__problem+".csol" self.__cmplDataElements() self.__status = CmplMessages() self.__solutions = CmplSolutions() self.__cmplInfos = CmplInfo() self.__cmplInstance=CmplInstance() self.__instStr = self.__cmplInstance.cmplInstanceStr(self.__model, self.__optionsList.values(),self.__cmplDataStr.getvalue(), self.__jobId ) ret = self.__cmplServerExecute("send") self.__remoteStatus=ret[0] if self.__remoteStatus == CMPLSERVER_ERROR: self.__cleanUp() raise CmplException(ret[1]) else: raise CmplException("Cmpl::send can only be used in remote mode" ) #*********** end send **************** #*********** knock ******************* def knock(self): if self.__remoteMode: if not self.__cmplServerRunning: raise CmplException("Model is not connected to a CmplServer" ) if self.__remoteStatus == CMPLSERVER_CLEANED: raise CmplException("Model was received and cleaned on the CmplServer" ) if self.__remoteStatus!=PROBLEM_CANCELED: ret = self.__cmplServerExecute("knock") self.__remoteStatus=ret[0] if self.__remoteStatus == CMPLSERVER_ERROR or self.__remoteStatus == CMPL_FAILED: self.__cleanUp() raise CmplException(ret[1] ) self.__handleOutput(ret[2]) else: raise CmplException("Cmpl::knock can only be used in remote mode" ) #*********** end knock *************** #*********** retrieve **************** def retrieve(self): if self.__remoteMode: if not self.__cmplServerRunning: raise CmplException("Model is not connected to a CmplServer" ) if self.__remoteStatus == CMPLSERVER_CLEANED: raise CmplException("Model was received and cleaned from the CmplServer" ) if self.__remoteStatus == CMPL_UNKNOWN: self.knock() if self.__remoteStatus==PROBLEM_FINISHED: ret = self.__cmplServerExecute("getCmplMessages") self.__remoteStatus=ret[0] if self.__remoteStatus == CMPLSERVER_ERROR: self.__cleanUp() raise CmplException(ret[1] ) else: self.__status.readCmplMessages(ret[2]) self.__cmplMessageString=ret[2] if self.__status.cmplStatus==CMPL_FAILED: self.__cleanUp() raise CmplException("Cmpl finished with errors", self.__status.cmplMessageList ) ret = self.__cmplServerExecute("getSolutions") if self.__remoteStatus == CMPLSERVER_ERROR: self.__cleanUp() raise CmplException(ret[1] ) else: 
self.__solutionString=ret[2] self.__solutions.readSolution(self.__solutionString) self.__writeSolFiles() ret = self.__cmplServerExecute("getCmplInfo") self.__remoteStatus = ret[0] if self.__remoteStatus == CMPLSERVER_ERROR: self.__cleanUp() raise CmplException(ret[1] ) else: self.__cmplInfoString=ret[2] self.__cmplInfos.readCmplInfo(self.__cmplInfoString) self.__writeInfoFiles() self.__cleanUp() else: if self.__remoteStatus==PROBLEM_CANCELED: raise CmplException("Model has been canceled by user, cannot retrieve the solutions") else: raise CmplException("Model is still running, cannot retrieve the solutions") else: raise CmplException("Cmpl::retrieve can only be used in remote mode" ) #if self.__serverMode == CMPL_GRID: # self.disconnect() #*********** end retrieve ************ #*********** cancel ****************** def cancel(self): if self.__remoteMode: if not self.__cmplServerRunning: raise CmplException("Model is not connected to a CmplServer" ) if self.__remoteStatus == CMPLSERVER_CLEANED: raise CmplException("Model has been received and cleaned from the CmplServer" ) if self.__remoteStatus!=PROBLEM_CANCELED: ret = self.__cmplServerExecute("cancel") self.__remoteStatus=ret[0] if self.__remoteStatus == CMPLSERVER_ERROR: self.__cleanUp() raise CmplException(ret[1]) ret = self.__cmplServerExecute("removeProblem") if self.__remoteStatus == CMPLSERVER_ERROR: self.__cleanUp() raise CmplException(ret[1]) self.__remoteStatus=CMPLSERVER_CLEANED else: self.__cleanUp() #*********** end cancel ************** #*********** saveSolution ************ def saveSolution(self, solFileName=None): if self.__solutions.nrOfSolutions > 0: if solFileName==None: solFile = self.__problem+".csol" else: solFile=solFileName if not self.__remoteMode: self.__solutionString= self.__solutions.solFileContent try: f = open(solFile, 'w') f.write(self.__solutionString) f.close() self.__solutions.delSolFileContent() except IOError, e: raise CmplException("IO error for file " + solFile + ": "+str(e)) self.__handleOutput( "Solution written to cmplSolution file: " + solFile ) else: raise CmplException("No Solution found so far") #*********** end saveSolution ******** #*********** saveSolutionAscii ******* def saveSolutionAscii(self, solFileName=None): if self.__solutions.nrOfSolutions > 0: if solFileName==None: solFile = self.__problem+".sol" else: solFile=solFileName self.solutionReport(solFile) self.__handleOutput( "Solution written to ASCII file: " + solFile ) else: raise CmplException("No Solution found so far") #*********** saveSolutionAscii ******* #*********** solutionReport ********** def solutionReport(self, fileName=None): repStr=cStringIO.StringIO() if self.__solutions.nrOfSolutions > 0: repStr.write("---------------------------------------------------------------------------------------------------------\n") repStr.write('%-20s %-s\n' % ("Problem", os.path.basename(self.__model))) repStr.write('%-20s %-s\n' % ("Nr. of variables" , str(self.__solutions.nrOfVariables))) repStr.write('%-20s %-s\n' % ("Nr. of constraints" , str(self.__solutions.nrOfConstraints))) repStr.write('%-20s %-s\n' % ("Objective name" , self.__solutions.objectiveName)) if self.__solutions.nrOfSolutions > 1: repStr.write('%-20s %-s\n' % ("Nr. 
of solutions" , str(self.__solutions.nrOfSolutions))) repStr.write('%-20s %-s\n' % ("Solver name" , self.__solutions.solver)) repStr.write('%-20s %-s\n' % ("Display variables" , self.__solutions.varDisplayOptions)) repStr.write('%-20s %-s\n' % ("Display constraints" , self.__solutions.conDisplayOptions)) repStr.write('---------------------------------------------------------------------------------------------------------\n') for s in self.__solutions.solutions: repStr.write('\n') if self.__solutions.nrOfSolutions > 1: repStr.write('%-20s %-s\n' % ("Solution nr.", str(s.idx+1))) repStr.write('%-20s %-s\n' % ("Objective status", s.status)) repStr.write('%-20s %-20s(%s!)\n' % ("Objective value", "%-20.2f" % s.value, self.__solutions.objectiveSense)) repStr.write('\n') if len(s.variables)>0: repStr.write('%-20s\n' % "Variables") repStr.write('%-20s%5s%20s%20s%20s%20s\n' % ( "Name" , "Type" , "Activity", "LowerBound", "UpperBound" , "Marginal")) repStr.write('---------------------------------------------------------------------------------------------------------\n') for v in s.variables: if v.type=="C": repStr.write('%-20s%5s%20.2f%20.2f%20.2f' % ( v.name , v.type ,v.activity, v.lowerBound,v.upperBound )) else: repStr.write('%-20s%5s%20g%20.2f%20.2f' % ( v.name , v.type ,v.activity, v.lowerBound,v.upperBound )) if self.__solutions.isIntegerProgram: repStr.write('%20s\n' % "-") else: repStr.write('%20.2f\n' % v.marginal) repStr.write('---------------------------------------------------------------------------------------------------------\n') if len(s.constraints)>0: repStr.write('\n') repStr.write('%-20s\n' % "Constraints") repStr.write('%-20s%5s%20s%20s%20s%20s\n' % ( "Name" , "Type" , "Activity", "LowerBound", "UpperBound" , "Marginal")) repStr.write('---------------------------------------------------------------------------------------------------------\n') for c in s.constraints: repStr.write('%-20s%5s%20.2f%20.2f%20.2f' % ( c.name , c.type ,c.activity, c.lowerBound,c.upperBound )) if self.__solutions.isIntegerProgram: repStr.write('%20s\n' % "-") else: repStr.write('%20.2f\n' % c.marginal) repStr.write('---------------------------------------------------------------------------------------------------------\n') if fileName!=None: try: f = open(fileName, 'w') f.write(repStr.getvalue()) f.close() except IOError, e: raise CmplException("IO error for file " + fileName + ": " +str(e)) else: print repStr.getvalue() repStr.close() else: raise CmplException("No Solution found so far") #*********** end solutionReport ****** #*********** saveSolutionCsv ********** def saveSolutionCsv(self, solFileName=None): if self.__solutions.nrOfSolutions > 0: if solFileName==None: solFile = self.__problem+".csv" else: solFile=solFileName try: f = open(solFile, 'w') f.write("CMPL csv export\n") f.write("\n") f.write("%s;%s\n" % ("Problem", os.path.basename(self.__model))) f.write("%s;%g\n" % ("Nr. of variables" , self.__solutions.nrOfVariables)) f.write("%s;%g\n" % ("Nr. of constraints" , self.__solutions.nrOfConstraints)) f.write("%s;%s\n" % ("Objective name" , self.__solutions.objectiveName)) if self.__solutions.nrOfSolutions > 1: f.write("%s;%g\n" % ("Nr. 
of solutions" , self.__solutions.nrOfSolutions)) f.write("%s;%s\n" % ("Solver name" , self.__solutions.solver)) f.write("%s;%s\n" % ("Display variables" , self.__solutions.varDisplayOptions)) f.write("%s;%s\n" % ("Display constraints" , self.__solutions.conDisplayOptions)) for s in self.__solutions.solutions: f.write("\n") if self.__solutions.nrOfSolutions > 1: f.write("%s;%g\n" % ("Solution nr", s.idx+1)) f.write("%s;%s\n" % ("Objective status", s.status)) f.write("%s;%f;(%s!)\n" % ("Objective value", s.value, self.__solutions.objectiveSense)) if len(s.variables)>0: f.write("%s\n" % "Variables") f.write("%s;%s;%s;%s;%s;%s\n" % ( "Name" , "Type" , "Activity", "LowerBound", "UpperBound" , "Marginal")) for v in s.variables: if v.type=="C": f.write("%s;%s;%f;%f;%f" % ( v.name , v.type ,v.activity, v.lowerBound,v.upperBound )) else: f.write("%s;%s;%g;%f;%f" % ( v.name , v.type ,v.activity, v.lowerBound,v.upperBound )) if self.__solutions.isIntegerProgram: f.write(";-\n") else: f.write (";%f\n" % v.marginal) if len(s.constraints)>0: f.write("%s\n" % "Constraints") f.write("%s;%s;%s;%s;%s;%s\n" % ( "Name" , "Type" , "Activity", "LowerBound", "UpperBound" , "Marginal")) for c in s.constraints: f.write("%s;%s;%f;%f;%f" % ( c.name , c.type ,c.activity, c.lowerBound,c.upperBound )) if self.__solutions.isIntegerProgram: f.write(";-\n") else: f.write (";%f\n" % c.marginal) f.close() self.__handleOutput( "Solution written to CSV file: " + solFile ) except IOError, e: raise CmplException("IO error for file " + tmpName + ": "+e) else: raise CmplException("No Solution found so far") #*********** end saveSolutionCsv ***** #*********** saveDataFile ************ def saveDataFile(self, dataFileName=None): if dataFileName==None: dataFile = self.__problem+".cdat" else: dataFile=solFileName try: f = open(dataFile, 'w') f.write(self.__cmplDataStr.getvalue()) f.close() self.__handleOutput( "CmplData file written to file: " + dataFile ) except IOError, e: raise CmplException("IO error for file " + instFile + ": "+e) #*********** end saveDataFile ************ #*********** saveCmplMessageFile ************ def saveCmplMessageFile(self, msgFileName=""): if self.__cmplInfos.statisticsFileName == "": fName=self.__problem+".cmsg" else: fName=msgFileName self.__writeAsciiFile(fName, self.__cmplMessageString) self.__handleOutput( "CmplMessages written to file: " + fName ) #*********** saveCmplMessageFile ************ #******************************************************************************** # private methods * #******************************************************************************** #*********** cleanUp **************** def __cleanUp(self): if not self.__isCleaned: if self.__debug: raw_input("Hit Enter to exit") if self.__remoteMode: if not (self.__serverMode == CMPL_GRID and self.__cmplGridSchedulerUrl==""): #if not (self.__remoteStatus==PROBLEM_FINISHED or self.__remoteStatus==CMPLSERVER_ERROR or self.__remoteStatus==PROBLEM_CANCELED): if (self.__remoteStatus!=PROBLEM_FINISHED and self.__remoteStatus!=CMPLSERVER_ERROR and self.__remoteStatus!=PROBLEM_CANCELED): if self.__cmplServerRunning: self.__cmplServerExecute("cancel") if ( self.__remoteStatus!=CMPLSERVER_CLEANED and self.__remoteStatus!=CMPLSERVER_ERROR): self.__cmplServerExecute("removeProblem") self.__remoteStatus=CMPLSERVER_CLEANED if self.__serverMode == CMPL_GRID: if self.__remoteStatus == CMPLGRID_SCHEDULER_BUSY or self.__remoteStatus == CMPLGRID_SCHEDULER_UNKNOWN: self.__cmplServerExecute("disconnectFromScheduler") elif self.__cmplProc!=None: 
if self.__cmplProc.poll() == None: self.__cmplProc.kill() if self.__cmplDataFile != None and not self.__remoteMode: if os.path.isfile(self.__cmplDataFile): os.remove(self.__cmplDataFile) if self.__cmplMsgFile != None: if os.path.isfile(self.__cmplMsgFile): os.remove(self.__cmplMsgFile) if self.__cmplSolFile != None: if os.path.isfile(self.__cmplSolFile): os.remove(self.__cmplSolFile) self.__isCleaned = True #*********** end cleanUp ************ #*********** writeCmplDataFile ******* def __cmplDataElements(self): try: for s in self.__setList: self.__cmplDataStr.write("%" + s.name) if s.rank>1: self.__cmplDataStr.write(' set['+str(s.rank)+'] < ' ) else: self.__cmplDataStr.write(' set < ' ) if s.type==0: self.__cmplDataStr.write('\n' ) count = 1 for i in s.valueList: if type(i) == str: self.__cmplDataStr.write("\""+ i +'\" ') elif type(i) == float: self.__cmplDataStr.write(str(long(i)) +' ') elif (type(i) == int or type(i) == long): self.__cmplDataStr.write(str(i)+' ') else: self.__cmplDataStr.write("\""+str(i)+"\" ") if count == s.rank: self.__cmplDataStr.write('\n') count=1 else: count+=1 if s.type==1: self.__cmplDataStr.write('\n' ) for i in s.valueList: for j in i: if type(j) == list: raise CmplException("set " + s.name+ " contains unexpected data " + str(j)) if type(j) == str: self.__cmplDataStr.write("\""+j+"\" ") elif type(j) == float: self.__cmplDataStr.write(str(long(j))+' ') elif (type(j) == int or type(j) == long): self.__cmplDataStr.write(str(j)+' ') else: self.__cmplDataStr.write("\""+str(j)+"\" ") self.__cmplDataStr.write('\n') if s.type==2: self.__cmplDataStr.write(str(s.valueList[0]) + '..' + str(s.valueList[1]) + ' ') if s.type==3: self.__cmplDataStr.write(str(s.valueList[0]) + '(' + str(s.valueList[1]) + ')' + str(s.valueList[2]) + ' ') self.__cmplDataStr.write('>\n' ) for p in self.__parameterList: self.__cmplDataStr.write('%'+p.name) if p.rank > 0: self.__cmplDataStr.write("[") pos = 0 for s in p.setList: setFound = False for j in self.__setList: if j.name == s.name: setFound = True break if not setFound: raise CmplException("The set " + s.name + " used for the parameter " + p.name+ " doesn't exist.") else: self.__cmplDataStr.write(s.name) if pos < len (p.setList) -1: self.__cmplDataStr.write(",") pos += 1 self.__cmplDataStr.write('] ') if 'LIST' in str(type(p.values)).upper(): #if type(p.values)==list: self.__cmplDataStr.write(' <\n') for e in p.values: if type(e) == list: self.__cmplDataStr.write(self.__writeListElements(e)) else: if type(e) == str: self.__cmplDataStr.write('\"' + e + "\" \n") else: self.__cmplDataStr.write(str(e)+ '\n') elif 'DICT' in str(type(p.values)).upper(): #elif type(p.values)==dict: if p.defaultValue!=None: if type(p.defaultValue) == str: self.__cmplDataStr.write(' = \"'+ p.defaultValue +'\" ') else: self.__cmplDataStr.write(' = ' + str(p.defaultValue) +' ') self.__cmplDataStr.write(' indices <\n') for e in p.values: if type(e) == tuple: for i in e: if type(i) == str: self.__cmplDataStr.write("\""+ i +'\" ') elif type(i) == float: self.__cmplDataStr.write(str(long(i)) +' ') else: self.__cmplDataStr.write(str(i) +' ') elif type(e) == str: self.__cmplDataStr.write('\"' + e + "\" ") elif type(e) == float: self.__cmplDataStr.write(str(long(e)) +' ') else: self.__cmplDataStr.write(str(e) +' ') if type(p.values[e]) == str: self.__cmplDataStr.write('\"' + p.values[e] + "\" \n") else: self.__cmplDataStr.write(str(p.values[e])+ '\n') self.__cmplDataStr.write(">\n") else: self.__cmplDataStr.write(" < " ) if type(p.values[0]) == str: 
self.__cmplDataStr.write('\"' + p.values[0] + "\" >\n") else: self.__cmplDataStr.write(str(p.values[0] ) + " >\n") if self.__cmplDataStr.getvalue()!="": if self.__remoteMode: pass else: f = open(self.__cmplDataFile, 'w') f.write(self.__cmplDataStr.getvalue()) f.close() else: self.__cmplDataFile=None except IOError, e: raise CmplException("IO error for cmplDateFile " + self.__cmplDataFile + ": "+e) #*********** end writeCmplDataFile *** #*********** writeListElements ******* def __writeListElements(self, valList): tmpStr=cStringIO.StringIO() for v in valList: if type(v) == list: tmpStr.write(self.__writeListElements( v)) else: if type(v) == str: tmpStr.write('\"'+v+'\" ') else: tmpStr.write(str(v) + ' ') tmpStr.write('\n') tStr=tmpStr.getvalue() tmpStr.close() return tStr #*********** end __writeListElements **** #*********** __getSolver **************** def __getSolver(self): solverFound=False for o in self.__optionsList.values(): if "-solver" in o: self.__solver=o.split()[2].replace("\"","") solverFound=True break if not solverFound: if self.__modelDef!=None: lines = self.__modelDef.split('\n') for line in lines: if line.strip().startswith("%arg"): if "-solver" in line: self.__solver=line.split()[2].replace("\"","") else: try: f = open(self.__model , "r") for line in f: if line.strip().startswith("%arg"): if "-solver" in line: self.__solver=line.split()[2].replace("\"","") except IOError, e: raise CmplException( "Cannot read Cmpl file <"+self.__model+"> " + str(e) ) #*********** end __getSolver *********** #*********** __solByNr *************** def __solByNr(self, solNr): if self.__solutions.nrOfSolutions>0: if solNr <= self.__solutions.nrOfSolutions: return self.__solutions.solutions[solNr] else: raise CmplException("Solution with number: " + str(solNr) + " doesn't exist.") else: raise CmplException("No Solution found so far") #*********** end __solByNr *********** #*********** getElementByName *********** def __getElementByName(self, name, solObj): if self.__solutions.nrOfSolutions>0: solElements = [] solElement = None isArray = False isFound = False for e in solObj: if e.name.startswith(name): if e.name.find("[") != -1: if e.name.split('[')[0]==name: isArray = True solElements.append(e) isFound = True else: if e.name==name: isArray = False solElement = e isFound = True if not isFound: raise CmplException(name + " does not exist.") if isArray: return solElements else: return solElement else: raise CmplException("No Solution found so far") #*********** end getElementByName ***** #*********** elementByName *********** def __elementByName(self, solObj): elemFound = [] for e in solObj: tmpName = "" tmpSet = "" pos = e.name.find("[") if pos != -1: tmpName = e.name[:pos] tmpSet = e.name[pos+1:-1].split(',') tmpSetStr=cStringIO.StringIO() tmpSetStr.write("(") sNr=1 for s in tmpSet: if sNr>1: tmpSetStr.write(",") if CmplTools.strIsNumber(s): tmpSetStr.write(s) else: tmpSetStr.write("\""+ s +"\"") sNr+=1 tmpSetStr.write(")") if tmpName in elemFound: exec "self."+ tmpName + ".update({"+tmpSetStr.getvalue()+": e})" else: elemFound.append(tmpName) exec "self."+ tmpName + "={"+tmpSetStr.getvalue()+": e}" else: tmpName = e.name exec "self."+tmpName + "=e" tmpSetStr.close() #*********** end elementByName ******* #*********** __handleOutput ************ def __handleOutput(self, oStr): if oStr!="": if self.__printOutput: if self.__outputLeadString!="": print self.__outputLeadString + oStr.strip().replace("\n","\n"+self.__outputLeadString) else: print 
oStr.strip().replace("\n","\n"+self.__outputLeadString) if self.__stringOutput: self.__outputString.write(oStr) #*********** end __handleOutput ******** #*********** __connectServerViaScheduler **************** def __connectServerViaScheduler(self, cmplUrl): self.__cmplGridSchedulerUrl = self.__cmplUrl self.__cmplUrl=cmplUrl self.__cmplGridScheduler=self.__cmplServer self.__serverMode = CMPL_GRID try: self.__cmplServer = xmlrpclib.ServerProxy(self.__cmplUrl, allow_none=False) self.__handleOutput("Connected with CmplServer at " + self.__cmplUrl + " with jobId " + self.__jobId + \ " >> maxServerTries <" + str(self.__maxCmplServerTries)+"> maxQueuingTime <"+str(self.__maxCmplServerQueuingTime)+">") except: self.__remoteStatus = CMPLSERVER_ERROR raise CmplException(str(sys.exc_info()[1]) ) #*********** end __connectServerViaScheduler **************** #*********** __knockScheduler ******************* def __knockScheduler(self): if self.__remoteMode: if not self.__cmplServerRunning: raise CmplException("Model is not connected to a CmplScheduler" ) if self.__remoteStatus!=PROBLEM_CANCELED: ret = self.__cmplServerExecute("knock") self.__remoteStatus=ret[0] if self.__remoteStatus == CMPLGRID_SCHEDULER_ERROR: self.__cleanUp() raise CmplException(ret[1] ) elif self.__remoteStatus == CMPLGRID_SCHEDULER_OK: self.__connectServerViaScheduler(ret[1]) else: raise CmplException("Cmpl::knock can only be used in remote mode" ) #*********** end __knockScheduler *************** #*********** cmplServerExecute ******* def __cmplServerExecute(self, method=""): ret=[] tries=0 while True: try: if method=="cancel": ret = self.__cmplServer.cancel(self.__jobId) elif method=="removeProblem": ret = self.__cmplServer.removeProblem(self.__jobId) elif method=="send": ret = self.__cmplServer.send(self.__instStr) elif method=="knock": ret = self.__cmplServer.knock(self.__jobId) elif method=="getCmplMessages": ret = self.__cmplServer.getCmplMessages(self.__jobId) elif method=="getSolutions": ret = self.__cmplServer.getSolutions(self.__jobId) elif method=="getCmplInfo": ret = self.__cmplServer.getCmplInfo(self.__jobId) elif method=="cmplServerFailed": ret = self.__cmplGridScheduler.cmplServerFailed(self.__cmplUrl) elif method=="disconnectFromScheduler": ret = self.__cmplServer.disconnectProblem(self.__jobId) except : tries+=1 if tries==self.__maxCmplServerTries: self.__remoteStatus=CMPLSERVER_ERROR self.__cleanUp() raise CmplException("CmplServer error : "+ str(sys.exc_info()[1]) ) else: continue break return ret #******** end cmplServerExecute ******* #*********** writeSolFiles ********** def __writeSolFiles(self): self.__handleOutput("\n") if self.cmplSolFile != "" : if self.cmplSolFile == "cmplStandard": fName=self.__problem+".csol" else: fName=self.cmplSolFile self.saveSolution(fName) if self.asciiSolFile != "" : if self.asciiSolFile == "cmplStandard": fName=self.__problem+".sol" else: fName=self.asciiSolFile self.saveSolutionAscii(fName) if self.csvSolFile != "" : if self.csvSolFile == "cmplStandard": fName=self.__problem+".csv" else: fName=self.csvSolFile self.saveSolutionCsv(fName) #*********** end writeSolFiles ****** #*********** writeInfoFiles ********** def __writeInfoFiles(self): self.__handleOutput("\n") if self.__cmplInfos.statisticsFileName != "" : if self.__cmplInfos.statisticsFileName == "stdOut": self.__handleOutput( self.__cmplInfos.statisticsText) self.__handleOutput("\n") else: self.__writeAsciiFile(self.__cmplInfos.statisticsFileName, self.__cmplInfos.statisticsText) self.__handleOutput( "Statistics 
written to file: " + self.__cmplInfos.statisticsFileName ) if self.__cmplInfos.varProdFileName != "" : if self.__cmplInfos.varProdFileName == "stdOut": self.__handleOutput( self.__cmplInfos.varProdText) self.__handleOutput("\n") else: self.__writeAsciiFile(self.__cmplInfos.varProdFileName, self.__cmplInfos.varProdtext) self.__handleOutput( "Variable products statistics written to file: " + self.__cmplInfos.varProdFileName ) if self.__cmplInfos.matrixFileName != "" : if self.__cmplInfos.matrixFileName == "stdOut": self.__handleOutput( self.__cmplInfos.matrixText) self.__handleOutput("\n") else: self.__writeAsciiFile(self.__cmplInfos.matrixFileName, self.__cmplInfos.matrixText) self.__handleOutput( "Generated matrix written to file: " + self.__cmplInfos.matrixFileName ) #*********** end writeInfoFiles ****** #*********** __writeAsciiFile ******* def __writeAsciiFile(self, fname, str): try: f = open(fname, 'w') f.write(str) f.close() except IOError, e: raise CmplException("IO error for file " + fname + ": "+e) #******** end __writeAsciiFile ******* #*********** __writeCmplFile ******* def __writeCmplFile(self): try: self.__model = os.path.basename(self.__model) self.__model = os.path.realpath(os.path.normpath(os.path.join(tempfile.gettempdir(), self.__model))) f = open(self.__model, 'w') f.write(self.__modelDef) f.close() except IOError, e: raise CmplException("IO error for file " + self.__model + ": "+e) #******** end __writeCmplFile ******* ################################################################################# # End Cmpl #################################################################################
from __future__ import absolute_import import hashlib import numpy as np from scipy.ndimage import convolve1d import scipy.stats import unittest import centrosome.threshold as T class TestThreshold(unittest.TestCase): def test_01_00_nothing(self): result = T.get_otsu_threshold(-np.ones((10, 10))) def test_01_01_negative_log_otsu(self): """regression test of img-1466""" r = np.random.RandomState() r.seed(11) img = r.uniform(size=(10, 10)) img[0, 0] = -1 unmasked = T.get_otsu_threshold(img) masked = T.get_otsu_threshold(img, img >= 0) self.assertEqual(unmasked, masked) def test_02_00_mct_zeros(self): result = T.get_maximum_correlation_threshold(np.zeros(0)) r = np.random.RandomState() r.seed(11) result = T.get_maximum_correlation_threshold( r.uniform(size=(10, 10)), mask=np.zeros((10, 10), bool) ) result = T.get_maximum_correlation_threshold(np.ones((10, 10)) * 0.5) self.assertEqual(result, 0.5) def test_02_01_mct_matches_reference_implementation(self): image = np.array( [ 0, 255, 231, 161, 58, 218, 95, 17, 136, 56, 179, 196, 1, 70, 173, 113, 192, 101, 223, 65, 127, 27, 234, 224, 205, 61, 74, 168, 63, 209, 120, 41, 218, 22, 66, 135, 244, 178, 193, 238, 140, 215, 96, 194, 158, 20, 169, 61, 55, 1, 130, 17, 240, 237, 15, 228, 136, 207, 65, 90, 191, 253, 63, 101, 206, 91, 154, 76, 43, 89, 213, 26, 17, 107, 251, 164, 206, 191, 73, 32, 51, 191, 80, 48, 61, 57, 4, 152, 74, 174, 103, 91, 106, 217, 194, 161, 248, 59, 198, 24, 22, 36, ], float, ) self.assertEqual(127, T.get_maximum_correlation_threshold(image)) def test_03_01_adaptive_threshold_same(self): r = np.random.RandomState() r.seed(31) block = r.uniform(size=(10, 10)) i, j = np.mgrid[0:10:2, 0:10:2] block[i, j] *= 0.5 i, j = np.mgrid[0:50, 0:50] img = block[i % 10, j % 10] global_threshold = T.get_global_threshold(T.TM_OTSU, block) adaptive_threshold = T.get_adaptive_threshold( T.TM_OTSU, img, global_threshold, adaptive_window_size=10 ) np.testing.assert_almost_equal(adaptive_threshold, global_threshold) def test_03_02_adaptive_threshold_different(self): r = np.random.RandomState() r.seed(31) block = r.uniform(size=(10, 10)) i, j = np.mgrid[0:10:2, 0:10:2] block[i, j] *= 0.5 i, j = np.mgrid[0:50, 0:50] img = block[i % 10, j % 10] * 0.5 # # Make the middle higher in intensity # img[20:30, 20:30] *= 2 global_threshold = T.get_global_threshold(T.TM_OTSU, block) adaptive_threshold = T.get_adaptive_threshold( T.TM_OTSU, img, global_threshold, adaptive_window_size=10 ) # # Check that the gradients are positive for i,j<15 and negative # for i,j>=15 # gradient = convolve1d(adaptive_threshold, [-1, 0, 1], 0) self.assertTrue(np.all(gradient[20:25, 20:30] < 0)) self.assertTrue(np.all(gradient[25:30, 20:30] > 0)) gradient = convolve1d(adaptive_threshold, [-1, 0, 1], 1) self.assertTrue(np.all(gradient[20:30, 20:25] < 0)) self.assertTrue(np.all(gradient[20:30, 25:30] > 0)) def get_random_state(self, *args): h = hashlib.sha1() h.update(np.array(args)) seed = np.frombuffer(h.digest(), np.uint32)[0] r = np.random.RandomState() r.seed(seed) return r def make_mog_image(self, loc1, sigma1, loc2, sigma2, frac1, size): """Make an image that is a mixture of gaussians loc{1,2} - mean of distribution # 1 and 2 sigma{1,2} - standard deviation of distribution # 1 and 2 frac1 - the fraction of pixels that are in distribution 1 size - the shape of the image. 
""" r = self.get_random_state(loc1, sigma1, loc2, sigma2, frac1, *tuple(size)) n_pixels = np.prod(size) p = r.permutation(n_pixels).reshape(size) s1 = int(n_pixels * frac1) s2 = n_pixels - s1 d = np.hstack( [ r.normal(loc=loc1, scale=sigma1, size=s1), r.normal(loc=loc2, scale=sigma2, size=s2), ] ) d[d < 0] = 0 d[d > 1] = 1 return d[p] def test_04_01_robust_background(self): img = self.make_mog_image(0.1, 0.05, 0.5, 0.2, 0.975, (45, 35)) t = T.get_global_threshold(T.TM_ROBUST_BACKGROUND, img) self.assertLess(abs(t - 0.2), 0.025) def test_04_02_robust_background_lower_outliers(self): img = self.make_mog_image(0.1, 0.05, 0.5, 0.2, 0.5, (45, 35)) t0 = T.get_global_threshold( T.TM_ROBUST_BACKGROUND, img, lower_outlier_fraction=0 ) t05 = T.get_global_threshold( T.TM_ROBUST_BACKGROUND, img, lower_outlier_fraction=0.05 ) self.assertNotEqual(t0, t05) def test_04_03_robust_background_upper_outliers(self): img = self.make_mog_image(0.1, 0.05, 0.5, 0.2, 0.9, (45, 35)) t0 = T.get_global_threshold( T.TM_ROBUST_BACKGROUND, img, upper_outlier_fraction=0 ) t05 = T.get_global_threshold( T.TM_ROBUST_BACKGROUND, img, upper_outlier_fraction=0.05 ) self.assertNotEqual(t0, t05) def test_04_04_robust_background_sd(self): img = self.make_mog_image(0.5, 0.1, 0.8, 0.01, 0.99, (45, 35)) t2 = T.get_global_threshold( T.TM_ROBUST_BACKGROUND, img, lower_outlier_fraction=0, upper_outlier_fraction=0, ) self.assertLess(abs(t2 - 0.7), 0.02) t3 = T.get_global_threshold( T.TM_ROBUST_BACKGROUND, img, lower_outlier_fraction=0, upper_outlier_fraction=0, deviations_above_average=2.5, ) self.assertLess(abs(t3 - 0.75), 0.02) def test_04_05_robust_background_median(self): img = self.make_mog_image(0.3, 0.05, 0.5, 0.2, 0.9, (45, 35)) t = T.get_global_threshold( T.TM_ROBUST_BACKGROUND, img, average_fn=np.median, deviations_above_average=0, lower_outlier_fraction=0, upper_outlier_fraction=0, ) self.assertLess(abs(t - 0.3), 0.01) def test_04_06_robust_background_mode(self): img = self.make_mog_image(0.3, 0.05, 0.5, 0.2, 0.9, (45, 35)) img[(img > 0.25) & (img < 0.35)] = 0.304 t = T.get_global_threshold( T.TM_ROBUST_BACKGROUND, img, average_fn=T.binned_mode, deviations_above_average=0, lower_outlier_fraction=0, upper_outlier_fraction=0, ) self.assertAlmostEqual(t, 0.304) def test_04_08_mad(self): img = self.make_mog_image(0.3, 0.05, 0.5, 0.2, 0.95, (45, 35)) t = T.get_global_threshold( T.TM_ROBUST_BACKGROUND, img, variance_fn=T.mad, deviations_above_average=2, lower_outlier_fraction=0, upper_outlier_fraction=0, ) norm = scipy.stats.norm(0, 0.05) # the MAD should be the expected value at the 75th percentile expected = 0.3 + 2 * norm.ppf(0.75) self.assertLess(np.abs(t - expected), 0.02) if __name__ == "__main__": unittest.main()
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Main script for running fivo""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from collections import defaultdict import numpy as np import tensorflow as tf import bounds import data import models import summary_utils as summ tf.logging.set_verbosity(tf.logging.INFO) tf.app.flags.DEFINE_integer("random_seed", None, "A random seed for the data generating process. Same seed " "-> same data generating process and initialization.") tf.app.flags.DEFINE_enum("bound", "fivo", ["iwae", "fivo", "fivo-aux", "fivo-aux-td"], "The bound to optimize.") tf.app.flags.DEFINE_enum("model", "forward", ["forward", "long_chain"], "The model to use.") tf.app.flags.DEFINE_enum("q_type", "normal", ["normal", "simple_mean", "prev_state", "observation"], "The parameterization to use for q") tf.app.flags.DEFINE_enum("p_type", "unimodal", ["unimodal", "bimodal", "nonlinear"], "The type of prior.") tf.app.flags.DEFINE_boolean("train_p", True, "If false, do not train the model p.") tf.app.flags.DEFINE_integer("state_size", 1, "The dimensionality of the state space.") tf.app.flags.DEFINE_float("variance", 1.0, "The variance of the data generating process.") tf.app.flags.DEFINE_boolean("use_bs", True, "If False, initialize all bs to 0.") tf.app.flags.DEFINE_float("bimodal_prior_weight", 0.5, "The weight assigned to the positive mode of the prior in " "both the data generating process and p.") tf.app.flags.DEFINE_float("bimodal_prior_mean", None, "If supplied, sets the mean of the 2 modes of the prior to " "be 1 and -1 times the supplied value. This is for both the " "data generating process and p.") tf.app.flags.DEFINE_float("fixed_observation", None, "If supplied, fix the observation to a constant value in the" " data generating process only.") tf.app.flags.DEFINE_float("r_sigma_init", 1., "Value to initialize variance of r to.") tf.app.flags.DEFINE_enum("observation_type", models.STANDARD_OBSERVATION, models.OBSERVATION_TYPES, "The type of observation for the long chain model.") tf.app.flags.DEFINE_enum("transition_type", models.STANDARD_TRANSITION, models.TRANSITION_TYPES, "The type of transition for the long chain model.") tf.app.flags.DEFINE_float("observation_variance", None, "The variance of the observation. 
Defaults to 'variance'") tf.app.flags.DEFINE_integer("num_timesteps", 5, "Number of timesteps in the sequence.") tf.app.flags.DEFINE_integer("num_observations", 1, "The number of observations.") tf.app.flags.DEFINE_integer("steps_per_observation", 5, "The number of timesteps between each observation.") tf.app.flags.DEFINE_integer("batch_size", 4, "The number of examples per batch.") tf.app.flags.DEFINE_integer("num_samples", 4, "The number particles to use.") tf.app.flags.DEFINE_integer("num_eval_samples", 512, "The batch size and # of particles to use for eval.") tf.app.flags.DEFINE_string("resampling", "always", "How to resample. Accepts 'always','never', or a " "comma-separated list of booleans like 'true,true,false'.") tf.app.flags.DEFINE_enum("resampling_method", "multinomial", ["multinomial", "stratified", "systematic", "relaxed-logblend", "relaxed-stateblend", "relaxed-linearblend", "relaxed-stateblend-st",], "Type of resampling method to use.") tf.app.flags.DEFINE_boolean("use_resampling_grads", True, "Whether or not to use resampling grads to optimize FIVO." "Disabled automatically if resampling_method=relaxed.") tf.app.flags.DEFINE_boolean("disable_r", False, "If false, r is not used for fivo-aux and is set to zeros.") tf.app.flags.DEFINE_float("learning_rate", 1e-4, "The learning rate to use for ADAM or SGD.") tf.app.flags.DEFINE_integer("decay_steps", 25000, "The number of steps before the learning rate is halved.") tf.app.flags.DEFINE_integer("max_steps", int(1e6), "The number of steps to run training for.") tf.app.flags.DEFINE_string("logdir", "/tmp/fivo-aux", "Directory for summaries and checkpoints.") tf.app.flags.DEFINE_integer("summarize_every", int(1e3), "The number of steps between each evaluation.") FLAGS = tf.app.flags.FLAGS def combine_grad_lists(grad_lists): # grads is num_losses by num_variables. # each list could have different variables. # for each variable, sum the grads across all losses. grads_dict = defaultdict(list) var_dict = {} for grad_list in grad_lists: for grad, var in grad_list: if grad is not None: grads_dict[var.name].append(grad) var_dict[var.name] = var final_grads = [] for var_name, var in var_dict.iteritems(): grads = grads_dict[var_name] if len(grads) > 0: tf.logging.info("Var %s has combined grads from %s." % (var_name, [g.name for g in grads])) grad = tf.reduce_sum(grads, axis=0) else: tf.logging.info("Var %s has no grads" % var_name) grad = None final_grads.append((grad, var)) return final_grads def make_apply_grads_op(losses, global_step, learning_rate, lr_decay_steps): for l in losses: assert isinstance(l, bounds.Loss) lr = tf.train.exponential_decay( learning_rate, global_step, lr_decay_steps, 0.5, staircase=False) tf.summary.scalar("learning_rate", lr) opt = tf.train.AdamOptimizer(lr) ema_ops = [] grads = [] for loss_name, loss, loss_var_collection in losses: tf.logging.info("Computing grads of %s w.r.t. vars in collection %s" % (loss_name, loss_var_collection)) g = opt.compute_gradients(loss, var_list=tf.get_collection(loss_var_collection)) ema_ops.append(summ.summarize_grads(g, loss_name)) grads.append(g) all_grads = combine_grad_lists(grads) apply_grads_op = opt.apply_gradients(all_grads, global_step=global_step) # Update the emas after applying the grads. 
with tf.control_dependencies([apply_grads_op]): train_op = tf.group(*ema_ops) return train_op def add_check_numerics_ops(): check_op = [] for op in tf.get_default_graph().get_operations(): bad = ["logits/Log", "sample/Reshape", "log_prob/mul", "log_prob/SparseSoftmaxCrossEntropyWithLogits/Reshape", "entropy/Reshape", "entropy/LogSoftmax", "Categorical", "Mean"] if all([x not in op.name for x in bad]): for output in op.outputs: if output.dtype in [tf.float16, tf.float32, tf.float64]: if op._get_control_flow_context() is not None: # pylint: disable=protected-access raise ValueError("`tf.add_check_numerics_ops() is not compatible " "with TensorFlow control flow operations such as " "`tf.cond()` or `tf.while_loop()`.") message = op.name + ":" + str(output.value_index) with tf.control_dependencies(check_op): check_op = [tf.check_numerics(output, message=message)] return tf.group(*check_op) def create_long_chain_graph(bound, state_size, num_obs, steps_per_obs, batch_size, num_samples, num_eval_samples, resampling_schedule, use_resampling_grads, learning_rate, lr_decay_steps, dtype="float64"): num_timesteps = num_obs * steps_per_obs + 1 # Make the dataset. dataset = data.make_long_chain_dataset( state_size=state_size, num_obs=num_obs, steps_per_obs=steps_per_obs, batch_size=batch_size, num_samples=num_samples, variance=FLAGS.variance, observation_variance=FLAGS.observation_variance, dtype=dtype, observation_type=FLAGS.observation_type, transition_type=FLAGS.transition_type, fixed_observation=FLAGS.fixed_observation) itr = dataset.make_one_shot_iterator() _, observations = itr.get_next() # Make the dataset for eval eval_dataset = data.make_long_chain_dataset( state_size=state_size, num_obs=num_obs, steps_per_obs=steps_per_obs, batch_size=batch_size, num_samples=num_eval_samples, variance=FLAGS.variance, observation_variance=FLAGS.observation_variance, dtype=dtype, observation_type=FLAGS.observation_type, transition_type=FLAGS.transition_type, fixed_observation=FLAGS.fixed_observation) eval_itr = eval_dataset.make_one_shot_iterator() _, eval_observations = eval_itr.get_next() # Make the model. model = models.LongChainModel.create( state_size, num_obs, steps_per_obs, observation_type=FLAGS.observation_type, transition_type=FLAGS.transition_type, variance=FLAGS.variance, observation_variance=FLAGS.observation_variance, dtype=tf.as_dtype(dtype), disable_r=FLAGS.disable_r) # Compute the bound and loss if bound == "iwae": (_, losses, ema_op, _, _) = bounds.iwae( model, observations, num_timesteps, num_samples=num_samples) (eval_log_p_hat, _, _, _, eval_log_weights) = bounds.iwae( model, eval_observations, num_timesteps, num_samples=num_eval_samples, summarize=False) eval_log_p_hat = tf.reduce_mean(eval_log_p_hat) elif bound == "fivo" or "fivo-aux": (_, losses, ema_op, _, _) = bounds.fivo( model, observations, num_timesteps, resampling_schedule=resampling_schedule, use_resampling_grads=use_resampling_grads, resampling_type=FLAGS.resampling_method, aux=("aux" in bound), num_samples=num_samples) (eval_log_p_hat, _, _, _, eval_log_weights) = bounds.fivo( model, eval_observations, num_timesteps, resampling_schedule=resampling_schedule, use_resampling_grads=False, resampling_type="multinomial", aux=("aux" in bound), num_samples=num_eval_samples, summarize=False) eval_log_p_hat = tf.reduce_mean(eval_log_p_hat) summ.summarize_ess(eval_log_weights, only_last_timestep=True) tf.summary.scalar("log_p_hat", eval_log_p_hat) # Compute and apply grads. 
global_step = tf.train.get_or_create_global_step() apply_grads = make_apply_grads_op(losses, global_step, learning_rate, lr_decay_steps) # Update the emas after applying the grads. with tf.control_dependencies([apply_grads]): train_op = tf.group(ema_op) # We can't calculate the likelihood for most of these models # so we just return zeros. eval_likelihood = tf.zeros([], dtype=dtype) return global_step, train_op, eval_log_p_hat, eval_likelihood def create_graph(bound, state_size, num_timesteps, batch_size, num_samples, num_eval_samples, resampling_schedule, use_resampling_grads, learning_rate, lr_decay_steps, train_p, dtype='float64'): if FLAGS.use_bs: true_bs = None else: true_bs = [np.zeros([state_size]).astype(dtype) for _ in xrange(num_timesteps)] # Make the dataset. true_bs, dataset = data.make_dataset( bs=true_bs, state_size=state_size, num_timesteps=num_timesteps, batch_size=batch_size, num_samples=num_samples, variance=FLAGS.variance, prior_type=FLAGS.p_type, bimodal_prior_weight=FLAGS.bimodal_prior_weight, bimodal_prior_mean=FLAGS.bimodal_prior_mean, transition_type=FLAGS.transition_type, fixed_observation=FLAGS.fixed_observation, dtype=dtype) itr = dataset.make_one_shot_iterator() _, observations = itr.get_next() # Make the dataset for eval _, eval_dataset = data.make_dataset( bs=true_bs, state_size=state_size, num_timesteps=num_timesteps, batch_size=num_eval_samples, num_samples=num_eval_samples, variance=FLAGS.variance, prior_type=FLAGS.p_type, bimodal_prior_weight=FLAGS.bimodal_prior_weight, bimodal_prior_mean=FLAGS.bimodal_prior_mean, transition_type=FLAGS.transition_type, fixed_observation=FLAGS.fixed_observation, dtype=dtype) eval_itr = eval_dataset.make_one_shot_iterator() _, eval_observations = eval_itr.get_next() # Make the model. 
if bound == "fivo-aux-td": model = models.TDModel.create( state_size, num_timesteps, variance=FLAGS.variance, train_p=train_p, p_type=FLAGS.p_type, q_type=FLAGS.q_type, mixing_coeff=FLAGS.bimodal_prior_weight, prior_mode_mean=FLAGS.bimodal_prior_mean, observation_variance=FLAGS.observation_variance, transition_type=FLAGS.transition_type, use_bs=FLAGS.use_bs, dtype=tf.as_dtype(dtype), random_seed=FLAGS.random_seed) else: model = models.Model.create( state_size, num_timesteps, variance=FLAGS.variance, train_p=train_p, p_type=FLAGS.p_type, q_type=FLAGS.q_type, mixing_coeff=FLAGS.bimodal_prior_weight, prior_mode_mean=FLAGS.bimodal_prior_mean, observation_variance=FLAGS.observation_variance, transition_type=FLAGS.transition_type, use_bs=FLAGS.use_bs, r_sigma_init=FLAGS.r_sigma_init, dtype=tf.as_dtype(dtype), random_seed=FLAGS.random_seed) # Compute the bound and loss if bound == "iwae": (_, losses, ema_op, _, _) = bounds.iwae( model, observations, num_timesteps, num_samples=num_samples) (eval_log_p_hat, _, _, eval_states, eval_log_weights) = bounds.iwae( model, eval_observations, num_timesteps, num_samples=num_eval_samples, summarize=True) eval_log_p_hat = tf.reduce_mean(eval_log_p_hat) elif "fivo" in bound: if bound == "fivo-aux-td": (_, losses, ema_op, _, _) = bounds.fivo_aux_td( model, observations, num_timesteps, resampling_schedule=resampling_schedule, num_samples=num_samples) (eval_log_p_hat, _, _, eval_states, eval_log_weights) = bounds.fivo_aux_td( model, eval_observations, num_timesteps, resampling_schedule=resampling_schedule, num_samples=num_eval_samples, summarize=True) else: (_, losses, ema_op, _, _) = bounds.fivo( model, observations, num_timesteps, resampling_schedule=resampling_schedule, use_resampling_grads=use_resampling_grads, resampling_type=FLAGS.resampling_method, aux=("aux" in bound), num_samples=num_samples) (eval_log_p_hat, _, _, eval_states, eval_log_weights) = bounds.fivo( model, eval_observations, num_timesteps, resampling_schedule=resampling_schedule, use_resampling_grads=False, resampling_type="multinomial", aux=("aux" in bound), num_samples=num_eval_samples, summarize=True) eval_log_p_hat = tf.reduce_mean(eval_log_p_hat) summ.summarize_ess(eval_log_weights, only_last_timestep=True) # if FLAGS.p_type == "bimodal": # # create the observations that showcase the model. # mode_odds_ratio = tf.convert_to_tensor([1., 3., 1./3., 512., 1./512.], # dtype=tf.float64) # mode_odds_ratio = tf.expand_dims(mode_odds_ratio, 1) # k = ((num_timesteps+1) * FLAGS.variance) / (2*FLAGS.bimodal_prior_mean) # explain_obs = tf.reduce_sum(model.p.bs) + tf.log(mode_odds_ratio) * k # explain_obs = tf.tile(explain_obs, [num_eval_samples, 1]) # # run the model on the explainable observations # if bound == "iwae": # (_, _, _, explain_states, explain_log_weights) = bounds.iwae( # model, # explain_obs, # num_timesteps, # num_samples=num_eval_samples) # elif bound == "fivo" or "fivo-aux": # (_, _, _, explain_states, explain_log_weights) = bounds.fivo( # model, # explain_obs, # num_timesteps, # resampling_schedule=resampling_schedule, # use_resampling_grads=False, # resampling_type="multinomial", # aux=("aux" in bound), # num_samples=num_eval_samples) # summ.summarize_particles(explain_states, # explain_log_weights, # explain_obs, # model) # Calculate the true likelihood. 
if hasattr(model.p, 'likelihood') and callable(getattr(model.p, 'likelihood')): eval_likelihood = model.p.likelihood(eval_observations)/ FLAGS.num_timesteps else: eval_likelihood = tf.zeros_like(eval_log_p_hat) tf.summary.scalar("log_p_hat", eval_log_p_hat) tf.summary.scalar("likelihood", eval_likelihood) tf.summary.scalar("bound_gap", eval_likelihood - eval_log_p_hat) summ.summarize_model(model, true_bs, eval_observations, eval_states, bound, summarize_r=not bound == "fivo-aux-td") # Compute and apply grads. global_step = tf.train.get_or_create_global_step() apply_grads = make_apply_grads_op(losses, global_step, learning_rate, lr_decay_steps) # Update the emas after applying the grads. with tf.control_dependencies([apply_grads]): train_op = tf.group(ema_op) #train_op = tf.group(ema_op, add_check_numerics_ops()) return global_step, train_op, eval_log_p_hat, eval_likelihood def parse_resampling_schedule(schedule, num_timesteps): schedule = schedule.strip().lower() if schedule == "always": return [True] * (num_timesteps - 1) + [False] elif schedule == "never": return [False] * num_timesteps elif "every" in schedule: n = int(schedule.split("_")[1]) return [(i+1) % n == 0 for i in xrange(num_timesteps)] else: sched = [x.strip() == "true" for x in schedule.split(",")] assert len( sched ) == num_timesteps, "Wrong number of timesteps in resampling schedule." return sched def create_log_hook(step, eval_log_p_hat, eval_likelihood): def summ_formatter(d): return ("Step {step}, log p_hat: {log_p_hat:.5f} likelihood: {likelihood:.5f}".format(**d)) hook = tf.train.LoggingTensorHook( { "step": step, "log_p_hat": eval_log_p_hat, "likelihood": eval_likelihood, }, every_n_iter=FLAGS.summarize_every, formatter=summ_formatter) return hook def create_infrequent_summary_hook(): infrequent_summary_hook = tf.train.SummarySaverHook( save_steps=10000, output_dir=FLAGS.logdir, summary_op=tf.summary.merge_all(key="infrequent_summaries") ) return infrequent_summary_hook def main(unused_argv): if FLAGS.model == "long_chain": resampling_schedule = parse_resampling_schedule(FLAGS.resampling, FLAGS.num_timesteps + 1) else: resampling_schedule = parse_resampling_schedule(FLAGS.resampling, FLAGS.num_timesteps) if FLAGS.random_seed is None: seed = np.random.randint(0, high=10000) else: seed = FLAGS.random_seed tf.logging.info("Using random seed %d", seed) if FLAGS.model == "long_chain": assert FLAGS.q_type == "normal", "Q type %s not supported for long chain models" % FLAGS.q_type assert FLAGS.p_type == "unimodal", "Bimodal priors are not supported for long chain models" assert not FLAGS.use_bs, "Bs are not supported with long chain models" assert FLAGS.num_timesteps == FLAGS.num_observations * FLAGS.steps_per_observation, "Num timesteps does not match." assert FLAGS.bound != "fivo-aux-td", "TD Training is not compatible with long chain models." if FLAGS.model == "forward": if "nonlinear" not in FLAGS.p_type: assert FLAGS.transition_type == models.STANDARD_TRANSITION, "Non-standard transitions not supported by the forward model." assert FLAGS.observation_type == models.STANDARD_OBSERVATION, "Non-standard observations not supported by the forward model." assert FLAGS.observation_variance is None, "Forward model does not support observation variance." assert FLAGS.num_observations == 1, "Forward model only supports 1 observation." if "relaxed" in FLAGS.resampling_method: FLAGS.use_resampling_grads = False assert FLAGS.bound != "fivo-aux-td", "TD Training is not compatible with relaxed resampling." 
if FLAGS.observation_variance is None: FLAGS.observation_variance = FLAGS.variance if FLAGS.p_type == "bimodal": assert FLAGS.bimodal_prior_mean is not None, "Must specify prior mean if using bimodal p." if FLAGS.p_type == "nonlinear" or FLAGS.p_type == "nonlinear-cauchy": assert not FLAGS.use_bs, "Using bs is not compatible with the nonlinear model." g = tf.Graph() with g.as_default(): # Set the seeds. tf.set_random_seed(seed) np.random.seed(seed) if FLAGS.model == "long_chain": (global_step, train_op, eval_log_p_hat, eval_likelihood) = create_long_chain_graph( FLAGS.bound, FLAGS.state_size, FLAGS.num_observations, FLAGS.steps_per_observation, FLAGS.batch_size, FLAGS.num_samples, FLAGS.num_eval_samples, resampling_schedule, FLAGS.use_resampling_grads, FLAGS.learning_rate, FLAGS.decay_steps) else: (global_step, train_op, eval_log_p_hat, eval_likelihood) = create_graph( FLAGS.bound, FLAGS.state_size, FLAGS.num_timesteps, FLAGS.batch_size, FLAGS.num_samples, FLAGS.num_eval_samples, resampling_schedule, FLAGS.use_resampling_grads, FLAGS.learning_rate, FLAGS.decay_steps, FLAGS.train_p) log_hooks = [create_log_hook(global_step, eval_log_p_hat, eval_likelihood)] if len(tf.get_collection("infrequent_summaries")) > 0: log_hooks.append(create_infrequent_summary_hook()) tf.logging.info("trainable variables:") tf.logging.info([v.name for v in tf.trainable_variables()]) tf.logging.info("p vars:") tf.logging.info([v.name for v in tf.get_collection("P_VARS")]) tf.logging.info("q vars:") tf.logging.info([v.name for v in tf.get_collection("Q_VARS")]) tf.logging.info("r vars:") tf.logging.info([v.name for v in tf.get_collection("R_VARS")]) tf.logging.info("r tilde vars:") tf.logging.info([v.name for v in tf.get_collection("R_TILDE_VARS")]) with tf.train.MonitoredTrainingSession( master="", is_chief=True, hooks=log_hooks, checkpoint_dir=FLAGS.logdir, save_checkpoint_secs=120, save_summaries_steps=FLAGS.summarize_every, log_step_count_steps=FLAGS.summarize_every) as sess: cur_step = -1 while True: if sess.should_stop() or cur_step > FLAGS.max_steps: break # run a step _, cur_step = sess.run([train_op, global_step]) if __name__ == "__main__": tf.app.run(main)
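# ---------------------------------------------------------------------------
# Worked examples for parse_resampling_schedule above, derived directly from
# the code rather than from external documentation. They are wrapped in an
# illustrative helper (not called anywhere) so nothing extra runs at import
# time; invoke it manually as a sanity check.
def _check_resampling_schedule_examples():
  assert parse_resampling_schedule("always", 4) == [True, True, True, False]
  assert parse_resampling_schedule("never", 4) == [False, False, False, False]
  assert parse_resampling_schedule("every_2", 6) == [False, True, False, True, False, True]
  assert parse_resampling_schedule("true,false,true", 3) == [True, False, True]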
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import uuid import glanceclient.v1.images import routes import webob import webob.dec import webob.request from nova.api import auth as api_auth from nova.api import openstack as openstack_api from nova.api.openstack import auth from nova.api.openstack import compute from nova.api.openstack.compute import limits from nova.api.openstack.compute import versions from nova.api.openstack import urlmap from nova.api.openstack import wsgi as os_wsgi from nova.compute import api as compute_api from nova.compute import instance_types from nova.compute import vm_states from nova import context from nova.db.sqlalchemy import models from nova import exception as exc import nova.image.glance from nova.network import api as network_api from nova.openstack.common import jsonutils from nova.openstack.common import timeutils from nova import quota from nova.tests import fake_network from nova.tests.glance import stubs as glance_stubs from nova import wsgi QUOTAS = quota.QUOTAS FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' FAKE_UUIDS = {} class Context(object): pass class FakeRouter(wsgi.Router): def __init__(self, ext_mgr=None): pass @webob.dec.wsgify def __call__(self, req): res = webob.Response() res.status = '200' res.headers['X-Test-Success'] = 'True' return res @webob.dec.wsgify def fake_wsgi(self, req): return self.application def wsgi_app(inner_app_v2=None, fake_auth_context=None, use_no_auth=False, ext_mgr=None, init_only=None): if not inner_app_v2: inner_app_v2 = compute.APIRouter(ext_mgr, init_only) if use_no_auth: api_v2 = openstack_api.FaultWrapper(auth.NoAuthMiddleware( limits.RateLimitingMiddleware(inner_app_v2))) else: if fake_auth_context is not None: ctxt = fake_auth_context else: ctxt = context.RequestContext('fake', 'fake', auth_token=True) api_v2 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt, limits.RateLimitingMiddleware(inner_app_v2))) mapper = urlmap.URLMap() mapper['/v2'] = api_v2 mapper['/v1.1'] = api_v2 mapper['/'] = openstack_api.FaultWrapper(versions.Versions()) return mapper def stub_out_key_pair_funcs(stubs, have_key_pair=True): def key_pair(context, user_id): return [dict(name='key', public_key='public_key')] def one_key_pair(context, user_id, name): if name == 'key': return dict(name='key', public_key='public_key') else: raise exc.KeypairNotFound(user_id=user_id, name=name) def no_key_pair(context, user_id): return [] if have_key_pair: stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair) stubs.Set(nova.db, 'key_pair_get', one_key_pair) else: stubs.Set(nova.db, 'key_pair_get_all_by_user', no_key_pair) def stub_out_rate_limiting(stubs): def fake_rate_init(self, app): super(limits.RateLimitingMiddleware, self).__init__(app) self.application = app stubs.Set(nova.api.openstack.compute.limits.RateLimitingMiddleware, '__init__', fake_rate_init) stubs.Set(nova.api.openstack.compute.limits.RateLimitingMiddleware, 
'__call__', fake_wsgi) def stub_out_instance_quota(stubs, allowed, quota, resource='instances'): def fake_reserve(context, **deltas): requested = deltas.pop(resource, 0) if requested > allowed: quotas = dict(instances=1, cores=1, ram=1) quotas[resource] = quota usages = dict(instances=dict(in_use=0, reserved=0), cores=dict(in_use=0, reserved=0), ram=dict(in_use=0, reserved=0)) usages[resource]['in_use'] = (quotas[resource] * 0.9 - allowed) usages[resource]['reserved'] = quotas[resource] * 0.1 raise exc.OverQuota(overs=[resource], quotas=quotas, usages=usages) stubs.Set(QUOTAS, 'reserve', fake_reserve) def stub_out_networking(stubs): def get_my_ip(): return '127.0.0.1' stubs.Set(nova.netconf, '_get_my_ip', get_my_ip) def stub_out_compute_api_snapshot(stubs): def snapshot(self, context, instance, name, extra_properties=None): return dict(id='123', status='ACTIVE', name=name, properties=extra_properties) stubs.Set(compute_api.API, 'snapshot', snapshot) class stub_out_compute_api_backup(object): def __init__(self, stubs): self.stubs = stubs self.extra_props_last_call = None stubs.Set(compute_api.API, 'backup', self.backup) def backup(self, context, instance, name, backup_type, rotation, extra_properties=None): self.extra_props_last_call = extra_properties props = dict(backup_type=backup_type, rotation=rotation) props.update(extra_properties or {}) return dict(id='123', status='ACTIVE', name=name, properties=props) def stub_out_nw_api_get_instance_nw_info(stubs, num_networks=1, func=None): fake_network.stub_out_nw_api_get_instance_nw_info(stubs, spectacular=True) def stub_out_nw_api_get_floating_ips_by_fixed_address(stubs, func=None): def get_floating_ips_by_fixed_address(self, context, fixed_ip): return ['1.2.3.4'] if func is None: func = get_floating_ips_by_fixed_address stubs.Set(network_api.API, 'get_floating_ips_by_fixed_address', func) def stub_out_nw_api(stubs, cls=None, private=None, publics=None): if not private: private = '192.168.0.3' if not publics: publics = ['1.2.3.4'] class Fake: def get_instance_nw_info(*args, **kwargs): pass def get_floating_ips_by_fixed_address(*args, **kwargs): return publics if cls is None: cls = Fake stubs.Set(network_api, 'API', cls) fake_network.stub_out_nw_api_get_instance_nw_info(stubs, spectacular=True) def _make_image_fixtures(): NOW_GLANCE_FORMAT = "2010-10-11T10:30:22" image_id = 123 base_attrs = {'deleted': False} fixtures = [] def add_fixture(**kwargs): kwargs.update(base_attrs) fixtures.append(kwargs) # Public image add_fixture(id=image_id, name='public image', is_public=True, status='active', properties={'key1': 'value1'}, min_ram="128", min_disk="10") image_id += 1 # Snapshot for User 1 uuid = 'aa640691-d1a7-4a67-9d3c-d35ee6b3cc74' server_ref = 'http://localhost/v2/servers/' + uuid snapshot_properties = {'instance_uuid': uuid, 'user_id': 'fake'} for status in ('queued', 'saving', 'active', 'killed', 'deleted', 'pending_delete'): add_fixture(id=image_id, name='%s snapshot' % status, is_public=False, status=status, properties=snapshot_properties) image_id += 1 # Image without a name add_fixture(id=image_id, is_public=True, status='active', properties={}) return fixtures def stub_out_glanceclient_create(stubs, sent_to_glance): """ We return the metadata sent to glance by modifying the sent_to_glance dict in place. 
""" orig_add_image = glanceclient.v1.images.ImageManager.create def fake_create(context, metadata, data=None): sent_to_glance['metadata'] = metadata sent_to_glance['data'] = data return orig_add_image(metadata, data) stubs.Set(glanceclient.v1.images.ImageManager, 'create', fake_create) def stub_out_glance(stubs): def fake_get_remote_image_service(): client = glance_stubs.StubGlanceClient(_make_image_fixtures()) client_wrapper = nova.image.glance.GlanceClientWrapper() client_wrapper.host = 'fake_host' client_wrapper.port = 9292 client_wrapper.client = client return nova.image.glance.GlanceImageService(client=client_wrapper) stubs.Set(nova.image.glance, 'get_default_image_service', fake_get_remote_image_service) class FakeToken(object): id_count = 0 def __getitem__(self, key): return getattr(self, key) def __init__(self, **kwargs): FakeToken.id_count += 1 self.id = FakeToken.id_count for k, v in kwargs.iteritems(): setattr(self, k, v) class FakeRequestContext(context.RequestContext): def __init__(self, *args, **kwargs): kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token') return super(FakeRequestContext, self).__init__(*args, **kwargs) class HTTPRequest(os_wsgi.Request): @classmethod def blank(cls, *args, **kwargs): kwargs['base_url'] = 'http://localhost/v2' use_admin_context = kwargs.pop('use_admin_context', False) out = os_wsgi.Request.blank(*args, **kwargs) out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake', is_admin=use_admin_context) return out class TestRouter(wsgi.Router): def __init__(self, controller): mapper = routes.Mapper() mapper.resource("test", "tests", controller=os_wsgi.Resource(controller)) super(TestRouter, self).__init__(mapper) class FakeAuthDatabase(object): data = {} @staticmethod def auth_token_get(context, token_hash): return FakeAuthDatabase.data.get(token_hash, None) @staticmethod def auth_token_create(context, token): fake_token = FakeToken(created_at=timeutils.utcnow(), **token) FakeAuthDatabase.data[fake_token.token_hash] = fake_token FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token return fake_token @staticmethod def auth_token_destroy(context, token_id): token = FakeAuthDatabase.data.get('id_%i' % token_id) if token and token.token_hash in FakeAuthDatabase.data: del FakeAuthDatabase.data[token.token_hash] del FakeAuthDatabase.data['id_%i' % token_id] class FakeRateLimiter(object): def __init__(self, application): self.application = application @webob.dec.wsgify def __call__(self, req): return self.application def create_info_cache(nw_cache): if nw_cache is None: pub0 = ('192.168.1.100',) pub1 = ('2001:db8:0:1::1',) def _ip(ip): return {'address': ip, 'type': 'fixed'} nw_cache = [ {'address': 'aa:aa:aa:aa:aa:aa', 'id': 1, 'network': {'bridge': 'br0', 'id': 1, 'label': 'test1', 'subnets': [{'cidr': '192.168.1.0/24', 'ips': [_ip(ip) for ip in pub0]}, {'cidr': 'b33f::/64', 'ips': [_ip(ip) for ip in pub1]}]}}] return {"info_cache": {"network_info": nw_cache}} if not isinstance(nw_cache, basestring): nw_cache = jsonutils.dumps(nw_cache) return {"info_cache": {"network_info": nw_cache}} def get_fake_uuid(token=0): if not token in FAKE_UUIDS: FAKE_UUIDS[token] = str(uuid.uuid4()) return FAKE_UUIDS[token] def fake_instance_get(**kwargs): def _return_server(context, uuid): return stub_instance(1, **kwargs) return _return_server def fake_instance_get_all_by_filters(num_servers=5, **kwargs): def _return_servers(context, *args, **kwargs): servers_list = [] marker = None limit = None found_marker = False if "marker" in kwargs: 
marker = kwargs["marker"] if "limit" in kwargs: limit = kwargs["limit"] for i in xrange(num_servers): uuid = get_fake_uuid(i) server = stub_instance(id=i + 1, uuid=uuid, **kwargs) servers_list.append(server) if not marker is None and uuid == marker: found_marker = True servers_list = [] if not marker is None and not found_marker: raise exc.MarkerNotFound(marker=marker) if not limit is None: servers_list = servers_list[:limit] return servers_list return _return_servers def stub_instance(id, user_id=None, project_id=None, host=None, vm_state=None, task_state=None, reservation_id="", uuid=FAKE_UUID, image_ref="10", flavor_id="1", name=None, key_name='', access_ipv4=None, access_ipv6=None, progress=0, auto_disk_config=False, display_name=None, include_fake_metadata=True, config_drive=None, power_state=None, nw_cache=None, metadata=None, security_groups=None, root_device_name=None, limit=None, marker=None): if user_id is None: user_id = 'fake_user' if project_id is None: project_id = 'fake_project' if metadata: metadata = [{'key':k, 'value':v} for k, v in metadata.items()] elif include_fake_metadata: metadata = [models.InstanceMetadata(key='seq', value=str(id))] else: metadata = [] inst_type = instance_types.get_instance_type_by_flavor_id(int(flavor_id)) if host is not None: host = str(host) if key_name: key_data = 'FAKE' else: key_data = '' if security_groups is None: security_groups = [{"id": 1, "name": "test"}] # ReservationID isn't sent back, hack it in there. server_name = name or "server%s" % id if reservation_id != "": server_name = "reservation_%s" % (reservation_id, ) info_cache = create_info_cache(nw_cache) instance = { "id": int(id), "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), "user_id": user_id, "project_id": project_id, "image_ref": image_ref, "kernel_id": "", "ramdisk_id": "", "launch_index": 0, "key_name": key_name, "key_data": key_data, "config_drive": config_drive, "vm_state": vm_state or vm_states.BUILDING, "task_state": task_state, "power_state": power_state, "memory_mb": 0, "vcpus": 0, "root_gb": 0, "ephemeral_gb": 0, "hostname": display_name or server_name, "host": host, "instance_type_id": 1, "instance_type": dict(inst_type), "user_data": "", "reservation_id": reservation_id, "mac_address": "", "scheduled_at": timeutils.utcnow(), "launched_at": timeutils.utcnow(), "terminated_at": timeutils.utcnow(), "availability_zone": "", "display_name": display_name or server_name, "display_description": "", "locked": False, "metadata": metadata, "access_ip_v4": access_ipv4, "access_ip_v6": access_ipv6, "uuid": uuid, "progress": progress, "auto_disk_config": auto_disk_config, "name": "instance-%s" % id, "shutdown_terminate": True, "disable_terminate": False, "security_groups": security_groups, "root_device_name": root_device_name} instance.update(info_cache) return instance def stub_volume(id, **kwargs): volume = { 'id': id, 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'host': 'fakehost', 'size': 1, 'availability_zone': 'fakeaz', 'instance_uuid': 'fakeuuid', 'mountpoint': '/', 'status': 'fakestatus', 'attach_status': 'attached', 'name': 'vol name', 'display_name': 'displayname', 'display_description': 'displaydesc', 'created_at': datetime.datetime(1999, 1, 1, 1, 1, 1), 'snapshot_id': None, 'volume_type_id': 'fakevoltype', 'volume_metadata': [], 'volume_type': {'name': 'vol_type_name'}} volume.update(kwargs) return volume def stub_volume_create(self, context, size, name, description, snapshot, **param): vol = 
stub_volume('1') vol['size'] = size vol['display_name'] = name vol['display_description'] = description try: vol['snapshot_id'] = snapshot['id'] except (KeyError, TypeError): vol['snapshot_id'] = None vol['availability_zone'] = param.get('availability_zone', 'fakeaz') return vol def stub_volume_create_from_image(self, context, size, name, description, snapshot, volume_type, metadata, availability_zone): vol = stub_volume('1') vol['status'] = 'creating' vol['size'] = size vol['display_name'] = name vol['display_description'] = description vol['availability_zone'] = 'nova' return vol def stub_volume_update(self, context, *args, **param): pass def stub_volume_delete(self, context, *args, **param): pass def stub_volume_get(self, context, volume_id): return stub_volume(volume_id) def stub_volume_get_notfound(self, context, volume_id): raise exc.VolumeNotFound(volume_id=volume_id) def stub_volume_get_all(context, search_opts=None): return [stub_volume(100, project_id='fake'), stub_volume(101, project_id='superfake'), stub_volume(102, project_id='superduperfake')] def stub_volume_get_all_by_project(self, context, search_opts=None): return [stub_volume_get(self, context, '1')] def stub_snapshot(id, **kwargs): snapshot = { 'id': id, 'volume_id': 12, 'status': 'available', 'volume_size': 100, 'created_at': None, 'display_name': 'Default name', 'display_description': 'Default description', 'project_id': 'fake' } snapshot.update(kwargs) return snapshot def stub_snapshot_get_all(self): return [stub_snapshot(100, project_id='fake'), stub_snapshot(101, project_id='superfake'), stub_snapshot(102, project_id='superduperfake')] def stub_snapshot_get_all_by_project(self, context): return [stub_snapshot(1)]
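# Illustrative sketch (not part of the original fixtures): one way a test might
# wire the instance fakes above into nova.db via a stubs object, mirroring the
# stubs.Set pattern used elsewhere in this module. The db function names
# ('instance_get_by_uuid', 'instance_get_all_by_filters') are assumptions here;
# check the nova.db API of your tree before relying on them. The helper name is
# hypothetical and unused by this module.
def _example_wire_instance_fakes(stubs):
    stubs.Set(nova.db, 'instance_get_by_uuid',
              fake_instance_get(vm_state=vm_states.ACTIVE))
    stubs.Set(nova.db, 'instance_get_all_by_filters',
              fake_instance_get_all_by_filters(num_servers=3))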
import os import re import subprocess from django.utils.text import slugify from django.conf import settings from django.core.cache import cache # These options are passed to Fabric as: fab task --abort-on-prompts=True --user=root ... fabric_special_options = ['no_agent', 'forward-agent', 'config', 'disable-known-hosts', 'keepalive', 'password', 'parallel', 'no-pty', 'reject-unknown-hosts', 'skip-bad-hosts', 'timeout', 'command-timeout', 'user', 'warn-only', 'pool-size', 'key_filename'] def check_output(command, shell=False): executable = None if shell: executable = getattr(settings, 'SHELL', '/bin/sh') return subprocess.check_output(command, shell=shell, executable=executable) def check_output_with_ssh_key(command): if getattr(settings, 'GIT_SSH_KEY_LOCATION', None): return check_output('ssh-agent bash -c "ssh-add {};{}"'.format(settings.GIT_SSH_KEY_LOCATION, command), shell=True) else: return check_output([command], shell=True) def update_project_git(project, cache_dir, repo_dir): if not os.path.exists(repo_dir): if not os.path.exists(cache_dir): os.makedirs(cache_dir) check_output_with_ssh_key('git clone {} {}'.format(project.repo_url, repo_dir)) else: check_output_with_ssh_key( 'cd {0};git stash;git pull'.format(repo_dir) ) def setup_virtual_env_if_needed(repo_dir): env_dir = os.path.join(repo_dir, 'env') if not os.path.exists(env_dir): os.makedirs(env_dir) check_output("virtualenv {}".format(env_dir), shell=True) def update_project_requirements(project, repo_dir, activate_loc): pip_installs = ' '.join(project.fabfile_requirements.splitlines()) check_output_with_ssh_key('source {} && cd {};pip install {}'.format(activate_loc, repo_dir, pip_installs)) def get_fabfile_path(project): if project.use_repo_fabfile: cache_key = 'project_{}_fabfile_path'.format(project.pk) cached_result = cache.get(cache_key) if cached_result: return cached_result cache_dir = os.path.join(settings.PUBLIC_DIR, '.repo_caches') repo_dir = os.path.join(cache_dir, slugify(project.name)) update_project_git(project, cache_dir, repo_dir) setup_virtual_env_if_needed(repo_dir) activate_loc = os.path.join(repo_dir, 'env', 'bin', 'activate') update_project_requirements(project, repo_dir, activate_loc) result = os.path.join(repo_dir, 'fabfile.py'), activate_loc cache.set(cache_key, result, settings.FABRIC_TASK_CACHE_TIMEOUT) return result else: return settings.FABFILE_PATH, None def parse_task_details(name, task_output, alias, fabfile_path, active_loc): lines = task_output.splitlines() docstring = '\n'.join([line.strip() for line in lines[2:-2]]).strip() arguments_line = lines[-2].strip() if docstring == 'No docstring provided': docstring = None arguments_line = arguments_line[11:].strip() arguments = [] if arguments_line: for arg in arguments_line.split(', '): m = re.match(r"^([^=]+)(=(\'?)([^']*)\3)?$", arg) if m.group(2): # found argument with default value if m.group(3) == "'": # default value is a string arguments.append((m.group(1), m.group(4))) else: # found an argument with some other default value. # all fab arguments are translated to strings, so this doesnt make sense. Ignore the default. 
arguments.append(m.group(1)) else: arguments.append(m.group(1)) return alias + '__' + name, docstring, arguments, alias, fabfile_path, active_loc def get_fabric_tasks(project): """ Generate a list of fabric tasks that are available """ cache_key = 'project_{}_fabfile_tasks'.format(project.pk) cached_result = cache.get(cache_key) if cached_result: return cached_result try: dict_fabfile = {} fabfile_path, activate_loc = get_fabfile_path(project) dict_fabfile['default'] = fabfile_path, activate_loc if type(settings.FABFILES).__name__ == 'dict': for alias, fabfile in settings.FABFILES.items(): dict_fabfile[alias] = fabfile, None for alias, fabfile_setting in dict_fabfile.items(): fabfile_path, activate_loc = fabfile_setting if activate_loc: output = check_output('source {};fab --list --list-format=short --fabfile={}'.format(activate_loc, fabfile_path), shell=True) else: output = check_output(['fab', '--list', '--list-format=short', '--fabfile={}'.format(fabfile_path)]) lines = output.splitlines() tasks = [] for line in lines: name = line.strip() if activate_loc: o = check_output( 'source {};fab --display={} --fabfile={}'.format(activate_loc, name, fabfile_path), shell=True ) else: o = check_output( ['fab', '--display={}'.format(name), '--fabfile={}'.format(fabfile_path)] ) tasks.append(parse_task_details(name, o, alias, fabfile_path, activate_loc)) cache.set(cache_key, tasks, settings.FABRIC_TASK_CACHE_TIMEOUT) except Exception as e: tasks = [] return tasks def get_task_details(project, task_name): for details in get_fabric_tasks(project): if details[0] == task_name: return details return None def clean_key_string(key): key = key.replace('"', '\\"') # escape double quotes key = key.replace(',', '\,') # escape commas, that would be adding a new value key = key.replace('=', '\=') # escape = because that would be setting a new key return key def clean_value_string(value): value = value.replace('"', '\\"') # escape double quotes value = value.replace(',', '\,') # escape commas, that would be adding a new value value = value.replace('=', '\=') # escape = because that would be setting a new key return value def clean_arg_key_string(key): # this has to be a valid python function argument, so we can get pretty strict here key = re.sub(r'[^0-9a-zA-Z_]', '', key) # remove anything that isn't a number, letter, or underscore return key def get_key_value_string(key, config): key = clean_key_string(key) if config.data_type == config.BOOLEAN_TYPE: return key + ('' if config.get_value() else '=') elif config.data_type == config.NUMBER_TYPE: return key + '=' + str(config.get_value()) else: return '{}={}'.format(key, clean_value_string(config.get_value())) def update_config_values_from_session(configs, session): configs = configs.copy() for key, config in configs.iteritems(): if session.get('configuration_values', {}).get(key, None) is not None: config.set_value(session['configuration_values'][key]) del session['configuration_values'][key] arg_values = session.get('configuration_values', {}) return configs, arg_values def build_command(deployment, session, abort_on_prompts=True): # Get the dictionary of configurations for this stage configs = deployment.stage.get_configurations() configs, arg_values = update_config_values_from_session(configs, session) task_args = [key for key, config in configs.iteritems() if config.task_argument and config.task_name == deployment.task.name] task_configs = [key for key, config in configs.iteritems() if not config.task_argument] command_to_config = {x.replace('-', '_'): x for x 
in fabric_special_options} # Take the special env variables out normal_task_configs = list(set(task_configs) - set(command_to_config.keys())) # Special ones get set a different way special_task_configs = list(set(task_configs) & set(command_to_config.keys())) if '__' in deployment.task.name: command = 'fab ' + deployment.task.name.split('__')[-1] else: command = 'fab ' + deployment.task.name task_details = get_task_details(deployment.stage.project, deployment.task.name) task_args = list(set(task_args + [x[0] if isinstance(x, tuple) else x for x in task_details[2]])) if task_args: key_value_strings = [] for key in task_args: if key in configs: value = unicode(configs[key].get_value()) elif key in arg_values: value = unicode(arg_values[key]) else: continue cleaned_key = clean_arg_key_string(key) value = clean_value_string(value) key_value_strings.append('{}="{}"'.format(cleaned_key, value)) if key_value_strings: command += ':' command += ','.join(key_value_strings) if normal_task_configs: command += ' --set ' command += '"' + ','.join(get_key_value_string(key, configs[key]) for key in normal_task_configs) + '"' if special_task_configs: for key in special_task_configs: if key == 'key_filename': command += ' -i ' + configs[key].get_value() else: command += ' --' + get_key_value_string(command_to_config[key], configs[key]) if abort_on_prompts: command += ' --abort-on-prompts' hosts = deployment.stage.hosts.values_list('name', flat=True) if hosts: command += ' --hosts=' + ','.join(hosts) #fabfile_path, active_loc = get_fabfile_path(deployment.stage.project) fabfile_path = task_details[4] active_loc = task_details[5] command += ' --fabfile={}'.format(fabfile_path) if active_loc: return 'source {};'.format(active_loc) + ' ' + command else: return command
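# Illustrative sketch (not part of the original module): shows how
# get_key_value_string renders the different configuration data types that
# build_command passes to fab via --set. The _FakeConfig class below is a
# hypothetical stand-in for the real configuration model; only the attributes
# used by get_key_value_string are mimicked, and _demo_key_value_strings is
# never called by the application.
class _FakeConfig(object):
    BOOLEAN_TYPE = 'boolean'
    NUMBER_TYPE = 'number'
    STRING_TYPE = 'string'

    def __init__(self, data_type, value):
        self.data_type = data_type
        self.value = value

    def get_value(self):
        return self.value


def _demo_key_value_strings():
    # Booleans render as a bare key (true) or "key=" (false).
    assert get_key_value_string('debug', _FakeConfig(_FakeConfig.BOOLEAN_TYPE, True)) == 'debug'
    assert get_key_value_string('debug', _FakeConfig(_FakeConfig.BOOLEAN_TYPE, False)) == 'debug='
    # Numbers are stringified; other values get commas and equals signs escaped.
    assert get_key_value_string('workers', _FakeConfig(_FakeConfig.NUMBER_TYPE, 4)) == 'workers=4'
    assert get_key_value_string('note', _FakeConfig(_FakeConfig.STRING_TYPE, 'a,b=c')) == 'note=a\\,b\\=c'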
from weakref import WeakSet from syn.five import STR, strf from collections import Iterable from random import randrange, choice from syn.base_utils import hasmethod, message, nearest_base, get_typename, \ istr, rand_primitive, collection_equivalent from syn.types import generate, enumeration_value #------------------------------------------------------------------------------- # Type Registry GENERABLE_TYPE_REGISTRY = WeakSet() #------------------------------------------------------------------------------- # Base Class class Type(object): '''A representation for various possible types syn supports.''' __slots__ = ('__weakref__',) register_generable = False def __init__(self): if self.register_generable: GENERABLE_TYPE_REGISTRY.add(self) def __eq__(self, other): return type(self) is type(other) def __ne__(self, other): return not self == other def __hash__(self): return hash(id(self)) def check(self, value): raise NotImplementedError @classmethod def dispatch(cls, obj): if isinstance(obj, Type): return obj if obj is None: return AnyType() if isinstance(obj, type): if issubclass(obj, TypeExtension): return obj() return TypeType(obj) if isinstance(obj, tuple): # Treat a singleton tuple as its element if len(obj) == 1: return cls.dispatch(obj[0]) return MultiType(obj) # Exclude bytes b/c it is more closely related to string than list if isinstance(obj, Iterable) and not isinstance(obj, STR + (bytes,)): return ValuesType(obj) raise TypeError('Unable to dispatch appropriate type represetation' ' for {}'.format(obj)) def coerce(self, value, **kwargs): raise NotImplementedError def display(self): '''Returns a quasi-intuitive string representation of the type.''' raise NotImplementedError def enumeration_value(self, x, **kwargs): '''Return the enumeration value for *x* for this type.''' raise NotImplementedError def generate(self, **kwargs): '''Returns a value for this type.''' raise NotImplementedError def query(self, value): try: self.check(value) return True except TypeError: return False def query_exception(self, value): try: self.check(value) return True, None except TypeError as e: return False, e def rst(self): '''Returns a string representation of the type for RST documentation.''' return self.display() def validate(self, value): raise NotImplementedError #------------------------------------------------------------------------------- # Any Type class AnyType(Type): def check(self, value): pass def coerce(self, value, **kwargs): return value def display(self): return 'any' def enumeration_value(self, x, **kwargs): max_enum = kwargs.get('max_enum', 20) types = kwargs.get('types', GENERABLE_TYPE_REGISTRY) N = randrange(min(len(types), max_enum)) for k, typ in enumerate(types): if k == N: try: return typ.enumeration_value(x, **kwargs) except: return enumeration_value(int, x, **kwargs) def generate(self, **kwargs): max_enum = kwargs.get('max_enum', 20) types = kwargs.get('types', GENERABLE_TYPE_REGISTRY) N = randrange(min(len(types), max_enum)) for k, typ in enumerate(types): if k == N: try: return typ.generate(**kwargs) except: return rand_primitive() def validate(self, value): pass #------------------------------------------------------------------------------- # Type Type class TypeType(Type): __slots__ = ('type', 'call_coerce', 'call_validate') register_generable = True def __init__(self, typ): super(TypeType, self).__init__() self.type = typ self.call_coerce = hasmethod(self.type, 'coerce') self.call_validate = hasmethod(self.type, 'validate') def __eq__(self, other): if super(TypeType, 
self).__eq__(other): if self.type == other.type: if self.call_coerce == other.call_coerce: if self.call_validate == other.call_validate: return True return False def __hash__(self): return hash(id(self)) def check(self, value): if not isinstance(value, self.type): raise TypeError('Expected value of type {}; got: {}' .format(self.type, value)) def coerce(self, value, **kwargs): if self.query(value): return value try: if self.call_coerce: return self.type.coerce(value, **kwargs) return self.type(value) except Exception as e: raise TypeError('Cannot coerce {} to type {}: {}' .format(value, self.type, message(e))) def display(self): return get_typename(self.type) def enumeration_value(self, x, **kwargs): return enumeration_value(self.type, x, **kwargs) def generate(self, **kwargs): return generate(self.type, **kwargs) def rst(self): return '*' + self.display() + '*' def validate(self, value): self.check(value) if self.call_validate: value.validate() #------------------------------------------------------------------------------- # Values Type class ValuesType(Type): '''A set (or list) of values, any of which is valid. Think of this is a denotational definition of the type. ''' __slots__ = ('values', 'indexed_values') register_generable = True def __init__(self, values): super(ValuesType, self).__init__() self.values = values self.indexed_values = values if not hasattr(values, '__getitem__'): self.indexed_values = list(values) def __eq__(self, other): if super(ValuesType, self).__eq__(other): if collection_equivalent(self.indexed_values, other.indexed_values): return True return False def __hash__(self): return hash(id(self)) def check(self, value): if value not in self.values: raise TypeError('Invalid value: {}'.format(value)) def coerce(self, value, **kwargs): try: self.check(value) except TypeError as e: raise TypeError('Cannot coerce {}: {}'.format(value, message(e))) return value def display(self): return istr(list(self.values)) def enumeration_value(self, x, **kwargs): idx = x % len(self.indexed_values) return self.indexed_values[idx] def generate(self, **kwargs): return choice(self.indexed_values) def validate(self, value): self.check(value) #------------------------------------------------------------------------------- # MultiType class MultiType(Type): '''A tuple of type specifiers, any of which may be valid. 
''' __slots__ = ('types', 'typestr', 'typelist', 'typemap', 'is_typelist') register_generable = True def __init__(self, types): super(MultiType, self).__init__() self.is_typelist = False if all(isinstance(typ, type) for typ in types): self.is_typelist = True self.typelist = types self.typestr = ', '.join(map(strf, types)) self.types = [Type.dispatch(typ) for typ in types] self.typemap = dict(zip(types, self.types)) def __eq__(self, other): if super(MultiType, self).__eq__(other): if self.types == other.types: return True return False def __hash__(self): return hash(id(self)) def check(self, value): if self.is_typelist: if isinstance(value, self.typelist): return self.typemap[nearest_base(type(value), self.typelist)] else: for typ in self.types: try: typ.check(value) return typ except TypeError: pass raise TypeError("Value '{}' is not any valid type: {}" .format(value, self.typestr)) def coerce(self, value, **kwargs): for typ in self.types: try: return typ.coerce(value, **kwargs) except TypeError: pass raise TypeError('Cannot coerce {} to any valid type: {}' .format(value, self.typestr)) def display(self): strs = [typ.display() for typ in self.types] return ' | '.join(strs) def enumeration_value(self, x, **kwargs): idx = x % len(self.types) return self.types[idx].enumeration_value(x, **kwargs) def generate(self, **kwargs): excludes = kwargs.get('exclude_types', []) if excludes: excludes = [Type.dispatch(typ) if not isinstance(typ, Type) else typ for typ in excludes] types = [typ for typ in self.types if typ not in excludes] else: types = self.types typ = choice(types) return typ.generate(**kwargs) def rst(self): strs = [typ.rst() for typ in self.types] return ' | '.join(strs) def validate(self, value): typ = self.check(value) typ.validate(value) #------------------------------------------------------------------------------- # Set class Set(Type): '''For explicitly wrapping a SetNode as a type (since automatic dispatching cannot be implemented at this level). ''' register_generable = True def __init__(self, set): super(Set, self).__init__() self.set = set self.set.validate() def __eq__(self, other): if super(Set, self).__eq__(other): if self.set == other.set: return True return False def __hash__(self): return hash(id(self)) def check(self, value): if not self.set.hasmember(value): raise TypeError('Set does not contain value: {}'.format(value)) def coerce(self, value, **kwargs): try: self.check(value) except TypeError as e: raise TypeError('Cannot coerce {}: {}'.format(value, message(e))) return value def display(self): return '<Set>' def generate(self, **kwargs): return self.set.sample(**kwargs) def validate(self, value): self.check(value) #------------------------------------------------------------------------------- # Schema class Schema(Type): '''For explicitly wrapping a Schema as a type (since automatic dispatching cannot be implemented at this level). ''' register_generable = True def __init__(self, schema): super(Schema, self).__init__() self.schema = schema def __eq__(self, other): if super(Schema, self).__eq__(other): if self.schema == other.schema: return True return False def __hash__(self): return hash(id(self)) def check(self, value): if not self.schema.match(value): raise TypeError('Schema does not match: {}'.format(value)) def coerce(self, value, **kwargs): # NOTE: this might not be the right behavior, ideally considered. # However, it is good enough for our present needs. 
return value def display(self): return '<Schema>' def generate(self, **kwargs): return self.schema.sample(**kwargs) def validate(self, value): self.check(value) #------------------------------------------------------------------------------- # TypeExtension class TypeExtension(Type): '''For extending the type system. ''' def validate(self, value): self.check(value) #------------------------------------------------------------------------------- # __all__ __all__ = ('Type', 'AnyType', 'TypeType', 'ValuesType', 'MultiType', 'Set', 'Schema', 'TypeExtension') #-------------------------------------------------------------------------------
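# Illustrative sketch (not part of the original module): shows how Type.dispatch
# maps plain Python specifiers onto the Type subclasses above and how
# query()/coerce() behave. The function name is hypothetical and is not
# exported via __all__.
def _demo_dispatch():
    # None dispatches to AnyType, a class to TypeType, a tuple to MultiType,
    # and any other non-string iterable to ValuesType.
    assert isinstance(Type.dispatch(None), AnyType)
    assert isinstance(Type.dispatch(int), TypeType)
    assert isinstance(Type.dispatch((int, float)), MultiType)
    assert isinstance(Type.dispatch([1, 2, 3]), ValuesType)

    # query() is a non-raising check(); coerce() converts when it can.
    t = Type.dispatch(int)
    assert t.query(3) and not t.query('3')
    assert t.coerce('3') == 3

    # A singleton tuple is treated as its sole element.
    assert isinstance(Type.dispatch((int,)), TypeType)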
#!/usr/bin/env python from nose.tools import * from nose import SkipTest import networkx as nx import io class TestGEXF(object): @classmethod def setupClass(cls): try: import xml.etree.ElementTree except ImportError: raise SkipTest('xml.etree.ElementTree not available.') def setUp(self): self.simple_directed_data="""<?xml version="1.0" encoding="UTF-8"?> <gexf xmlns="http://www.gexf.net/1.1draft" version="1.1"> <graph mode="static" defaultedgetype="directed"> <nodes> <node id="0" label="Hello" /> <node id="1" label="Word" /> </nodes> <edges> <edge id="0" source="0" target="1" /> </edges> </graph> </gexf> """ self.simple_directed_graph=nx.DiGraph() self.simple_directed_graph.add_node('0',label='Hello') self.simple_directed_graph.add_node('1',label='World') self.simple_directed_graph.add_edge('0','1',id='0') self.simple_directed_fh = \ io.BytesIO(self.simple_directed_data.encode('UTF-8')) self.attribute_data="""<?xml version="1.0" encoding="UTF-8"?> <gexf xmlns="http://www.gexf.net/1.1draft" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.gexf.net/1.1draft http://www.gexf.net/1.1draft/gexf.xsd" version="1.1"> <meta lastmodifieddate="2009-03-20"> <creator>Gephi.org</creator> <description>A Web network</description> </meta> <graph defaultedgetype="directed"> <attributes class="node"> <attribute id="0" title="url" type="string"/> <attribute id="1" title="indegree" type="integer"/> <attribute id="2" title="frog" type="boolean"> <default>true</default> </attribute> </attributes> <nodes> <node id="0" label="Gephi"> <attvalues> <attvalue for="0" value="http://gephi.org"/> <attvalue for="1" value="1"/> <attvalue for="2" value="false"/> </attvalues> </node> <node id="1" label="Webatlas"> <attvalues> <attvalue for="0" value="http://webatlas.fr"/> <attvalue for="1" value="2"/> <attvalue for="2" value="False"/> </attvalues> </node> <node id="2" label="RTGI"> <attvalues> <attvalue for="0" value="http://rtgi.fr"/> <attvalue for="1" value="1"/> <attvalue for="2" value="true"/> </attvalues> </node> <node id="3" label="BarabasiLab"> <attvalues> <attvalue for="0" value="http://barabasilab.com"/> <attvalue for="1" value="1"/> <attvalue for="2" value="True"/> </attvalues> </node> </nodes> <edges> <edge id="0" source="0" target="1"/> <edge id="1" source="0" target="2"/> <edge id="2" source="1" target="0"/> <edge id="3" source="2" target="1"/> <edge id="4" source="0" target="3"/> </edges> </graph> </gexf> """ self.attribute_graph=nx.DiGraph() self.attribute_graph.graph['node_default']={'frog':True} self.attribute_graph.add_node('0', label='Gephi', url='http://gephi.org', indegree=1, frog=False) self.attribute_graph.add_node('1', label='Webatlas', url='http://webatlas.fr', indegree=2, frog=False) self.attribute_graph.add_node('2', label='RTGI', url='http://rtgi.fr', indegree=1, frog=True) self.attribute_graph.add_node('3', label='BarabasiLab', url='http://barabasilab.com', indegree=1, frog=True) self.attribute_graph.add_edge('0','1',id='0') self.attribute_graph.add_edge('0','2',id='1') self.attribute_graph.add_edge('1','0',id='2') self.attribute_graph.add_edge('2','1',id='3') self.attribute_graph.add_edge('0','3',id='4') self.attribute_fh = io.BytesIO(self.attribute_data.encode('UTF-8')) self.simple_undirected_data="""<?xml version="1.0" encoding="UTF-8"?> <gexf xmlns="http://www.gexf.net/1.1draft" version="1.1"> <graph mode="static" defaultedgetype="undirected"> <nodes> <node id="0" label="Hello" /> <node id="1" label="Word" /> </nodes> <edges> <edge id="0" source="0" 
target="1" /> </edges> </graph> </gexf> """ self.simple_undirected_graph=nx.Graph() self.simple_undirected_graph.add_node('0',label='Hello') self.simple_undirected_graph.add_node('1',label='World') self.simple_undirected_graph.add_edge('0','1',id='0') self.simple_undirected_fh = io.BytesIO(self.simple_undirected_data.encode('UTF-8')) def test_read_simple_directed_graphml(self): G=self.simple_directed_graph H=nx.read_gexf(self.simple_directed_fh) assert_equal(sorted(G.nodes()),sorted(H.nodes())) assert_equal(sorted(G.edges()),sorted(H.edges())) assert_equal(sorted(G.edges(data=True)), sorted(H.edges(data=True))) self.simple_directed_fh.seek(0) def test_write_read_simple_directed_graphml(self): G=self.simple_directed_graph fh=io.BytesIO() nx.write_gexf(G,fh) fh.seek(0) H=nx.read_gexf(fh) assert_equal(sorted(G.nodes()),sorted(H.nodes())) assert_equal(sorted(G.edges()),sorted(H.edges())) assert_equal(sorted(G.edges(data=True)), sorted(H.edges(data=True))) self.simple_directed_fh.seek(0) def test_read_simple_undirected_graphml(self): G=self.simple_undirected_graph H=nx.read_gexf(self.simple_undirected_fh) assert_equal(sorted(G.nodes()),sorted(H.nodes())) assert_equal( sorted(sorted(e) for e in G.edges()), sorted(sorted(e) for e in H.edges())) self.simple_undirected_fh.seek(0) def test_read_attribute_graphml(self): G=self.attribute_graph H=nx.read_gexf(self.attribute_fh) assert_equal(sorted(G.nodes(True)),sorted(H.nodes(data=True))) ge=sorted(G.edges(data=True)) he=sorted(H.edges(data=True)) for a,b in zip(ge,he): assert_equal(a,b) self.attribute_fh.seek(0) def test_directed_edge_in_undirected(self): s="""<?xml version="1.0" encoding="UTF-8"?> <gexf xmlns="http://www.gexf.net/1.1draft" version="1.1"> <graph mode="static" defaultedgetype="undirected"> <nodes> <node id="0" label="Hello" /> <node id="1" label="Word" /> </nodes> <edges> <edge id="0" source="0" target="1" type="directed"/> </edges> </graph> </gexf> """ fh = io.BytesIO(s.encode('UTF-8')) assert_raises(nx.NetworkXError,nx.read_gexf,fh) def test_undirected_edge_in_directed(self): s="""<?xml version="1.0" encoding="UTF-8"?> <gexf xmlns="http://www.gexf.net/1.1draft" version="1.1"> <graph mode="static" defaultedgetype="directed"> <nodes> <node id="0" label="Hello" /> <node id="1" label="Word" /> </nodes> <edges> <edge id="0" source="0" target="1" type="undirected"/> </edges> </graph> </gexf> """ fh = io.BytesIO(s.encode('UTF-8')) assert_raises(nx.NetworkXError,nx.read_gexf,fh) def test_key_raises(self): s="""<?xml version="1.0" encoding="UTF-8"?> <gexf xmlns="http://www.gexf.net/1.1draft" version="1.1"> <graph mode="static" defaultedgetype="directed"> <nodes> <node id="0" label="Hello"> <attvalues> <attvalue for='0' value='1'/> </attvalues> </node> <node id="1" label="Word" /> </nodes> <edges> <edge id="0" source="0" target="1" type="undirected"/> </edges> </graph> </gexf> """ fh = io.BytesIO(s.encode('UTF-8')) assert_raises(nx.NetworkXError,nx.read_gexf,fh) def test_relabel(self): s="""<?xml version="1.0" encoding="UTF-8"?> <gexf xmlns="http://www.gexf.net/1.1draft" version="1.1"> <graph mode="static" defaultedgetype="directed"> <nodes> <node id="0" label="Hello" /> <node id="1" label="Word" /> </nodes> <edges> <edge id="0" source="0" target="1"/> </edges> </graph> </gexf> """ fh = io.BytesIO(s.encode('UTF-8')) G=nx.read_gexf(fh,relabel=True) assert_equal(sorted(G.nodes()),["Hello","Word"]) def test_default_attribute(self): G=nx.Graph() G.add_node(1,label='1',color='green') nx.add_path(G, [0,1,2,3]) G.add_edge(1,2,foo=3) 
G.graph['node_default']={'color':'yellow'} G.graph['edge_default']={'foo':7} fh = io.BytesIO() nx.write_gexf(G,fh) fh.seek(0) H=nx.read_gexf(fh,node_type=int) assert_equal(sorted(G.nodes()),sorted(H.nodes())) assert_equal( sorted(sorted(e) for e in G.edges()), sorted(sorted(e) for e in H.edges())) # Reading a gexf graph always sets mode attribute to either # 'static' or 'dynamic'. Remove the mode attribute from the # read graph for the sake of comparing remaining attributes. del H.graph['mode'] assert_equal(G.graph,H.graph) def test_serialize_ints_to_strings(self): G=nx.Graph() G.add_node(1,id=7,label=77) fh = io.BytesIO() nx.write_gexf(G,fh) fh.seek(0) H=nx.read_gexf(fh,node_type=int) assert_equal(list(H),[7]) assert_equal(H.node[7]['label'],'77') def test_write_with_node_attributes(self): # Addresses #673. G = nx.path_graph(4) for i in range(4): G.node[i]['id'] = i G.node[i]['label'] = i G.node[i]['pid'] = i expected = """<gexf version="1.1" xmlns="http://www.gexf.net/1.1draft" xmlns:viz="http://www.gexf.net/1.1draft/viz" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.w3.org/2001/XMLSchema-instance"> <graph defaultedgetype="undirected" mode="static"> <nodes> <node id="0" label="0" pid="0" /> <node id="1" label="1" pid="1" /> <node id="2" label="2" pid="2" /> <node id="3" label="3" pid="3" /> </nodes> <edges> <edge id="0" source="0" target="1" /> <edge id="1" source="1" target="2" /> <edge id="2" source="2" target="3" /> </edges> </graph> </gexf>""" obtained = '\n'.join(nx.generate_gexf(G)) assert_equal( expected, obtained ) def test_bool(self): G=nx.Graph() G.add_node(1, testattr=True) fh = io.BytesIO() nx.write_gexf(G,fh) fh.seek(0) H=nx.read_gexf(fh,node_type=int) assert_equal(H.node[1]['testattr'], True)
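# Illustrative sketch (not part of the original test class): the minimal
# write/read round trip that the tests above exercise repeatedly, shown on its
# own. The function name is hypothetical and is deliberately not collected by
# nose.
def _example_gexf_round_trip():
    G = nx.path_graph(3)
    fh = io.BytesIO()
    nx.write_gexf(G, fh)
    fh.seek(0)
    # node_type=int converts the string ids in the file back to ints.
    H = nx.read_gexf(fh, node_type=int)
    assert sorted(G.nodes()) == sorted(H.nodes())
    assert sorted(sorted(e) for e in G.edges()) == sorted(sorted(e) for e in H.edges())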
#!/usr/bin/python ''' Created on December 11, 2012 @author: Sybrand Strauss ''' import wx import settings import panda_menu import sys import os import os.path import subprocess import threading import response_event import version import messages import logging import config from bucket.abstract import ProgressMessage if sys.platform == 'win32': def open_folder(path): if not os.path.exists(path): os.makedirs(path) subprocess.call(['explorer', path]) class PandaMenu(wx.Menu): def __init__(self): wx.Menu.__init__(self) image = wx.Image('gfx/digital-panda-online-1616.png', wx.BITMAP_TYPE_ANY) self.bitmap = image.ConvertToBitmap() wx.EVT_PAINT(self, self.on_paint) class TaskBar(wx.TaskBarIcon): """ Icon that appears on TaskBar - Windows <= 8 have taskbar icons - this class will run fine in those environments. - Ubuntu with Unity >= 11.04 needs to use an Application Indicator - see http://unity.ubuntu.com/projects/appindicators/ - No idea what would have to be done for mac at this point in time. """ def __init__(self, outputQueue, inputQueue): super(TaskBar, self).__init__() self.icon = wx.IconFromBitmap(wx.Bitmap("gfx/icon1616.png")) #icon = wx.Icon('digital-panda-icon.ico', wx.BITMAP_TYPE_ICO) self.Bind(wx.EVT_TASKBAR_LEFT_DOWN, self.on_left_down) self.Bind(wx.EVT_TASKBAR_RIGHT_DOWN, self.on_right_down) self.dialog = None self.outputQueue = outputQueue self.inputQueue = inputQueue self.set_status('Starting...') """self.timer = wx.Timer(self) self.timer.Bind(wx.EVT_TIMER, self.on_timer) self.timer.Start(100)""" self.advancedMenu = self.create_advanced_menu() self.inputQueueThread = threading.Thread(target=self.queue_listener) self.isRunning = True self.inputQueueThread.start() def queue_listener(self): while self.isRunning: item = self.inputQueue.get() if item: if isinstance(item, messages.ShowSettings): event = panda_menu.SettingsEvent() wx.PostEvent(self.advancedMenu, event) elif isinstance(item, messages.Status): self.set_status(item.message) wx.PostEvent(self.advancedMenu, response_event.ResponseEvent(attr1=item.message)) elif isinstance(item, messages.Stop): break elif isinstance(item, ProgressMessage): parts = item.path.split('/') mBRead = item.bytes_read / 1024 / 1024 mBExpected = item.bytes_expected / 1024 / 1024 mBps = item._bytes_per_second / 1024 / 1024 message = ('Downloading %s (%.2fMB/%.2fMB @ %.2fMBps)' % (parts[-1], mBRead, mBExpected, mBps)) self.set_status(message) pass else: try: self.set_status(item) wx.PostEvent(self.advancedMenu, response_event.ResponseEvent(attr1=item)) finally: logging.info('exception') pass def set_status(self, status): self.status = status self.SetIcon(self.icon, 'Digital Panda v%s\r\n' 'Cloud Storage Sync Client\r\n' '%s' % (version.version, self.status)) if self.dialog: self.dialog.SetStatus(self.status) def on_left_down(self, event): self.show_advanced_menu() def on_right_down(self, event): # showing system default popup menu - will probably switch # over to using ONLY the advanced menu self.PopupMenu(self.create_popup_menu()) def create_popup_menu(self): """ Returns a popup menu. 
""" menu = wx.Menu() # open folder item = wx.MenuItem(menu, -1, 'Open Digital Panda folder') menu.Bind(wx.EVT_MENU, self.open_folder, id=item.GetId()) menu.AppendItem(item) # settings item = wx.MenuItem(menu, -1, 'Settings...') menu.Bind(wx.EVT_MENU, self.show_settings, id=item.GetId()) menu.AppendItem(item) # quit item = wx.MenuItem(menu, -1, 'Quit') menu.Bind(wx.EVT_MENU, self.on_exit, id=item.GetId()) menu.AppendItem(item) # status item = wx.MenuItem(menu, -1, 'Status: %s' % self.status) menu.AppendItem(item) return menu def create_advanced_menu(self): """ Returns an "advanced menu" - this is just a popup menu with nice graphics on it. """ advancedMenu = panda_menu.PandaMenu(None, -1, 'Advanced Menu') advancedMenu.Bind(panda_menu.EVT_EXIT, self.on_exit) advancedMenu.Bind(panda_menu.EVT_SETTINGS, self.show_settings) advancedMenu.Bind(panda_menu.EVT_OPEN_FOLDER, self.open_folder) return advancedMenu def on_exit(self, event): self.isRunning = False self.inputQueue.put(messages.Stop()) logging.debug('putting stop on queue') self.outputQueue.put(messages.Stop()) wx.CallAfter(self.Destroy) if self.dialog: self.dialog.Destroy() if self.advancedMenu: self.advancedMenu.Destroy() def show_settings(self, event): logging.debug('show_settings') if not self.dialog: self.dialog = settings.Settings(None, -1, 'Digital Panda Settings', self.status, self.outputQueue) self.dialog.Center() self.dialog.Show(True) else: self.dialog.SetStatus(self.status) self.dialog.Show(True) # simply calling .Raise() doesn't work in windows # so we change the style to on top, and back again style = self.dialog.GetWindowStyle() self.dialog.SetWindowStyle(style | wx.STAY_ON_TOP) self.dialog.SetWindowStyle(style) def open_folder(self, event): """ Open the sync folder (and creates it if it doesn't exist) """ if self.advancedMenu: self.advancedMenu.Show(False) home = os.path.expanduser('~') c = config.Config() panda = None if c.username: # try for full path if there is a username panda = os.path.join(home, 'Digital Panda', c.username) if not os.path.exists(panda): # if the path doesn't exist - reset panda = None if not panda: # get base folder (without acccount) panda = os.path.join(home, 'Digital Panda') if not os.path.exists(panda): try: os.makedirs(panda) except: print "TODO: need to handle folder creation failure!" open_folder(panda) def show_advanced_menu(self): menuSize = self.advancedMenu.GetSize() mousePosition = wx.GetMousePosition() pos = (mousePosition[0], mousePosition[1] - menuSize.height) self.advancedMenu.Move(pos) self.advancedMenu.Show() self.advancedMenu.Raise()
#!/usr/bin/env python # # Copyright 2007 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Container of APIProxy stubs for more convenient unittesting. Classes/variables/functions defined here: - `APIProxyStubMap`: Container of APIProxy stubs. - `apiproxy`: Global instance of an APIProxyStubMap. - `MakeSyncCall`: APIProxy entry point. - `UserRPC`: User-visible class wrapping asynchronous RPCs. """ from concurrent import futures import inspect import threading import six from google.appengine.api import apiproxy_rpc from google.appengine.runtime import apiproxy_errors def CreateRPC(service, stubmap=None): """Creates a RPC instance for the given service. The instance is suitable for talking to remote services. Each RPC instance can be used only once, and should not be reused. Args: service: `string`. Represents which service to call. stubmap: Optional `APIProxyStubMap` instance, for dependency injection. Returns: The rpc object. Raises: `AssertionError` or `RuntimeError` if the stub for service doesn't supply a `CreateRPC` method. """ if stubmap is None: stubmap = apiproxy stub = stubmap.GetStub(service) assert stub, 'No api proxy found for service "%s"' % service assert hasattr(stub, 'CreateRPC'), (('The service "%s" doesn\'t have ' + 'a CreateRPC method.') % service) return stub.CreateRPC() def MakeSyncCall(service, call, request, response, stubmap=None): """The APIProxy entry point for a synchronous API call. Args: service: `string`. Represents which service to call. call: `string`. Represents which function to call. request: Protocol buffer for the request. response: Protocol buffer for the response. stubmap: Optional `APIProxyStubMap` instance, for dependency injection. Returns: Response protocol buffer or `None`. Some implementations may return a response protocol buffer instead of modifying `response`. Caller must use returned value in such cases. If `response` is modified then returns `None`. Raises: `apiproxy_errors.Error` or a subclass. """ if stubmap is None: stubmap = apiproxy return stubmap.MakeSyncCall(service, call, request, response) class ListOfHooks(object): """An ordered collection of hooks for a particular API call. A hook is a function that has exactly the same signature as a service stub. It will be called before or after an API hook is executed, depending on whether this list is for precall of postcall hooks. Hooks can be used for debugging purposes (check certain pre- or postconditions on api calls) or to apply patches to protocol buffers before/after a call gets submitted. """ def __init__(self): """Constructor.""" self.__content = [] self.__unique_keys = set() def __len__(self): """Returns the amount of elements in the collection.""" return self.__content.__len__() def __Insert(self, index, key, function, service=None): """Appends a hook at a certain position in the list. Args: index: the index of where to insert the function key: a unique key (within the module) for this particular function. 
If something from the same module with the same key is already registered, nothing will be added. function: the hook to be added. service: optional argument that restricts the hook to a particular api Returns: True if the collection was modified. """ unique_key = (key, inspect.getmodule(function)) if unique_key in self.__unique_keys: return False argsspec_func = inspect.getfullargspec if six.PY3 else inspect.getargspec num_args = len(argsspec_func(function)[0]) if (inspect.ismethod(function)): num_args -= 1 self.__content.insert(index, (key, function, service, num_args)) self.__unique_keys.add(unique_key) return True def Append(self, key, function, service=None): """Appends a hook at the end of the list. Args: key: A unique key (within the module) for this particular function. If something from the same module with the same key is already registered, nothing will be added. function: The hook to be added. service: Optional argument that restricts the hook to a particular API. Returns: `True` if the collection was modified. """ return self.__Insert(len(self), key, function, service) def Push(self, key, function, service=None): """Inserts a hook at the beginning of the list. Args: key: A unique key (within the module) for this particular function. If something from the same module with the same key is already registered, nothing will be added. function: The hook to be added. service: Optional argument that restricts the hook to a particular API. Returns: `True` if the collection was modified. """ return self.__Insert(0, key, function, service) def Clear(self): """Removes all hooks from the list (useful for unit tests).""" self.__content = [] self.__unique_keys = set() def Call(self, service, call, request, response, rpc=None, error=None): """Invokes all hooks in this collection. NOTE: For backwards compatibility, if error is not `None`, hooks with 4 or 5 arguments are *not* called. This situation (`error=None`) only occurs when the RPC request raised an exception; in the past no hooks would be called at all in that case. Args: service: `string`. Represents which service to call. call: `string`. Representswhich function to call. request: Protocol buffer for the request. response: Protocol buffer for the response. rpc: Optional RPC used to make this call. error: Optional `Exception` instance to be passed as sixth argument. """ for key, function, srv, num_args in self.__content: if srv is None or srv == service: if num_args == 6: function(service, call, request, response, rpc, error) elif error is not None: pass elif num_args == 5: function(service, call, request, response, rpc) else: function(service, call, request, response) class _CancelFuture(futures.Future): pass class WaitCanceller(object): """A helper object that can be used to cancel a `UserRPC.wait_any()` call. An instance of this class can be passed in the RPCs list to `UserRPC.wait_any()` to cancel the wait. """ def __init__(self): self.future = _CancelFuture() self.future._canceller = self def cancel(self): """Indicates that the wait should be cancelled.""" if not self.future.cancelled(): self.future.cancel() self.future.set_running_or_notify_cancel() class APIProxyStubMap(object): """Container of APIProxy stubs for more convenient unittesting. Stubs may be either trivial implementations of APIProxy services (e.g. DatastoreFileStub, UserServiceStub) or "real" implementations. For unittests, we may want to mix and match real and trivial implementations of services in order to better focus testing on individual service implementations. 
To achieve this, we allow the client to attach stubs to service names, as well as define a default stub to be used if no specific matching stub is identified. """ def __init__(self, default_stub=None): """Constructor. Args: default_stub: optional stub. `default_stub` will be used whenever no specific matching stub is found. """ self.__stub_map = {} self.__default_stub = default_stub self.__precall_hooks = ListOfHooks() self.__postcall_hooks = ListOfHooks() def SetDefaultStub(self, stub): self.__default_stub = stub def GetPreCallHooks(self): """Gets a collection for all precall hooks.""" return self.__precall_hooks def GetPostCallHooks(self): """Gets a collection for all precall hooks.""" return self.__postcall_hooks def ReplaceStub(self, service, stub): """Replace the existing stub for the specified service with a new one. NOTE: This is a risky operation; external callers should use this with caution. Args: service: string stub: stub """ self.__stub_map[service] = stub if service == 'datastore': self.RegisterStub('datastore_v3', stub) def RegisterStub(self, service, stub): """Register the provided stub for the specified service. Args: service: string stub: stub """ assert service not in self.__stub_map, repr(service) self.ReplaceStub(service, stub) def GetStub(self, service): """Retrieve the stub registered for the specified service. Args: service: string Returns: stub Returns the stub registered for 'service', and returns the default stub if no such stub is found. """ return self.__stub_map.get(service, self.__default_stub) def _CopyStubMap(self): """Get a copy of the stub map. For testing only. Returns: Get a shallow copy of the stub map. """ return dict(self.__stub_map) def MakeSyncCall(self, service, call, request, response): """The APIProxy entry point. Args: service: string representing which service to call call: string representing which function to call request: protocol buffer for the request response: protocol buffer for the response Returns: Response protocol buffer or `None`. Some implementations may return a response protocol buffer instead of modifying `response`. Caller must use returned value in such cases. If `response` is modified then returns `None`. Raises: `apiproxy_errors.Error` or a subclass. """ stub = self.GetStub(service) assert stub, 'No api proxy found for service "%s"' % service if hasattr(stub, 'CreateRPC'): rpc = stub.CreateRPC() self.__precall_hooks.Call(service, call, request, response, rpc) try: rpc.MakeCall(service, call, request, response) rpc.Wait() rpc.CheckSuccess() except Exception as err: self.__postcall_hooks.Call(service, call, request, response, rpc, err) raise else: self.__postcall_hooks.Call(service, call, request, response, rpc) else: self.__precall_hooks.Call(service, call, request, response) try: returned_response = stub.MakeSyncCall(service, call, request, response) except Exception as err: self.__postcall_hooks.Call(service, call, request, response, None, err) raise else: self.__postcall_hooks.Call(service, call, request, returned_response or response) return returned_response def CancelApiCalls(self): if self.__default_stub: self.__default_stub.CancelApiCalls() class UserRPC(object): """Wrapper class for asynchronous RPC. Simplest low-level usage pattern: ```python rpc = UserRPC('service', [deadline], [callback]) rpc.make_call('method', request, response) . . . 
rpc.wait() rpc.check_success() ``` However, a service module normally provides a wrapper so that the typical usage pattern becomes more like this: ```python from google.appengine.api import service rpc = service.create_rpc([deadline], [callback]) service.make_method_call(rpc, [service-specific-args]) . . . rpc.wait() result = rpc.get_result() ``` The `service.make_method_call()` function sets a service- and method-specific hook function that is called by `rpc.get_result()` with the rpc object as its first argument, and a service-specific value as its second argument. The hook function should call `rpc.check_success()` and then extract the user-level result from the `rpc.response` protobuffer. Additional arguments may be passed from `make_method_call()` to the `get_result` hook via the second argument. Also note `wait_any()` and `wait_all()`, which wait for multiple RPCs. """ __method = None __get_result_hook = None __user_data = None __postcall_hooks_called = False __must_call_user_callback = False class MyLocal(threading.local): """Class to hold per-thread class level attributes.""" may_interrupt_wait = False __local = MyLocal() def __init__(self, service, deadline=None, callback=None, stubmap=None): """Constructor. Args: service: The service name. deadline: Optional deadline. Default depends on the implementation. callback: Optional argument-less callback function. stubmap: Optional APIProxyStubMap instance, for dependency injection. """ if stubmap is None: stubmap = apiproxy self.__stubmap = stubmap self.__service = service self.__rpc = CreateRPC(service, stubmap) self.__rpc.deadline = deadline self.__rpc.callback = self.__internal_callback self.callback = callback self.__class__.__local.may_interrupt_wait = False def __internal_callback(self): """This is the callback set on the low-level RPC object. It sets a flag on the current object indicating that the high-level callback should now be called. If interrupts are enabled, it also interrupts the current wait_any() call by raising an exception. """ self.__must_call_user_callback = True self.__rpc.callback = None if self.__class__.__local.may_interrupt_wait and not self.__rpc.exception: raise apiproxy_errors.InterruptedError(None, self.__rpc) @property def service(self): """Return the service name.""" return self.__service @property def method(self): """Return the method name.""" return self.__method @property def deadline(self): """Return the deadline, if set explicitly (otherwise `None`).""" return self.__rpc.deadline @property def request(self): """Return the request protocol buffer object.""" return self.__rpc.request @property def response(self): """Return the response protocol buffer object.""" return self.__rpc.response @property def state(self): """Return the RPC state. Possible values are attributes of apiproxy_rpc.RPC: IDLE, RUNNING, FINISHING. """ return self.__rpc.state @property def get_result_hook(self): """Return the get-result hook function.""" return self.__get_result_hook @property def user_data(self): """Return the user data for the hook function.""" return self.__user_data @property def future(self): """Return the underlying RPC's future, if present.""" return getattr(self.__rpc, 'future', None) def make_call(self, method, request, response, get_result_hook=None, user_data=None): """Initiate a call. Args: method: The method name. request: The request protocol buffer. response: The response protocol buffer. get_result_hook: Optional get-result hook function.
If not `None`, this must be a function with exactly one argument, the RPC object (`self`). Its return value is returned from `get_result()`. user_data: Optional additional arbitrary data for the get-result hook function. This can be accessed as `rpc.user_data`. The type of this value is up to the service module. This function may only be called once per RPC object. It sends the request to the remote server, but does not wait for a response. This allows concurrent execution of the remote call and further local processing (e.g., making additional remote calls). Before the call is initiated, the precall hooks are called. """ assert self.__rpc.state == apiproxy_rpc.RPC.IDLE, repr(self.state) self.__method = method self.__get_result_hook = get_result_hook self.__user_data = user_data self.__stubmap.GetPreCallHooks().Call( self.__service, method, request, response, self.__rpc) self.__rpc.MakeCall(self.__service, method, request, response) def wait(self): """Wait for the call to complete, and call callback if needed. This and `wait_any()`/`wait_all()` are the only times callback functions may be called. (However, note that `check_success()` and `get_result()` call `wait()`.) Waiting for one RPC will not cause callbacks for other RPCs to be called. Callback functions may call `check_success()` and `get_result()`. Callbacks are called without arguments; if a callback needs access to the RPC object a Python nested function (a.k.a. closure) or a bound method may be used. To facilitate this, the callback may be assigned after the RPC object is created (but before `make_call()` is called). Note: Don't confuse callbacks with get-result hooks or precall and postcall hooks. """ assert self.__rpc.state != apiproxy_rpc.RPC.IDLE, repr(self.state) if self.__rpc.state == apiproxy_rpc.RPC.RUNNING: self.__rpc.Wait() assert self.__rpc.state == apiproxy_rpc.RPC.FINISHING, repr(self.state) self.__call_user_callback() def __call_user_callback(self): """Call the high-level callback, if requested.""" if self.__must_call_user_callback: self.__must_call_user_callback = False if self.callback is not None: self.callback() def check_success(self): """Check for success of the RPC, possibly raising an exception. This function should be called at least once per RPC. If `wait()` hasn't been called yet, it is called first. If the RPC caused an exceptional condition, an exception will be raised here. The first time `check_success()` is called, the postcall hooks are called. """ self.wait() try: self.__rpc.CheckSuccess() except Exception as err: if not self.__postcall_hooks_called: self.__postcall_hooks_called = True self.__stubmap.GetPostCallHooks().Call(self.__service, self.__method, self.request, self.response, self.__rpc, err) raise else: if not self.__postcall_hooks_called: self.__postcall_hooks_called = True self.__stubmap.GetPostCallHooks().Call(self.__service, self.__method, self.request, self.response, self.__rpc) def get_result(self): """Get the result of the RPC, or possibly raise an exception. This implies a call to `check_success()`. If a get-result hook was passed to `make_call()`, that hook is responsible for calling `check_success()`, and the return value of the hook is returned. Otherwise, `check_success()` is called directly and `None` is returned. """ if self.__get_result_hook is None: self.check_success() return None else: return self.__get_result_hook(self) @classmethod def __is_finished(cls, rpc): """Check if the given RPC is finished or is running. Args: rpc: `UserRPC` instance.
Returns: `True` if the RPC is finished. `False` if the RPC is running. """ assert isinstance(rpc, cls), repr(rpc) state = rpc.__rpc.state if state == apiproxy_rpc.RPC.FINISHING: rpc.__call_user_callback() return True assert state != apiproxy_rpc.RPC.IDLE, repr(rpc) return False @classmethod def __get_first_finished_or_last_running(cls, rpcs): """Check the list of RPCs for first one finished, the last one running, or `None`. Args: rpcs: Iterable collection of `UserRPC` instances. Returns: A pair `(finished, running)`, as follows: `(UserRPC, None)` indicating the first RPC found that is finished; `(None, UserRPC)` indicating the last RPC found that is running; `(None, None)` indicating no RPCs are finished or running. """ rpc = None for rpc in rpcs: if cls.__is_finished(rpc): return rpc, None return None, rpc @classmethod def wait_any(cls, rpcs): """Wait until an RPC is finished. A `WaitCanceller` can also be included in the list of RPCs as a mechanism to cancel the wait. Args: rpcs: Iterable collection of `UserRPC` or `WaitCanceller` instances. Returns: A `UserRPC` instance, indicating the first RPC among the given RPCs that finished; or `None`, indicating that either an RPC not among the given RPCs finished in the mean time, or the iterable is empty. NOTES: - Repeatedly calling `wait_any()` with the same arguments will not wait; it will immediately return, meaning it will return the same RPC until one earlier in the collection finishes. The callback, however, will only be called the first time the RPC finishes (which may be here or in the `wait()` method). """ assert iter(rpcs) is not rpcs, 'rpcs must be a collection, not an iterator' rpc_futures = [rpc.future for rpc in rpcs if rpc.future] done, _ = futures.wait(rpc_futures, return_when=futures.FIRST_COMPLETED) if done and done == set(f for f in done if isinstance(f, _CancelFuture)): return next(iter(done))._canceller rpcs = [rpc for rpc in rpcs if not isinstance(rpc, WaitCanceller)] finished, running = cls.__get_first_finished_or_last_running(rpcs) if finished: return finished if running is None: return None try: cls.__local.may_interrupt_wait = True try: running.__rpc.Wait() except apiproxy_errors.InterruptedError as err: err.rpc._exception = None err.rpc._traceback = None finally: cls.__local.may_interrupt_wait = False finished, running = cls.__get_first_finished_or_last_running(rpcs) return finished @classmethod def wait_all(cls, rpcs): """Wait until all given RPCs are finished. This is a thin wrapper around `wait_any()` that loops until all given RPCs have finished. Args: rpcs: Iterable collection of `UserRPC` instances. Returns: None. """ rpcs = set(rpcs) while rpcs: finished = cls.wait_any(rpcs) if finished is not None: rpcs.remove(finished) def GetDefaultAPIProxy(): return APIProxyStubMap() apiproxy = GetDefaultAPIProxy()
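# Illustrative usage sketch added for exposition; it is not part of the
# original module. It wires a trivial stub and a precall hook into the
# default `apiproxy` map defined above and performs one synchronous call.
# `_EchoStub` and `_log_precall` are hypothetical names invented for this
# example.
if __name__ == '__main__':
  class _EchoStub(object):
    """Minimal stub: MakeSyncCall returns the request unchanged."""

    def MakeSyncCall(self, service, call, request, response):
      # Returning a value (rather than mutating `response`) exercises the
      # `returned_response` branch of APIProxyStubMap.MakeSyncCall.
      return request

  def _log_precall(service, call, request, response):
    # A 4-argument hook: ListOfHooks.Call dispatches on argument count.
    print('precall hook: %s.%s' % (service, call))

  apiproxy.RegisterStub('echo', _EchoStub())
  apiproxy.GetPreCallHooks().Append('log_precall', _log_precall)
  result = apiproxy.MakeSyncCall('echo', 'Ping', 'hello', None)
  assert result == 'hello'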
#(C) Copyright Syd Logan 2020 #(C) Copyright Thousand Smiles Foundation 2020 # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. # #You may obtain a copy of the License at #http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. from rest_framework.views import APIView from rest_framework.exceptions import APIException, NotFound from rest_framework.response import Response from rest_framework.authentication import TokenAuthentication from rest_framework.permissions import IsAuthenticated from enthistory.models import * from enthistoryextra.models import * from datetime import * from django.core import serializers from django.db.models import Q from django.http import HttpResponse, HttpResponseForbidden, HttpResponseBadRequest, HttpResponseServerError, HttpResponseNotFound from common.decorators import * import sys import numbers import json import logging LOG = logging.getLogger("tscharts") class ENTHistoryExtraView(APIView): authentication_classes = (TokenAuthentication,) permission_classes = (IsAuthenticated,) def durationToString(self, val): ret = None data = {ENTHistory.EAR_DURATION_NONE:"none", ENTHistory.EAR_DURATION_DAYS:"days", ENTHistory.EAR_DURATION_WEEKS:"weeks", ENTHistory.EAR_DURATION_MONTHS:"months", ENTHistory.EAR_DURATION_INTERMITTENT:"intermittent"} try: ret = data[val] except: pass return ret def stringToDuration(self, val): ret = None data = {"none":ENTHistory.EAR_DURATION_NONE, "days":ENTHistory.EAR_DURATION_DAYS, "weeks":ENTHistory.EAR_DURATION_WEEKS, "months":ENTHistory.EAR_DURATION_MONTHS, "intermittent":ENTHistory.EAR_DURATION_INTERMITTENT} try: ret = data[val] except: pass return ret def sideToString(self, val): ret = None data = {ENTHistory.EAR_SIDE_LEFT:"left", ENTHistory.EAR_SIDE_RIGHT:"right", ENTHistory.EAR_SIDE_BOTH:"both", ENTHistory.EAR_SIDE_NONE:"none"} try: ret = data[val] except: pass return ret def stringToSide(self, val): ret = None data = {"left":ENTHistory.EAR_SIDE_LEFT, "right":ENTHistory.EAR_SIDE_RIGHT, "both":ENTHistory.EAR_SIDE_BOTH, "none":ENTHistory.EAR_SIDE_NONE} try: ret = data[val] except: pass return ret def serialize(self, entry): m = {} m["id"] = entry.id m["enthistory"] = entry.enthistory_id m["name"] = entry.name m["duration"] = self.durationToString(entry.duration) m["side"] = self.sideToString(entry.side) return m @log_request def get(self, request, ent_history_extra_id=None, format=None): ent_history_extra = None badRequest = False aENTHistory = None kwargs = {} if ent_history_extra_id: try: ent_history_extra = ENTHistoryExtra.objects.get(id = ent_history_extra_id) except: ent_history_extra = None else: # look for required arguments try: enthistoryid = request.GET.get('enthistory', '') if enthistoryid != '': try: aENTHistory = ENTHistory.objects.get(id=enthistoryid) if not aENTHistory: badRequest = True else: kwargs["enthistory"] = aENTHistory except: badRequest = True except: badRequest = True hasName = False name = None try: name = request.GET.get('name', '') if name != '': hasName = True except: pass # no name subsearch if not badRequest: try: ent_history_extra = ENTHistoryExtra.objects.filter(**kwargs) if hasName == True: # isn't django wonderful, just filter on the result :-)
ent_history_extra = ent_history_extra.filter(Q(name__icontains=name)) except: ent_history_extra = None if not ent_history_extra and not badRequest: raise NotFound elif not badRequest: if ent_history_extra_id: ret = self.serialize(ent_history_extra) else: ret = [] for x in ent_history_extra: m = self.serialize(x) ret.append(m) if badRequest: return HttpResponseBadRequest() else: return Response(ret) def validatePostArgs(self, data): valid = True kwargs = data if not "name" in data or not "enthistory" in data or not "duration" in data or not "side" in data: #LOG.info(u'validatePostArgs valid False 1 {}'.format(data)) valid = False if "name" in data and len(data["name"]) == 0: #LOG.info(u'validatePostArgs valid False 2 {}'.format(data)) valid = False try: val = self.stringToDuration(data["duration"]) if val == None: #LOG.info(u'validatePostArgs valid False 3 {}'.format(data)) valid = False else: kwargs["duration"] = val except: #LOG.info(u'validatePostArgs valid False 4 {}'.format(data)) valid = False try: val = self.stringToSide(data["side"]) if val == None: #LOG.info(u'validatePostArgs valid False 5 {}'.format(data)) valid = False else: kwargs["side"] = val except: #LOG.info(u'validatePostArgs valid False 6 {}'.format(data)) valid = False return valid, kwargs def validatePutArgs(self, data, ent_history_extra): valid = True if valid == True: if "name" in data: if len(data["name"]) > 0: ent_history_extra.name = data["name"] else: valid = False try: if "duration" in data: val = self.stringToDuration(data["duration"]) if val == None: valid = False else: ent_history_extra.duration = val except: pass try: if "side" in data: val = self.stringToSide(data["side"]) if val == None: valid = False else: ent_history_extra.side = val except: pass return valid, ent_history_extra @log_request def post(self, request, format=None): badRequest = False implError = False data = json.loads(request.body) try: enthistoryid = int(data["enthistory"]) except: badRequest = True # validate the post data, and get a kwargs dict for # creating the object valid, kwargs = self.validatePostArgs(data) if not valid: badRequest = True if not badRequest: # get the instances try: aENTHistory = ENTHistory.objects.get(id=enthistoryid) except: aENTHistory = None if not aENTHistory: raise NotFound if not badRequest: try: kwargs["enthistory"] = aENTHistory ent_history_extra = ENTHistoryExtra(**kwargs) if ent_history_extra: ent_history_extra.save() else: implError = True except Exception as e: implError = True implMsg = sys.exc_info()[0] if badRequest: return HttpResponseBadRequest() if implError: return HttpResponseServerError(implMsg) else: return Response({'id': ent_history_extra.id}) @log_request def put(self, request, ent_history_extra_id=None, format=None): badRequest = False implError = False notFound = False if not ent_history_extra_id: badRequest = True if not badRequest: ent_history_extra = None try: ent_history_extra = ENTHistoryExtra.objects.get(id=ent_history_extra_id) except: pass if not ent_history_extra: notFound = True else: try: valid = True data = json.loads(request.body) valid, ent_history_extra = self.validatePutArgs(data, ent_history_extra) if valid == True: ent_history_extra.save() else: badRequest = True except: implError = True implMsg = sys.exc_info()[0] if badRequest: return HttpResponseBadRequest() if notFound: return HttpResponseNotFound() if implError: return HttpResponseServerError(implMsg) else: return Response({}) @log_request def delete(self, request, ent_history_extra_id=None, format=None): 
ent_history_extra = None # see if the ent history extra object exists if not ent_history_extra_id: return HttpResponseBadRequest() try: ent_history_extra = ENTHistoryExtra.objects.get(id=ent_history_extra_id) except: ent_history_extra = None if not ent_history_extra: raise NotFound else: ent_history_extra.delete() return Response({})
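# Illustrative notes added for exposition (not used by the view above): an
# example of the JSON body that validatePostArgs() accepts for POST, and the
# kwargs it builds once the string fields pass through stringToDuration()
# and stringToSide(). The enthistory id 12 and the name are arbitrary
# example values.
#
#   POST body:
#       {"enthistory": 12, "name": "tinnitus",
#        "duration": "weeks", "side": "left"}
#
#   resulting kwargs (post() then replaces "enthistory" with the looked-up
#   ENTHistory instance before creating the ENTHistoryExtra object):
#       {"enthistory": 12, "name": "tinnitus",
#        "duration": ENTHistory.EAR_DURATION_WEEKS,
#        "side": ENTHistory.EAR_SIDE_LEFT}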
""" Test the fastica algorithm. """ import itertools import warnings import pytest import numpy as np from scipy import stats from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.decomposition import FastICA, fastica, PCA from sklearn.decomposition._fastica import _gs_decorrelation from sklearn.exceptions import ConvergenceWarning def center_and_norm(x, axis=-1): """ Centers and norms x **in place** Parameters ----------- x: ndarray Array with an axis of observations (statistical units) measured on random variables. axis: int, optional Axis along which the mean and variance are calculated. """ x = np.rollaxis(x, axis) x -= x.mean(axis=0) x /= x.std(axis=0) def test_gs(): # Test gram schmidt orthonormalization # generate a random orthogonal matrix rng = np.random.RandomState(0) W, _, _ = np.linalg.svd(rng.randn(10, 10)) w = rng.randn(10) _gs_decorrelation(w, W, 10) assert (w ** 2).sum() < 1.e-10 w = rng.randn(10) u = _gs_decorrelation(w, W, 5) tmp = np.dot(u, W.T) assert (tmp[:5] ** 2).sum() < 1.e-10 @pytest.mark.parametrize("add_noise", [True, False]) @pytest.mark.parametrize("seed", range(1)) def test_fastica_simple(add_noise, seed): # Test the FastICA algorithm on very simple data. rng = np.random.RandomState(seed) # scipy.stats uses the global RNG: n_samples = 1000 # Generate two sources: s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1 s2 = stats.t.rvs(1, size=n_samples) s = np.c_[s1, s2].T center_and_norm(s) s1, s2 = s # Mixing angle phi = 0.6 mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]]) m = np.dot(mixing, s) if add_noise: m += 0.1 * rng.randn(2, 1000) center_and_norm(m) # function as fun arg def g_test(x): return x ** 3, (3 * x ** 2).mean(axis=-1) algos = ['parallel', 'deflation'] nls = ['logcosh', 'exp', 'cube', g_test] whitening = [True, False] for algo, nl, whiten in itertools.product(algos, nls, whitening): if whiten: k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo, random_state=rng) with pytest.raises(ValueError): fastica(m.T, fun=np.tanh, algorithm=algo) else: pca = PCA(n_components=2, whiten=True, random_state=rng) X = pca.fit_transform(m.T) k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False, random_state=rng) with pytest.raises(ValueError): fastica(X, fun=np.tanh, algorithm=algo) s_ = s_.T # Check that the mixing model described in the docstring holds: if whiten: assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m)) center_and_norm(s_) s1_, s2_ = s_ # Check to see if the sources have been estimated # in the wrong order if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)): s2_, s1_ = s_ s1_ *= np.sign(np.dot(s1_, s1)) s2_ *= np.sign(np.dot(s2_, s2)) # Check that we have estimated the original sources if not add_noise: assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2) assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2) else: assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1) assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1) # Test FastICA class _, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=seed) ica = FastICA(fun=nl, algorithm=algo, random_state=seed) sources = ica.fit_transform(m.T) assert ica.components_.shape == (2, 2) assert sources.shape == (1000, 2) assert_array_almost_equal(sources_fun, sources) assert_array_almost_equal(sources, ica.transform(m.T)) assert ica.mixing_.shape == (2, 2) for fn in [np.tanh, "exp(-.5(x^2))"]: ica = FastICA(fun=fn, algorithm=algo) with 
pytest.raises(ValueError): ica.fit(m.T) with pytest.raises(TypeError): FastICA(fun=range(10)).fit(m.T) def test_fastica_nowhiten(): m = [[0, 1], [1, 0]] # test for issue #697 ica = FastICA(n_components=1, whiten=False, random_state=0) warn_msg = "Ignoring n_components with whiten=False." with pytest.warns(UserWarning, match=warn_msg): ica.fit(m) assert hasattr(ica, 'mixing_') def test_fastica_convergence_fail(): # Test the FastICA algorithm on very simple data # (see test_non_square_fastica). # Ensure a ConvergenceWarning raised if the tolerance is sufficiently low. rng = np.random.RandomState(0) n_samples = 1000 # Generate two sources: t = np.linspace(0, 100, n_samples) s1 = np.sin(t) s2 = np.ceil(np.sin(np.pi * t)) s = np.c_[s1, s2].T center_and_norm(s) # Mixing matrix mixing = rng.randn(6, 2) m = np.dot(mixing, s) # Do fastICA with tolerance 0. to ensure failing convergence warn_msg = ( "FastICA did not converge. Consider increasing tolerance " "or the maximum number of iterations." ) with pytest.warns(ConvergenceWarning, match=warn_msg): ica = FastICA(algorithm="parallel", n_components=2, random_state=rng, max_iter=2, tol=0.) ica.fit(m.T) @pytest.mark.parametrize('add_noise', [True, False]) def test_non_square_fastica(add_noise): # Test the FastICA algorithm on very simple data. rng = np.random.RandomState(0) n_samples = 1000 # Generate two sources: t = np.linspace(0, 100, n_samples) s1 = np.sin(t) s2 = np.ceil(np.sin(np.pi * t)) s = np.c_[s1, s2].T center_and_norm(s) s1, s2 = s # Mixing matrix mixing = rng.randn(6, 2) m = np.dot(mixing, s) if add_noise: m += 0.1 * rng.randn(6, n_samples) center_and_norm(m) k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng) s_ = s_.T # Check that the mixing model described in the docstring holds: assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m)) center_and_norm(s_) s1_, s2_ = s_ # Check to see if the sources have been estimated # in the wrong order if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)): s2_, s1_ = s_ s1_ *= np.sign(np.dot(s1_, s1)) s2_ *= np.sign(np.dot(s2_, s2)) # Check that we have estimated the original sources if not add_noise: assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3) assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3) def test_fit_transform(): # Test FastICA.fit_transform rng = np.random.RandomState(0) X = rng.random_sample((100, 10)) for whiten, n_components in [[True, 5], [False, None]]: n_components_ = (n_components if n_components is not None else X.shape[1]) ica = FastICA(n_components=n_components, whiten=whiten, random_state=0) Xt = ica.fit_transform(X) assert ica.components_.shape == (n_components_, 10) assert Xt.shape == (100, n_components_) ica = FastICA(n_components=n_components, whiten=whiten, random_state=0) ica.fit(X) assert ica.components_.shape == (n_components_, 10) Xt2 = ica.transform(X) assert_array_almost_equal(Xt, Xt2) def test_inverse_transform(): # Test FastICA.inverse_transform n_features = 10 n_samples = 100 n1, n2 = 5, 10 rng = np.random.RandomState(0) X = rng.random_sample((n_samples, n_features)) expected = {(True, n1): (n_features, n1), (True, n2): (n_features, n2), (False, n1): (n_features, n2), (False, n2): (n_features, n2)} for whiten in [True, False]: for n_components in [n1, n2]: n_components_ = (n_components if n_components is not None else X.shape[1]) ica = FastICA(n_components=n_components, random_state=rng, whiten=whiten) with warnings.catch_warnings(record=True): # catch "n_components ignored" warning Xt = ica.fit_transform(X) expected_shape = 
expected[(whiten, n_components_)] assert ica.mixing_.shape == expected_shape X2 = ica.inverse_transform(Xt) assert X.shape == X2.shape # reversibility test in non-reduction case if n_components == X.shape[1]: assert_array_almost_equal(X, X2) def test_fastica_errors(): n_features = 3 n_samples = 10 rng = np.random.RandomState(0) X = rng.random_sample((n_samples, n_features)) w_init = rng.randn(n_features + 1, n_features + 1) with pytest.raises(ValueError, match='max_iter should be greater than 1'): FastICA(max_iter=0) with pytest.raises(ValueError, match=r'alpha must be in \[1,2\]'): fastica(X, fun_args={'alpha': 0}) with pytest.raises(ValueError, match='w_init has invalid shape.+' r'should be \(3L?, 3L?\)'): fastica(X, w_init=w_init) with pytest.raises(ValueError, match='Invalid algorithm.+must ' 'be.+parallel.+or.+deflation'): fastica(X, algorithm='pizza') @pytest.mark.parametrize('whiten', [True, False]) @pytest.mark.parametrize('return_X_mean', [True, False]) @pytest.mark.parametrize('return_n_iter', [True, False]) def test_fastica_output_shape(whiten, return_X_mean, return_n_iter): n_features = 3 n_samples = 10 rng = np.random.RandomState(0) X = rng.random_sample((n_samples, n_features)) expected_len = 3 + return_X_mean + return_n_iter out = fastica(X, whiten=whiten, return_n_iter=return_n_iter, return_X_mean=return_X_mean) assert len(out) == expected_len if not whiten: assert out[0] is None
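# Illustrative sketch (not collected as a test): the mixing-model identity the
# tests above rely on, namely estimated sources == W K X.T for the (K, W, S)
# returned by fastica on centered data. The Laplace sources, the 2x2 mixing
# matrix and the random seeds are arbitrary choices for this example.
def _demo_unmixing_identity():
    rng = np.random.RandomState(42)
    s = rng.laplace(size=(2, 500))    # two independent non-Gaussian sources
    mixing = rng.randn(2, 2)          # square mixing matrix
    m = np.dot(mixing, s)             # mixed signals, shape (2, n_samples)
    center_and_norm(m)                # the identity assumes centered data
    k, w, s_est = fastica(m.T, random_state=0)
    # Estimated sources equal the unmixing matrix applied to the whitened data.
    assert_almost_equal(s_est.T, np.dot(np.dot(w, k), m))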
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License # import os import re import system_test import unittest from subprocess import PIPE from proton import Url, SSLDomain, SSLUnavailable class QdstatTest(system_test.TestCase): """Test qdstat tool output""" @classmethod def setUpClass(cls): super(QdstatTest, cls).setUpClass() config = system_test.Qdrouterd.Config([ ('router', {'id': 'QDR.A', 'workerThreads': 1}), ('listener', {'port': cls.tester.get_port()}), ]) cls.router = cls.tester.qdrouterd('test-router', config) def run_qdstat(self, args, regexp=None, address=None): p = self.popen( ['qdstat', '--bus', str(address or self.router.addresses[0]), '--timeout', str(system_test.TIMEOUT) ] + args, name='qdstat-'+self.id(), stdout=PIPE, expect=None) out = p.communicate()[0] assert p.returncode == 0, \ "qdstat exit status %s, output:\n%s" % (p.returncode, out) if regexp: assert re.search(regexp, out, re.I), "Can't find '%s' in '%s'" % (regexp, out) return out def test_help(self): self.run_qdstat(['--help'], r'Usage: qdstat') def test_general(self): out = self.run_qdstat(['--general'], r'(?s)Router Statistics.*Mode\s*Standalone') self.assertTrue("Connections 1" in out) self.assertTrue("Nodes 0" in out) self.assertTrue("Auto Links 0" in out) self.assertTrue("Link Routes 0" in out) self.assertTrue("Router Id QDR.A" in out) self.assertTrue("Mode standalone" in out) def test_connections(self): self.run_qdstat(['--connections'], r'host.*container.*role') def test_links(self): out = self.run_qdstat(['--links'], r'endpoint.*out.*local.*temp.') parts = out.split("\n") self.assertEqual(len(parts), 6) def test_links_with_limit(self): out = self.run_qdstat(['--links', '--limit=1']) parts = out.split("\n") self.assertEqual(len(parts), 5) def test_nodes(self): self.run_qdstat(['--nodes'], r'No Router List') def test_address(self): out = self.run_qdstat(['--address'], r'\$management') parts = out.split("\n") self.assertEqual(len(parts), 8) def test_address_with_limit(self): out = self.run_qdstat(['--address', '--limit=1']) parts = out.split("\n") self.assertEqual(len(parts), 5) def test_memory(self): out = self.run_qdstat(['--memory']) if out.strip() == "No memory statistics available": # router built w/o memory pools enabled] return self.skipTest("Router's memory pools disabled") regexp = r'qdr_address_t\s+[0-9]+' assert re.search(regexp, out, re.I), "Can't find '%s' in '%s'" % (regexp, out) def test_log(self): self.run_qdstat(['--log', '--limit=5'], r'AGENT \(trace\).*GET-LOG') try: SSLDomain(SSLDomain.MODE_CLIENT) class QdstatSslTest(system_test.TestCase): """Test qdstat tool output""" @staticmethod def ssl_file(name): return os.path.join(system_test.DIR, 'ssl_certs', name) @staticmethod def sasl_path(): return os.path.join(system_test.DIR, 'sasl_configs') @classmethod def setUpClass(cls): 
super(QdstatSslTest, cls).setUpClass() # Write SASL configuration file: with open('tests-mech-EXTERNAL.conf', 'w') as sasl_conf: sasl_conf.write("mech_list: EXTERNAL ANONYMOUS DIGEST-MD5 PLAIN\n") # qdrouterd configuration: config = system_test.Qdrouterd.Config([ ('router', {'id': 'QDR.B', 'saslConfigPath': os.getcwd(), 'workerThreads': 1, 'saslConfigName': 'tests-mech-EXTERNAL'}), ('sslProfile', {'name': 'server-ssl', 'certDb': cls.ssl_file('ca-certificate.pem'), 'certFile': cls.ssl_file('server-certificate.pem'), 'keyFile': cls.ssl_file('server-private-key.pem'), 'password': 'server-password'}), ('listener', {'port': cls.tester.get_port()}), ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl', 'authenticatePeer': 'no', 'requireSsl': 'yes'}), ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl', 'authenticatePeer': 'no', 'requireSsl': 'no'}), ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl', 'authenticatePeer': 'yes', 'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}) ]) cls.router = cls.tester.qdrouterd('test-router', config) def run_qdstat(self, args, regexp=None, address=None): p = self.popen( ['qdstat', '--bus', str(address or self.router.addresses[0]), '--ssl-disable-peer-name-verify', '--timeout', str(system_test.TIMEOUT) ] + args, name='qdstat-'+self.id(), stdout=PIPE, expect=None) out = p.communicate()[0] assert p.returncode == 0, \ "qdstat exit status %s, output:\n%s" % (p.returncode, out) if regexp: assert re.search(regexp, out, re.I), "Can't find '%s' in '%s'" % (regexp, out) return out def get_ssl_args(self): args = dict( trustfile = ['--ssl-trustfile', self.ssl_file('ca-certificate.pem')], bad_trustfile = ['--ssl-trustfile', self.ssl_file('bad-ca-certificate.pem')], client_cert = ['--ssl-certificate', self.ssl_file('client-certificate.pem')], client_key = ['--ssl-key', self.ssl_file('client-private-key.pem')], client_pass = ['--ssl-password', 'client-password']) args['client_cert_all'] = args['client_cert'] + args['client_key'] + args['client_pass'] return args def ssl_test(self, url_name, arg_names): """Run simple SSL connection test with supplied parameters. See test_ssl_* below. """ args = self.get_ssl_args() addrs = [self.router.addresses[i] for i in xrange(4)]; urls = dict(zip(['none', 'strict', 'unsecured', 'auth'], addrs) + zip(['none_s', 'strict_s', 'unsecured_s', 'auth_s'], (Url(a, scheme="amqps") for a in addrs))) self.run_qdstat(['--general'] + sum([args[n] for n in arg_names], []), regexp=r'(?s)Router Statistics.*Mode\s*Standalone', address=str(urls[url_name])) def ssl_test_bad(self, url_name, arg_names): self.assertRaises(AssertionError, self.ssl_test, url_name, arg_names) # Non-SSL enabled listener should fail SSL connections. 
def test_ssl_none(self): self.ssl_test('none', []) def test_ssl_scheme_to_none(self): self.ssl_test_bad('none_s', []) def test_ssl_cert_to_none(self): self.ssl_test_bad('none', ['client_cert']) # Strict SSL listener, SSL only def test_ssl_none_to_strict(self): self.ssl_test_bad('strict', []) def test_ssl_schema_to_strict(self): self.ssl_test('strict_s', []) def test_ssl_cert_to_strict(self): self.ssl_test('strict_s', ['client_cert_all']) def test_ssl_trustfile_to_strict(self): self.ssl_test('strict_s', ['trustfile']) def test_ssl_trustfile_cert_to_strict(self): self.ssl_test('strict_s', ['trustfile', 'client_cert_all']) def test_ssl_bad_trustfile_to_strict(self): self.ssl_test_bad('strict_s', ['bad_trustfile']) # Require-auth SSL listener def test_ssl_none_to_auth(self): self.ssl_test_bad('auth', []) def test_ssl_schema_to_auth(self): self.ssl_test_bad('auth_s', []) def test_ssl_trustfile_to_auth(self): self.ssl_test_bad('auth_s', ['trustfile']) def test_ssl_cert_to_auth(self): self.ssl_test('auth_s', ['client_cert_all']) def test_ssl_trustfile_cert_to_auth(self): self.ssl_test('auth_s', ['trustfile', 'client_cert_all']) def test_ssl_bad_trustfile_to_auth(self): self.ssl_test_bad('auth_s', ['bad_trustfile', 'client_cert_all']) # Unsecured SSL listener, allows non-SSL def test_ssl_none_to_unsecured(self): self.ssl_test('unsecured', []) def test_ssl_schema_to_unsecured(self): self.ssl_test('unsecured_s', []) def test_ssl_cert_to_unsecured(self): self.ssl_test('unsecured_s', ['client_cert_all']) def test_ssl_trustfile_to_unsecured(self): self.ssl_test('unsecured_s', ['trustfile']) def test_ssl_trustfile_cert_to_unsecured(self): self.ssl_test('unsecured_s', ['trustfile', 'client_cert_all']) def test_ssl_bad_trustfile_to_unsecured(self): self.ssl_test_bad('unsecured_s', ['bad_trustfile']) except SSLUnavailable: class QdstatSslTest(system_test.TestCase): def test_skip(self): self.skipTest("Proton SSL support unavailable.") try: SSLDomain(SSLDomain.MODE_CLIENT) class QdstatSslTestSslPasswordFile(QdstatSslTest): """ Tests the --ssl-password-file command line parameter """ def get_ssl_args(self): args = dict( trustfile = ['--ssl-trustfile', self.ssl_file('ca-certificate.pem')], bad_trustfile = ['--ssl-trustfile', self.ssl_file('bad-ca-certificate.pem')], client_cert = ['--ssl-certificate', self.ssl_file('client-certificate.pem')], client_key = ['--ssl-key', self.ssl_file('client-private-key.pem')], client_pass = ['--ssl-password-file', self.ssl_file('client-password-file.txt')]) args['client_cert_all'] = args['client_cert'] + args['client_key'] + args['client_pass'] return args except SSLUnavailable: class QdstatSslTest(system_test.TestCase): def test_skip(self): self.skipTest("Proton SSL support unavailable.") try: SSLDomain(SSLDomain.MODE_CLIENT) class QdstatSslNoExternalTest(system_test.TestCase): """Test qdstat can't connect without sasl_mech EXTERNAL""" @staticmethod def ssl_file(name): return os.path.join(system_test.DIR, 'ssl_certs', name) @staticmethod def sasl_path(): return os.path.join(system_test.DIR, 'sasl_configs') @classmethod def setUpClass(cls): super(QdstatSslNoExternalTest, cls).setUpClass() # Write SASL configuration file: with open('tests-mech-NOEXTERNAL.conf', 'w') as sasl_conf: sasl_conf.write("mech_list: ANONYMOUS DIGEST-MD5 PLAIN\n") # qdrouterd configuration: config = system_test.Qdrouterd.Config([ ('router', {'id': 'QDR.C', 'saslConfigPath': os.getcwd(), 'workerThreads': 1, 'saslConfigName': 'tests-mech-NOEXTERNAL'}), ('sslProfile', {'name': 'server-ssl', 'certDb': 
cls.ssl_file('ca-certificate.pem'), 'certFile': cls.ssl_file('server-certificate.pem'), 'keyFile': cls.ssl_file('server-private-key.pem'), 'password': 'server-password'}), ('listener', {'port': cls.tester.get_port()}), ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl', 'authenticatePeer': 'no', 'requireSsl': 'yes'}), ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl', 'authenticatePeer': 'no', 'requireSsl': 'no'}), ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl', 'authenticatePeer': 'yes', 'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}) ]) cls.router = cls.tester.qdrouterd('test-router', config) def run_qdstat(self, args, regexp=None, address=None): p = self.popen( ['qdstat', '--bus', str(address or self.router.addresses[0]), '--timeout', str(system_test.TIMEOUT) ] + args, name='qdstat-'+self.id(), stdout=PIPE, expect=None) out = p.communicate()[0] assert p.returncode == 0, \ "qdstat exit status %s, output:\n%s" % (p.returncode, out) if regexp: assert re.search(regexp, out, re.I), "Can't find '%s' in '%s'" % (regexp, out) return out def ssl_test(self, url_name, arg_names): """Run simple SSL connection test with supplied parameters. See test_ssl_* below. """ args = dict( trustfile = ['--ssl-trustfile', self.ssl_file('ca-certificate.pem')], bad_trustfile = ['--ssl-trustfile', self.ssl_file('bad-ca-certificate.pem')], client_cert = ['--ssl-certificate', self.ssl_file('client-certificate.pem')], client_key = ['--ssl-key', self.ssl_file('client-private-key.pem')], client_pass = ['--ssl-password', 'client-password']) args['client_cert_all'] = args['client_cert'] + args['client_key'] + args['client_pass'] addrs = [self.router.addresses[i] for i in xrange(4)]; urls = dict(zip(['none', 'strict', 'unsecured', 'auth'], addrs) + zip(['none_s', 'strict_s', 'unsecured_s', 'auth_s'], (Url(a, scheme="amqps") for a in addrs))) self.run_qdstat(['--general'] + sum([args[n] for n in arg_names], []), regexp=r'(?s)Router Statistics.*Mode\s*Standalone', address=str(urls[url_name])) def ssl_test_bad(self, url_name, arg_names): self.assertRaises(AssertionError, self.ssl_test, url_name, arg_names) def test_ssl_cert_to_auth_fail_no_sasl_external(self): self.ssl_test_bad('auth_s', ['client_cert_all']) def test_ssl_trustfile_cert_to_auth_fail_no_sasl_external(self): self.ssl_test_bad('auth_s', ['trustfile', 'client_cert_all']) except SSLUnavailable: class QdstatSslTest(system_test.TestCase): def test_skip(self): self.skipTest("Proton SSL support unavailable.") if __name__ == '__main__': unittest.main(system_test.main_module())
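# Illustrative sketch added for exposition (not an actual test case): how the
# ssl_test() helpers above pair their symbolic URL names with the four
# listeners declared in setUpClass, in declaration order. The port numbers
# below are invented for the example; the real tests use
# self.router.addresses and proton's Url for the amqps variants.
def _demo_ssl_url_map():
    addrs = ['amqp://127.0.0.1:%d' % port
             for port in (25670, 25671, 25672, 25673)]
    names = ['none', 'strict', 'unsecured', 'auth']
    urls = dict(zip(names, addrs))
    # The '*_s' variants reach the same listeners over the amqps scheme.
    urls.update((name + '_s', addr.replace('amqp://', 'amqps://'))
                for name, addr in zip(names, addrs))
    return urls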
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import paddle.fluid as fluid import paddle.fluid.layers as layers import numpy as np import paddle.fluid.core as core from paddle.fluid import ParamAttr from paddle.fluid.framework import Program, grad_var_name from paddle.fluid.executor import Executor from paddle.fluid.backward import append_backward np.random.seed(123) class PyRNNBase(object): def __init__(self, input_shape, output_shape): self.x = np.ones(shape=input_shape).astype("float32") self.y = np.zeros(shape=output_shape).astype("float32") def step(self, step_id, x): raise NotImplementedError def forward(self): for step_id in range(self.x.shape[0]): self.step(step_id, self.x[step_id]) return np.array([np.mean(self.y)]) def segment_inputs(self): return [self.x[i] for i in range(self.x.shape[0])] class PySimpleRNN1(PyRNNBase): def __init__(self, input_shape, output_shape): super(PySimpleRNN1, self).__init__(input_shape, output_shape) seq_len, batch_size, input_dim = input_shape self.h_boot = np.random.normal(size=(batch_size, input_dim)).astype("float32") self.scale = 1.0 / 2.0 men_dim = (seq_len, batch_size, input_dim) self.mems = np.zeros(shape=men_dim).astype("float32") def step(self, step_id, x): if step_id == 0: pre_mem = self.h_boot else: pre_mem = self.mems[step_id - 1] self.mems[step_id] = (pre_mem + x) * self.scale self.y[step_id] = self.mems[step_id] class PySimpleRNN2(PyRNNBase): def __init__(self, input_shape, output_shape): super(PySimpleRNN2, self).__init__(input_shape, output_shape) seq_len, batch_size, input_dim = input_shape self.W = np.ones(shape=(input_dim, input_dim)).astype("float32") self.U = np.zeros(shape=(input_dim, input_dim)).astype("float32") self.h_boot = np.ones(shape=(batch_size, input_dim)).astype("float32") men_dim = (seq_len, batch_size, input_dim) self.mems = np.zeros(shape=men_dim).astype("float32") def step(self, step_id, x): if step_id > 0: pre_mem = self.mems[step_id - 1] else: pre_mem = self.h_boot xW = np.matmul(x, self.W).astype("float32") hU = np.matmul(pre_mem, self.U).astype("float32") def py_sigmoid(x): return 1. / (1. 
+ np.exp(-x)) self.mems[step_id] = py_sigmoid(xW + hU) self.y[step_id] = self.mems[step_id] def create_tensor(np_data, place): tensor = core.LoDTensor() tensor.set(np_data, place) return tensor class RecurrentOpTest1(unittest.TestCase): ''' Test RNNOp equation: h_t = ( x_t + h_{t-1} ) / scale vars: - x memories: - h outputs: - h ''' input_dim = 2 batch_size = 1 sent_len = 1 def setup_program(self): self.main_program = Program() self.startup_program = Program() self.place = core.CPUPlace() def setUp(self): self.setup_program() self.feed_data_field = {"x", "h_boot"} self.grad_data_field = self.feed_data_field self.input_shape = (self.sent_len, self.batch_size, self.input_dim) self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = PySimpleRNN1(self.input_shape, self.output_shape) with fluid.program_guard(self.main_program, self.startup_program): self.output = layers.mean(self.create_rnn_op()) def create_rnn_op(self): x = layers.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', append_batch_size=False) x.stop_gradient = False h_boot = layers.data( shape=[self.input_dim], dtype='float32', name='h_boot') h_boot.stop_gradient = False rnn = layers.StaticRNN() with rnn.step(): h_pre = rnn.memory(init=h_boot) x_t = rnn.step_input(x) h = layers.scale( x=layers.elementwise_add( x=h_pre, y=x_t), scale=self.py_rnn.scale) rnn.update_memory(h_pre, h) rnn.output(h) return rnn() def forward(self): self.feed_map = { x: create_tensor(getattr(self.py_rnn, x), self.place) for x in self.feed_data_field } exe = Executor(self.place) out = exe.run(self.main_program, feed=self.feed_map, fetch_list=[self.output]) return out[0] def backward(self): self.feed_map = { x: create_tensor(getattr(self.py_rnn, x), self.place) for x in self.feed_data_field } fetch_list = [ self.main_program.global_block().var(grad_var_name(x)) for x in self.grad_data_field ] exe = Executor(self.place) return exe.run(self.main_program, feed=self.feed_map, fetch_list=fetch_list, return_numpy=False) def test_backward(self, rtol=0.01): self.check_forward() with fluid.program_guard(self.main_program, self.startup_program): append_backward(self.output) ana_grad = [np.array(x) for x in self.backward()] num_grad = self.get_numerical_gradient() for idx, name in enumerate(self.grad_data_field): self.assertEqual(num_grad[idx].shape, ana_grad[idx].shape) self.assertTrue( np.isclose( num_grad[idx], ana_grad[idx], rtol=rtol).all(), "num_grad (" + name + ") has diff at " + str(self.place) + "\nExpect " + str(num_grad[idx]) + "\n" + "But Got" + str(ana_grad[idx]) + " in class " + self.__class__.__name__) def check_forward(self): pd_output = self.forward() py_output = self.py_rnn.forward() self.assertEqual(pd_output.shape, py_output.shape) self.assertTrue(np.isclose(pd_output, py_output, rtol=0.01).all()) def get_numerical_gradient(self, delta=0.005): dloss_dout = 1.0 feed_list = [getattr(self.py_rnn, x) for x in self.grad_data_field] grad_list = [np.zeros_like(x) for x in feed_list] for feed, grad in zip(feed_list, grad_list): for f, g in np.nditer([feed, grad], op_flags=['readwrite']): o = float(f) f[...] = o + delta y_pos = self.forward() f[...] = o - delta y_neg = self.forward() f[...] = o dout_dfeed = (y_pos - y_neg) / (delta * 2) g[...] 
= dout_dfeed[0] return grad_list class RecurrentOpTest2(RecurrentOpTest1): r''' Test RNNOp equation: h_t = \sigma (W x_t + U h_{t-1}) weights: - W - U vars: - x memories: - h outputs: - h ''' input_dim = 2 batch_size = 10 sent_len = 2 def setUp(self): self.setup_program() self.feed_data_field = {"x", "h_boot", "W", "U"} self.grad_data_field = self.feed_data_field self.input_shape = (self.sent_len, self.batch_size, self.input_dim) self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = PySimpleRNN2(self.input_shape, self.output_shape) with fluid.program_guard(self.main_program, self.startup_program): self.output = layers.mean(self.create_rnn_op()) def create_rnn_op(self): x = layers.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', append_batch_size=False) x.stop_gradient = False h_boot = layers.data( shape=[self.input_dim], dtype='float32', name='h_boot') h_boot.stop_gradient = False rnn = layers.StaticRNN() with rnn.step(): h_pre = rnn.memory(init=h_boot) x_t = rnn.step_input(x) temp_l = layers.fc( input=x_t, size=self.input_dim, param_attr=ParamAttr( name='W', initializer=fluid.initializer.ConstantInitializer(1.0)), bias_attr=False) temp_r = layers.fc( input=h_pre, size=self.input_dim, param_attr=ParamAttr( name='U', initializer=fluid.initializer.ConstantInitializer(0.0)), bias_attr=False) h = layers.sigmoid(x=layers.elementwise_add(x=temp_l, y=temp_r)) rnn.update_memory(h_pre, h) rnn.output(h) return rnn() def test_backward(self): super(RecurrentOpTest2, self).test_backward(rtol=0.01) class RecurrentOpMultipleMemoryTest(RecurrentOpTest1): ''' Test RNNOp with two memories equation: h_1 = h_pre_1 h_2 = h_pre_2 y = h_1 + h_2 vars: - x memories: - h_1, h_2 outputs: - y ''' class PySimpleRNN3(PyRNNBase): def __init__(self, input_shape, output_shape): super(RecurrentOpMultipleMemoryTest.PySimpleRNN3, self).__init__( input_shape, output_shape) seq_len, batch_size, input_dim = input_shape self.h_boot1 = np.random.normal(size=(batch_size, input_dim)).astype("float32") self.h_boot2 = np.random.normal(size=(batch_size, input_dim)).astype("float32") men_dim = (seq_len, batch_size, input_dim) self.mems1 = np.zeros(shape=men_dim).astype("float32") self.mems2 = np.zeros(shape=men_dim).astype("float32") def step(self, step_id, x): if step_id == 0: pre_mem1 = self.h_boot1 pre_mem2 = self.h_boot2 else: pre_mem1 = self.mems1[step_id - 1] pre_mem2 = self.mems2[step_id - 1] self.mems1[step_id] = pre_mem1 self.mems2[step_id] = pre_mem2 self.y[step_id] = self.mems1[step_id] + self.mems2[step_id] + x input_dim = 1 batch_size = 1 sent_len = 2 def setUp(self): self.setup_program() self.feed_data_field = {"x", "h_boot1", "h_boot2"} self.grad_data_field = self.feed_data_field self.input_shape = (self.sent_len, self.batch_size, self.input_dim) self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = RecurrentOpMultipleMemoryTest.PySimpleRNN3( self.input_shape, self.output_shape) with fluid.program_guard(self.main_program, self.startup_program): self.output = layers.mean(self.create_rnn_op()) def create_rnn_op(self): x = layers.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', append_batch_size=False) x.stop_gradient = False h_boot1 = layers.data( shape=[self.batch_size, self.input_dim], dtype='float32', name='h_boot1', append_batch_size=False) h_boot1.stop_gradient = False h_boot2 = layers.data( shape=[self.batch_size, self.input_dim], dtype='float32', name='h_boot2', 
append_batch_size=False) h_boot2.stop_gradient = False rnn = layers.StaticRNN() with rnn.step(): h_pre1 = rnn.memory(init=h_boot1) h_pre2 = rnn.memory(init=h_boot2) x_t = rnn.step_input(x) mem1 = layers.scale(x=h_pre1, scale=1.0) mem2 = layers.scale(x=h_pre2, scale=1.0) out = layers.sums(input=[mem1, x_t, mem2]) rnn.update_memory(h_pre1, mem1) rnn.update_memory(h_pre2, mem2) rnn.output(out) return rnn() class RecurrentOpNoMemBootTest(RecurrentOpTest1): ''' Test RNNOp with two memories equation: mem = x + mem_pre y = mem vars: - x memories: - mem outputs: - y ''' class PySimpleRNN4(PyRNNBase): def __init__(self, input_shape, output_shape): super(RecurrentOpNoMemBootTest.PySimpleRNN4, self).__init__( input_shape, output_shape) men_dim = input_shape self.mems = np.zeros(shape=men_dim).astype("float32") def step(self, step_id, x): if step_id == 0: pre_mem = np.zeros_like(x) else: pre_mem = self.mems[step_id - 1] self.mems[step_id] = pre_mem + x self.y[step_id] = self.mems[step_id] input_dim = 1 batch_size = 1 sent_len = 2 def setUp(self): self.setup_program() self.feed_data_field = {"x"} self.grad_data_field = self.feed_data_field self.input_shape = (self.sent_len, self.batch_size, self.input_dim) self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = RecurrentOpNoMemBootTest.PySimpleRNN4(self.input_shape, self.output_shape) with fluid.program_guard(self.main_program, self.startup_program): self.output = layers.mean(self.create_rnn_op()) def create_rnn_op(self): x = layers.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', append_batch_size=False) x.stop_gradient = False rnn = layers.StaticRNN() with rnn.step(): mem_pre = rnn.memory(shape=[-1, self.input_dim], batch_ref=x) x_t = rnn.step_input(x) mem = layers.elementwise_add(x=mem_pre, y=x_t) rnn.update_memory(mem_pre, mem) rnn.output(mem) return rnn() class RecurrentOpSubBlockTest(RecurrentOpTest1): r''' Test RNNOp with subblock variable equation: y_ = emb * w1 h_t = \concat([x, h_{t-1}]) h_t = h_t * w2 h_t = \\unsqueeze(h_t, 1) h_t = \dot_attention(h_t, y_) h_t = \squeeze(h_t, 1) y = h_t vars: - x - w1 - w2 memories: - h outputs: - y ''' class PySimpleRNN5(PyRNNBase): def __init__(self, input_shape, output_shape): super(RecurrentOpSubBlockTest.PySimpleRNN5, self).__init__( input_shape, output_shape) seq_len, batch_size, input_dim = input_shape self.w1 = np.random.uniform( -0.1, 0.1, size=(input_dim, input_dim)).astype("float32") self.w2 = np.random.uniform( -0.1, 0.1, size=(input_dim * 2, input_dim)).astype("float32") self.emb = np.random.uniform( -0.1, 0.1, size=(seq_len, batch_size, input_dim)).astype("float32") men_dim = (seq_len, batch_size, input_dim) self.mems = np.zeros(shape=men_dim).astype("float32") self.oy = np.matmul(self.emb, self.w1) def step(self, step_id, x): def dot_attention(query, memory): attn = np.matmul(query, memory.transpose((0, 2, 1))) weight = softmax(attn) weight_memory = np.matmul(weight, memory) return weight_memory, weight def softmax(x): return np.exp(x) / sum(np.exp(x)) if step_id == 0: pre_mem = np.zeros_like(x) else: pre_mem = self.mems[step_id - 1] concat_in = np.concatenate([x, pre_mem], 1) new_mem = np.matmul(concat_in, self.w2) new_mem = np.expand_dims(new_mem, 1) new_mem, _ = dot_attention(new_mem, self.oy) new_mem = np.squeeze(new_mem, 1) self.mems[step_id] = new_mem self.y[step_id] = self.mems[step_id] input_dim = 2 batch_size = 3 sent_len = 3 def setUp(self): self.setup_program() self.feed_data_field = {"x", "emb", "w1", "w2"} 
self.grad_data_field = self.feed_data_field self.input_shape = (self.sent_len, self.batch_size, self.input_dim) self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = RecurrentOpSubBlockTest.PySimpleRNN5(self.input_shape, self.output_shape) with fluid.program_guard(self.main_program, self.startup_program): rnn_out = self.create_rnn_op() self.output = layers.mean(rnn_out) def create_rnn_op(self): x = layers.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', name='x', append_batch_size=False) x.stop_gradient = False emb = layers.data( name='emb', shape=[self.sent_len, self.batch_size, self.input_dim], dtype='float32', append_batch_size=False) emb.stop_gradient = False w1 = layers.data( shape=[self.input_dim, self.input_dim], dtype='float32', name='w1', append_batch_size=False) w1.stop_gradient = False w2 = layers.data( shape=[self.input_dim * 2, self.input_dim], dtype='float32', name='w2', append_batch_size=False) w2.stop_gradient = False rnn = layers.StaticRNN() def dot_attention(query, memory): attn = layers.matmul(query, memory, transpose_y=True) weight = layers.softmax(attn) weight_memory = layers.matmul(weight, memory) return weight_memory, weight y = layers.matmul(emb, w1) with rnn.step(): pre_h = rnn.memory( shape=(self.sent_len, self.input_dim), batch_ref=x, init_value=0.0) step_in = rnn.step_input(x) concat_in = layers.concat([step_in, pre_h], 1) new_h = layers.matmul(concat_in, w2) new_h = layers.unsqueeze(new_h, [1]) new_h, _ = dot_attention(new_h, y) new_h = layers.squeeze(new_h, [1]) rnn.update_memory(pre_h, new_h) rnn.step_output(new_h) return rnn() class RecurrentOpStopGradientTest(RecurrentOpTest1): r""" Test RNNOp with stop_gradient = True equation: h_t = \sigma (W x_t + U h_{t-1}) weights: - W - U vars: - x memories: - h output: - h """ input_dim = 2 batch_size = 10 sent_len = 2 def setUp(self): self.setup_program() self.feed_data_field = {"x", "h_boot", "W", "U"} self.grad_data_field = {"x", "W", "U"} self.input_shape = (self.sent_len, self.batch_size, self.input_dim) self.output_shape = (self.sent_len, self.batch_size, self.input_dim) self.py_rnn = PySimpleRNN2(self.input_shape, self.output_shape) with fluid.program_guard(self.main_program, self.startup_program): self.output = layers.mean(self.create_rnn_op()) def create_rnn_op(self): x = layers.data( shape=[self.sent_len, self.batch_size, self.input_dim], dtype="float32", name="x", append_batch_size=False) x.stop_gradient = False h_boot = layers.data( shape=[self.input_dim], dtype="float32", name="h_boot") h_boot.stop_gradient = True rnn = layers.StaticRNN() with rnn.step(): h_pre = rnn.memory(init=h_boot) # init doesn't have gradient x_t = rnn.step_input(x) temp_l = layers.fc( input=x_t, size=self.input_dim, param_attr=ParamAttr( name="W", initializer=fluid.initializer.ConstantInitializer(1.0)), bias_attr=False) temp_r = layers.fc( input=h_pre, size=self.input_dim, param_attr=ParamAttr( name="U", initializer=fluid.initializer.ConstantInitializer(0.0)), bias_attr=False) h = layers.sigmoid(x=layers.elementwise_add(temp_l, temp_r)) rnn.update_memory(h_pre, h) rnn.output(h) return rnn() if __name__ == '__main__': unittest.main()
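# Illustrative sketch added for exposition (not part of the test suite): the
# central finite-difference rule that RecurrentOpTest1.get_numerical_gradient
# applies to each feed variable, shown on a plain numpy function. The loss,
# the input values and the tolerance are arbitrary choices for this demo.
def _numerical_gradient_demo(delta=0.005):
    def loss(x):
        # scalar loss of a vector input, analogous to layers.mean(rnn_out)
        return np.mean(x ** 2)

    x0 = np.array([1.0, -2.0, 0.5], dtype="float32")
    grad = np.zeros_like(x0)
    for i in range(x0.size):
        orig = float(x0[i])
        x0[i] = orig + delta
        y_pos = loss(x0)
        x0[i] = orig - delta
        y_neg = loss(x0)
        x0[i] = orig
        grad[i] = (y_pos - y_neg) / (delta * 2)  # central difference
    # analytic gradient of mean(x ** 2) is 2 * x / n
    assert np.allclose(grad, 2.0 * x0 / x0.size, atol=1e-3)
    return grad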
# Copyright 2013: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from rally import exceptions as rally_exceptions from rally.plugins.openstack.scenarios.nova import servers from tests.unit import fakes from tests.unit import test NOVA_SERVERS_MODULE = "rally.plugins.openstack.scenarios.nova.servers" NOVA_SERVERS = NOVA_SERVERS_MODULE + ".NovaServers" class NovaServersTestCase(test.ScenarioTestCase): def test_boot_rescue_unrescue(self): actions = [{"rescue_unrescue": 5}] fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._generate_random_name = mock.MagicMock(return_value="name") scenario._rescue_server = mock.MagicMock() scenario._unrescue_server = mock.MagicMock() scenario._delete_server = mock.MagicMock() scenario.boot_and_bounce_server("img", 1, actions=actions) scenario._boot_server.assert_called_once_with("img", 1) server_calls = [] for i in range(5): server_calls.append(mock.call(fake_server)) self.assertEqual(5, scenario._rescue_server.call_count, "Rescue not called 5 times") self.assertEqual(5, scenario._unrescue_server.call_count, "Unrescue not called 5 times") scenario._rescue_server.assert_has_calls(server_calls) scenario._unrescue_server.assert_has_calls(server_calls) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_boot_stop_start(self): actions = [{"stop_start": 5}] fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._generate_random_name = mock.MagicMock(return_value="name") scenario._start_server = mock.MagicMock() scenario._stop_server = mock.MagicMock() scenario._delete_server = mock.MagicMock() scenario.boot_and_bounce_server("img", 1, actions=actions) scenario._boot_server.assert_called_once_with("img", 1) server_calls = [] for i in range(5): server_calls.append(mock.call(fake_server)) self.assertEqual(5, scenario._stop_server.call_count, "Stop not called 5 times") self.assertEqual(5, scenario._start_server.call_count, "Start not called 5 times") scenario._stop_server.assert_has_calls(server_calls) scenario._start_server.assert_has_calls(server_calls) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_multiple_bounce_actions(self): actions = [{"hard_reboot": 5}, {"stop_start": 8}] fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._delete_server = mock.MagicMock() scenario._reboot_server = mock.MagicMock() scenario._stop_and_start_server = mock.MagicMock() scenario._generate_random_name = mock.MagicMock(return_value="name") scenario.boot_and_bounce_server("img", 1, actions=actions) scenario._boot_server.assert_called_once_with("img", 1) server_calls = [] for i in range(5): server_calls.append(mock.call(fake_server)) self.assertEqual(5, scenario._reboot_server.call_count, "Reboot 
not called 5 times") scenario._reboot_server.assert_has_calls(server_calls) server_calls = [] for i in range(8): server_calls.append(mock.call(fake_server)) self.assertEqual(8, scenario._stop_and_start_server.call_count, "Stop/Start not called 8 times") scenario._stop_and_start_server.assert_has_calls(server_calls) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_boot_lock_unlock_and_delete(self): server = fakes.FakeServer() image = fakes.FakeImage() flavor = fakes.FakeFlavor() scenario = servers.NovaServers(self.context) scenario._boot_server = mock.Mock(return_value=server) scenario._lock_server = mock.Mock(side_effect=lambda s: s.lock()) scenario._unlock_server = mock.Mock(side_effect=lambda s: s.unlock()) scenario._delete_server = mock.Mock( side_effect=lambda s, **kwargs: self.assertFalse(getattr(s, "OS-EXT-STS:locked", False))) scenario.boot_lock_unlock_and_delete(image, flavor, fakearg="fakearg") scenario._boot_server.assert_called_once_with(image, flavor, fakearg="fakearg") scenario._lock_server.assert_called_once_with(server) scenario._unlock_server.assert_called_once_with(server) scenario._delete_server.assert_called_once_with(server, force=False) def test_validate_actions(self): actions = [{"hardd_reboot": 6}] scenario = servers.NovaServers(self.context) self.assertRaises(rally_exceptions.InvalidConfigException, scenario.boot_and_bounce_server, 1, 1, actions=actions) actions = [{"hard_reboot": "no"}] self.assertRaises(rally_exceptions.InvalidConfigException, scenario.boot_and_bounce_server, 1, 1, actions=actions) actions = {"hard_reboot": 6} self.assertRaises(rally_exceptions.InvalidConfigException, scenario.boot_and_bounce_server, 1, 1, actions=actions) actions = {"hard_reboot": -1} self.assertRaises(rally_exceptions.InvalidConfigException, scenario.boot_and_bounce_server, 1, 1, actions=actions) actions = {"hard_reboot": 0} self.assertRaises(rally_exceptions.InvalidConfigException, scenario.boot_and_bounce_server, 1, 1, actions=actions) def _verify_reboot(self, soft=True): actions = [{"soft_reboot" if soft else "hard_reboot": 5}] fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._reboot_server = mock.MagicMock() scenario._soft_reboot_server = mock.MagicMock() scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._delete_server = mock.MagicMock() scenario._generate_random_name = mock.MagicMock(return_value="name") scenario.boot_and_bounce_server("img", 1, actions=actions) scenario._boot_server.assert_called_once_with("img", 1) server_calls = [] for i in range(5): server_calls.append(mock.call(fake_server)) if soft: self.assertEqual(5, scenario._soft_reboot_server.call_count, "Reboot not called 5 times") scenario._soft_reboot_server.assert_has_calls(server_calls) else: self.assertEqual(5, scenario._reboot_server.call_count, "Reboot not called 5 times") scenario._reboot_server.assert_has_calls(server_calls) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_boot_soft_reboot(self): self._verify_reboot(soft=True) def test_boot_hard_reboot(self): self._verify_reboot(soft=False) def test_boot_and_delete_server(self): fake_server = object() scenario = servers.NovaServers(self.context) scenario._generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._delete_server = mock.MagicMock() scenario.sleep_between = mock.MagicMock() scenario.boot_and_delete_server("img", 0, 10, 20, fakearg="fakearg") 
scenario._boot_server.assert_called_once_with("img", 0, fakearg="fakearg") scenario.sleep_between.assert_called_once_with(10, 20) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_boot_and_delete_multiple_servers(self): scenario = servers.NovaServers(self.context) scenario._boot_servers = mock.Mock() scenario._delete_servers = mock.Mock() scenario.sleep_between = mock.Mock() scenario.boot_and_delete_multiple_servers("img", "flavor", count=15, min_sleep=10, max_sleep=20, fakearg="fakearg") scenario._boot_servers.assert_called_once_with("img", "flavor", 1, instances_amount=15, fakearg="fakearg") scenario.sleep_between.assert_called_once_with(10, 20) scenario._delete_servers.assert_called_once_with( scenario._boot_servers.return_value, force=False) def test_boot_and_list_server(self): scenario = servers.NovaServers(self.context) scenario._generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock() scenario._list_servers = mock.MagicMock() scenario.boot_and_list_server("img", 0, fakearg="fakearg") scenario._boot_server.assert_called_once_with("img", 0, fakearg="fakearg") scenario._list_servers.assert_called_once_with(True) def test_suspend_and_resume_server(self): fake_server = object() scenario = servers.NovaServers(self.context) scenario._generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._suspend_server = mock.MagicMock() scenario._resume_server = mock.MagicMock() scenario._delete_server = mock.MagicMock() scenario.suspend_and_resume_server("img", 0, fakearg="fakearg") scenario._boot_server.assert_called_once_with("img", 0, fakearg="fakearg") scenario._suspend_server.assert_called_once_with(fake_server) scenario._resume_server.assert_called_once_with(fake_server) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_pause_and_unpause_server(self): fake_server = object() scenario = servers.NovaServers(self.context) scenario._generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._pause_server = mock.MagicMock() scenario._unpause_server = mock.MagicMock() scenario._delete_server = mock.MagicMock() scenario.pause_and_unpause_server("img", 0, fakearg="fakearg") scenario._boot_server.assert_called_once_with("img", 0, fakearg="fakearg") scenario._pause_server.assert_called_once_with(fake_server) scenario._unpause_server.assert_called_once_with(fake_server) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_shelve_and_unshelve_server(self): fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._shelve_server = mock.MagicMock() scenario._unshelve_server = mock.MagicMock() scenario._delete_server = mock.MagicMock() scenario.shelve_and_unshelve_server("img", 0, fakearg="fakearg") scenario._boot_server.assert_called_once_with("img", 0, fakearg="fakearg") scenario._shelve_server.assert_called_once_with(fake_server) scenario._unshelve_server.assert_called_once_with(fake_server) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_list_servers(self): scenario = servers.NovaServers(self.context) scenario._list_servers = mock.MagicMock() scenario.list_servers(True) scenario._list_servers.assert_called_once_with(True) def test_boot_server_from_volume_and_delete(self): fake_server = object() 
scenario = servers.NovaServers(self.context) scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._generate_random_name = mock.MagicMock(return_value="name") scenario.sleep_between = mock.MagicMock() scenario._delete_server = mock.MagicMock() fake_volume = fakes.FakeVolumeManager().create() fake_volume.id = "volume_id" scenario._create_volume = mock.MagicMock(return_value=fake_volume) scenario.boot_server_from_volume_and_delete("img", 0, 5, 10, 20, fakearg="f") scenario._create_volume.assert_called_once_with(5, imageRef="img") scenario._boot_server.assert_called_once_with( "img", 0, block_device_mapping={"vda": "volume_id:::1"}, fakearg="f") scenario.sleep_between.assert_called_once_with(10, 20) scenario._delete_server.assert_called_once_with(fake_server, force=False) def _prepare_boot(self, nic=None, assert_nic=False): fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._generate_random_name = mock.MagicMock(return_value="name") kwargs = {"fakearg": "f"} expected_kwargs = {"fakearg": "f"} assert_nic = nic or assert_nic if nic: kwargs["nics"] = nic if assert_nic: self.clients("nova").networks.create("net-1") expected_kwargs["nics"] = nic or [{"net-id": "net-2"}] return scenario, kwargs, expected_kwargs def _verify_boot_server(self, nic=None, assert_nic=False): scenario, kwargs, expected_kwargs = self._prepare_boot( nic=nic, assert_nic=assert_nic) scenario.boot_server("img", 0, **kwargs) scenario._boot_server.assert_called_once_with( "img", 0, auto_assign_nic=False, **expected_kwargs) def test_boot_server_no_nics(self): self._verify_boot_server(nic=None, assert_nic=False) def test_boot_server_with_nic(self): self._verify_boot_server(nic=[{"net-id": "net-1"}], assert_nic=True) def test_snapshot_server(self): fake_server = object() fake_image = fakes.FakeImageManager()._create() fake_image.id = "image_id" scenario = servers.NovaServers(self.context) scenario._generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._create_image = mock.MagicMock(return_value=fake_image) scenario._delete_server = mock.MagicMock() scenario._delete_image = mock.MagicMock() scenario.snapshot_server("i", 0, fakearg=2) scenario._boot_server.assert_has_calls([ mock.call("i", 0, fakearg=2), mock.call("image_id", 0, fakearg=2)]) scenario._create_image.assert_called_once_with(fake_server) scenario._delete_server.assert_has_calls([ mock.call(fake_server, force=False), mock.call(fake_server, force=False)]) scenario._delete_image.assert_called_once_with(fake_image) def _test_resize(self, confirm=False): fake_server = object() fake_image = fakes.FakeImageManager()._create() fake_image.id = "image_id" flavor = mock.MagicMock() to_flavor = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._resize_confirm = mock.MagicMock() scenario._resize_revert = mock.MagicMock() scenario._resize = mock.MagicMock() scenario._delete_server = mock.MagicMock() kwargs = {"confirm": confirm} scenario.resize_server(fake_image, flavor, to_flavor, **kwargs) scenario._resize.assert_called_once_with(fake_server, to_flavor) if confirm: scenario._resize_confirm.assert_called_once_with(fake_server) else: scenario._resize_revert.assert_called_once_with(fake_server) def test_resize_with_confirm(self): 
self._test_resize(confirm=True) def test_resize_with_revert(self): self._test_resize(confirm=False) def test_boot_and_live_migrate_server(self): fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario.sleep_between = mock.MagicMock() scenario._find_host_to_migrate = mock.MagicMock( return_value="host_name") scenario._live_migrate = mock.MagicMock() scenario._delete_server = mock.MagicMock() scenario.boot_and_live_migrate_server("img", 0, min_sleep=10, max_sleep=20, fakearg="fakearg") scenario._boot_server.assert_called_once_with("img", 0, fakearg="fakearg") scenario.sleep_between.assert_called_once_with(10, 20) scenario._find_host_to_migrate.assert_called_once_with(fake_server) scenario._live_migrate.assert_called_once_with(fake_server, "host_name", False, False) scenario._delete_server.assert_called_once_with(fake_server) def test_boot_server_from_volume_and_live_migrate(self): fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario.sleep_between = mock.MagicMock() scenario._find_host_to_migrate = mock.MagicMock( return_value="host_name") scenario._live_migrate = mock.MagicMock() scenario._delete_server = mock.MagicMock() fake_volume = fakes.FakeVolumeManager().create() fake_volume.id = "volume_id" scenario._create_volume = mock.MagicMock(return_value=fake_volume) scenario.boot_server_from_volume_and_live_migrate("img", 0, 5, min_sleep=10, max_sleep=20, fakearg="f") scenario._create_volume.assert_called_once_with(5, imageRef="img") scenario._boot_server.assert_called_once_with( "img", 0, block_device_mapping={"vda": "volume_id:::1"}, fakearg="f") scenario.sleep_between.assert_called_once_with(10, 20) scenario._find_host_to_migrate.assert_called_once_with(fake_server) scenario._live_migrate.assert_called_once_with(fake_server, "host_name", False, False) scenario._delete_server.assert_called_once_with(fake_server, force=False) def test_boot_server_attach_created_volume_and_live_migrate(self): fake_volume = mock.MagicMock() fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._attach_volume = mock.MagicMock() scenario._detach_volume = mock.MagicMock() scenario.sleep_between = mock.MagicMock() scenario._find_host_to_migrate = mock.MagicMock( return_value="host_name") scenario._live_migrate = mock.MagicMock() scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._delete_server = mock.MagicMock() scenario._create_volume = mock.MagicMock(return_value=fake_volume) scenario._delete_volume = mock.MagicMock() image = "img" flavor = "flavor" size = 5 boot_kwargs = {"some_var": "asd"} scenario.boot_server_attach_created_volume_and_live_migrate( image, flavor, size, min_sleep=10, max_sleep=20, boot_server_kwargs=boot_kwargs) scenario._boot_server.assert_called_once_with(image, flavor, **boot_kwargs) scenario._create_volume.assert_called_once_with(size) scenario._attach_volume.assert_called_once_with(fake_server, fake_volume) scenario._detach_volume.assert_called_once_with(fake_server, fake_volume) scenario.sleep_between.assert_called_once_with(10, 20) scenario._live_migrate.assert_called_once_with(fake_server, "host_name", False, False) scenario._delete_volume.assert_called_once_with(fake_volume) 
scenario._delete_server.assert_called_once_with(fake_server) def _test_boot_and_migrate_server(self, confirm=False): fake_server = mock.MagicMock() scenario = servers.NovaServers(self.context) scenario._generate_random_name = mock.MagicMock(return_value="name") scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._stop_server = mock.MagicMock() scenario._migrate = mock.MagicMock() scenario._resize_confirm = mock.MagicMock() scenario._resize_revert = mock.MagicMock() scenario._delete_server = mock.MagicMock() kwargs = {"confirm": confirm} scenario.boot_and_migrate_server("img", 0, fakearg="fakearg", **kwargs) scenario._boot_server.assert_called_once_with("img", 0, fakearg="fakearg", confirm=confirm) scenario._stop_server.assert_called_once_with(fake_server) scenario._migrate.assert_called_once_with(fake_server) if confirm: scenario._resize_confirm.assert_called_once_with(fake_server, status="SHUTOFF") else: scenario._resize_revert.assert_called_once_with(fake_server, status="SHUTOFF") scenario._delete_server.assert_called_once_with(fake_server) def test_boot_and_migrate_server_with_confirm(self): self._test_boot_and_migrate_server(confirm=True) def test_boot_and_migrate_server_with_revert(self): self._test_boot_and_migrate_server(confirm=False) def test_boot_and_rebuild_server(self): scenario = servers.NovaServers(self.context) scenario._boot_server = mock.Mock() scenario._rebuild_server = mock.Mock() scenario._delete_server = mock.Mock() from_image = "img1" to_image = "img2" flavor = "flavor" scenario.boot_and_rebuild_server(from_image, to_image, flavor, fakearg="fakearg") scenario._boot_server.assert_called_once_with(from_image, flavor, fakearg="fakearg") server = scenario._boot_server.return_value scenario._rebuild_server.assert_called_once_with(server, to_image) scenario._delete_server.assert_called_once_with(server) @mock.patch(NOVA_SERVERS_MODULE + ".network_wrapper.wrap") def test_boot_and_associate_floating_ip(self, mock_wrap): scenario = servers.NovaServers(self.context) server = mock.Mock() scenario._boot_server = mock.Mock(return_value=server) scenario._associate_floating_ip = mock.Mock() image = "img" flavor = "flavor" scenario.boot_and_associate_floating_ip(image, flavor, fakearg="fakearg") scenario._boot_server.assert_called_once_with(image, flavor, fakearg="fakearg") net_wrap = mock_wrap.return_value net_wrap.create_floating_ip.assert_called_once_with( tenant_id=server.tenant_id) scenario._associate_floating_ip.assert_called_once_with( server, net_wrap.create_floating_ip.return_value["ip"])
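# Illustrative sketch (not part of the original test module): every test above
# follows the same arrange/act/assert pattern -- stub the scenario's atomic
# actions with mocks, run the scenario method, then assert on the recorded
# calls.  The scenario and method names come from the module under test; the
# helper function itself and its `context` argument are hypothetical.
import mock

from rally.plugins.openstack.scenarios.nova import servers


def example_boot_and_delete_pattern(context):
    fake_server = mock.MagicMock()
    scenario = servers.NovaServers(context)
    scenario._boot_server = mock.MagicMock(return_value=fake_server)
    scenario._delete_server = mock.MagicMock()
    scenario.sleep_between = mock.MagicMock()

    scenario.boot_and_delete_server("img", 0, 10, 20)

    scenario._boot_server.assert_called_once_with("img", 0)
    scenario.sleep_between.assert_called_once_with(10, 20)
    scenario._delete_server.assert_called_once_with(fake_server, force=False)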
# -*- coding: utf-8 -*- # ================================================================= # # Authors: Tom Kralidis <[email protected]> # # Copyright (c) 2015 Tom Kralidis # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # # ================================================================= import logging from pycsw.core import util from pycsw.core.etree import etree LOGGER = logging.getLogger(__name__) class OAIPMH(object): """OAI-PMH wrapper class""" def __init__(self, context, config): LOGGER.debug('Initializing OAI-PMH constants') self.oaipmh_version = '2.0' self.namespaces = { 'oai': 'http://www.openarchives.org/OAI/2.0/', 'oai_dc': 'http://www.openarchives.org/OAI/2.0/oai_dc/', 'xsi': 'http://www.w3.org/2001/XMLSchema-instance' } self.request_model = { 'Identify': [], 'ListSets': ['resumptiontoken'], 'ListMetadataFormats': ['identifier'], 'GetRecord': ['identifier', 'metadataprefix'], 'ListRecords': ['from', 'until', 'set', 'resumptiontoken', 'metadataprefix'], 'ListIdentifiers': ['from', 'until', 'set', 'resumptiontoken', 'metadataprefix'], } self.metadata_formats = { 'iso19139': { 'namespace': 'http://www.isotc211.org/2005/gmd', 'schema': 'http://www.isotc211.org/2005/gmd/gmd.xsd', 'identifier': '//gmd:fileIdentifier/gco:CharacterString', 'dateStamp': '//gmd:dateStamp/gco:DateTime|//gmd:dateStamp/gco:Date', 'setSpec': '//gmd:hierarchyLevel/gmd:MD_ScopeCode' }, 'csw-record': { 'namespace': 'http://www.opengis.net/cat/csw/2.0.2', 'schema': 'http://schemas.opengis.net/csw/2.0.2/record.xsd', 'identifier': '//dc:identifier', 'dateStamp': '//dct:modified', 'setSpec': '//dc:type' }, 'fgdc-std': { 'namespace': 'http://www.opengis.net/cat/csw/csdgm', 'schema': 'http://www.fgdc.gov/metadata/fgdc-std-001-1998.xsd', 'identifier': '//idinfo/datasetid', 'dateStamp': '//metainfo/metd', 'setSpec': '//dataset' }, 'oai_dc': { 'namespace': '%soai_dc/' % self.namespaces['oai'], 'schema': 'http://www.openarchives.org/OAI/2.0/oai_dc.xsd', 'identifier': '//dc:identifier', 'dateStamp': '//dct:modified', 'setSpec': '//dc:type' }, 'dif': { 'namespace': 'http://gcmd.gsfc.nasa.gov/Aboutus/xml/dif/', 'schema': 'http://gcmd.gsfc.nasa.gov/Aboutus/xml/dif/dif.xsd', 'identifier': '//dif:Entry_ID', 'dateStamp': '//dif:Last_DIF_Revision_Date', 'setSpec': '//dataset' }, 'gm03': { 'namespace': 'http://www.interlis.ch/INTERLIS2.3', 'schema': 'http://www.geocat.ch/internet/geocat/en/home/documentation/gm03.parsys.50316.downloadList.86742.DownloadFile.tmp/gm0321.zip', 'identifier': '//gm03:DATASECTION//gm03:fileIdentifer', 
'dateStamp': '//gm03:DATASECTION//gm03:dateStamp', 'setSpec': '//dataset' } } self.metadata_sets = { 'datasets': ('Datasets', 'dataset'), 'interactiveResources': ('Interactive Resources', 'service') } self.error_codes = { 'badArgument': 'InvalidParameterValue', 'badVerb': 'OperationNotSupported', 'idDoesNotExist': None, 'noRecordsMatch': None, } self.context = context self.context.namespaces.update(self.namespaces) self.context.namespaces.update({'gco': 'http://www.isotc211.org/2005/gco'}) self.config = config def request(self, kvp): """process OAI-PMH request""" kvpout = {'service': 'CSW', 'version': '2.0.2', 'mode': 'oaipmh'} LOGGER.debug('Incoming kvp: %s', kvp) if 'verb' in kvp: if 'metadataprefix' in kvp: self.metadata_prefix = kvp['metadataprefix'] try: kvpout['outputschema'] = self._get_metadata_prefix(kvp['metadataprefix']) except KeyError: kvpout['outputschema'] = kvp['metadataprefix'] else: self.metadata_prefix = 'csw-record' LOGGER.info('metadataPrefix: %s', self.metadata_prefix) if kvp['verb'] in ['ListRecords', 'ListIdentifiers', 'GetRecord']: kvpout['request'] = 'GetRecords' kvpout['resulttype'] = 'results' kvpout['typenames'] = 'csw:Record' kvpout['elementsetname'] = 'full' if kvp['verb'] in ['Identify', 'ListMetadataFormats', 'ListSets']: kvpout['request'] = 'GetCapabilities' elif kvp['verb'] == 'GetRecord': kvpout['request'] = 'GetRecordById' if 'identifier' in kvp: kvpout['id'] = kvp['identifier'] if ('outputschema' in kvpout and kvp['metadataprefix'] == 'oai_dc'): # just use default DC del kvpout['outputschema'] elif kvp['verb'] in ['ListRecords', 'ListIdentifiers']: if 'resumptiontoken' in kvp: kvpout['startposition'] = kvp['resumptiontoken'] if ('outputschema' in kvpout and kvp['verb'] == 'ListIdentifiers'): # simple output only pass #del kvpout['outputschema'] if ('outputschema' in kvpout and kvp['metadataprefix'] in ['dc', 'oai_dc']): # just use default DC del kvpout['outputschema'] start = end = None LOGGER.info('Scanning temporal parameters') if 'from' in kvp: start = 'dc:date >= %s' % kvp['from'] if 'until' in kvp: end = 'dc:date <= %s' % kvp['until'] if any([start is not None, end is not None]): if all([start is not None, end is not None]): time_query = '%s and %s' % (start, end) elif end is None: time_query = start elif start is None: time_query = end kvpout['constraintlanguage'] = 'CQL_TEXT' kvpout['constraint'] = time_query LOGGER.debug('Resulting parameters: %s', kvpout) return kvpout def response(self, response, kvp, repository, server_url): """process OAI-PMH request""" mode = kvp.pop('mode', None) if 'config' in kvp: config_val = kvp.pop('config') url = '%smode=oaipmh' % util.bind_url(server_url) node = etree.Element(util.nspath_eval('oai:OAI-PMH', self.namespaces), nsmap=self.namespaces) node.set(util.nspath_eval('xsi:schemaLocation', self.namespaces), '%s http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd' % self.namespaces['oai']) LOGGER.info(etree.tostring(node)) etree.SubElement(node, util.nspath_eval('oai:responseDate', self.namespaces)).text = util.get_today_and_now() etree.SubElement(node, util.nspath_eval('oai:request', self.namespaces), attrib=kvp).text = url if 'verb' not in kvp: etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = 'Missing \'verb\' parameter' return node if kvp['verb'] not in self.request_model.keys(): etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = 'Unknown verb \'%s\'' % kvp['verb'] return node if util.xmltag_split(response.tag) == 
'ExceptionReport': etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = response.xpath('//ows:ExceptionText|//ows20:ExceptionText', namespaces=self.context.namespaces)[0].text return node verb = kvp.pop('verb') if verb in ['GetRecord', 'ListIdentifiers', 'ListRecords']: if 'metadataprefix' not in kvp: etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = 'Missing metadataPrefix parameter' return node elif kvp['metadataprefix'] not in self.metadata_formats.keys(): etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = 'Invalid metadataPrefix parameter' return node for key, value in kvp.items(): if key != 'mode' and key not in self.request_model[verb]: etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = 'Illegal parameter \'%s\'' % key return node verbnode = etree.SubElement(node, util.nspath_eval('oai:%s' % verb, self.namespaces)) if verb == 'Identify': etree.SubElement(verbnode, util.nspath_eval('oai:repositoryName', self.namespaces)).text = self.config.get('metadata:main', 'identification_title') etree.SubElement(verbnode, util.nspath_eval('oai:baseURL', self.namespaces)).text = url etree.SubElement(verbnode, util.nspath_eval('oai:protocolVersion', self.namespaces)).text = '2.0' etree.SubElement(verbnode, util.nspath_eval('oai:adminEmail', self.namespaces)).text = self.config.get('metadata:main', 'contact_email') etree.SubElement(verbnode, util.nspath_eval('oai:earliestDatestamp', self.namespaces)).text = repository.query_insert('min') etree.SubElement(verbnode, util.nspath_eval('oai:deletedRecord', self.namespaces)).text = 'no' etree.SubElement(verbnode, util.nspath_eval('oai:granularity', self.namespaces)).text = 'YYYY-MM-DDThh:mm:ssZ' elif verb == 'ListSets': for key, value in sorted(self.metadata_sets.items()): setnode = etree.SubElement(verbnode, util.nspath_eval('oai:set', self.namespaces)) etree.SubElement(setnode, util.nspath_eval('oai:setSpec', self.namespaces)).text = key etree.SubElement(setnode, util.nspath_eval('oai:setName', self.namespaces)).text = value[0] elif verb == 'ListMetadataFormats': for key, value in sorted(self.metadata_formats.items()): mdfnode = etree.SubElement(verbnode, util.nspath_eval('oai:metadataFormat', self.namespaces)) etree.SubElement(mdfnode, util.nspath_eval('oai:metadataPrefix', self.namespaces)).text = key etree.SubElement(mdfnode, util.nspath_eval('oai:schema', self.namespaces)).text = value['schema'] etree.SubElement(mdfnode, util.nspath_eval('oai:metadataNamespace', self.namespaces)).text = value['namespace'] elif verb in ['GetRecord', 'ListIdentifiers', 'ListRecords']: if verb == 'GetRecord': # GetRecordById records = response.getchildren() else: # GetRecords records = response.getchildren()[1].getchildren() for child in records: recnode = etree.SubElement(verbnode, util.nspath_eval('oai:record', self.namespaces)) header = etree.SubElement(recnode, util.nspath_eval('oai:header', self.namespaces)) self._transform_element(header, response, 'oai:identifier') self._transform_element(header, response, 'oai:dateStamp') self._transform_element(header, response, 'oai:setSpec') if verb in ['GetRecord', 'ListRecords']: metadata = etree.SubElement(recnode, util.nspath_eval('oai:metadata', self.namespaces)) if 'metadataprefix' in kvp and kvp['metadataprefix'] == 'oai_dc': child.tag = util.nspath_eval('oai_dc:dc', self.namespaces) metadata.append(child) if verb != 'GetRecord': 
                complete_list_size = response.xpath('//@numberOfRecordsMatched')[0]
                next_record = response.xpath('//@nextRecord')[0]
                cursor = str(int(complete_list_size) - int(next_record) - 1)

                resumption_token = etree.SubElement(verbnode,
                    util.nspath_eval('oai:resumptionToken', self.namespaces),
                    completeListSize=complete_list_size,
                    cursor=cursor).text = next_record

        return node

    def _get_metadata_prefix(self, prefix):
        """Convenience function to return metadataPrefix as CSW outputschema"""
        try:
            outputschema = self.metadata_formats[prefix]['namespace']
        except KeyError:
            outputschema = prefix
        return outputschema

    def _transform_element(self, parent, element, elname):
        """tests for existence of a given xpath, writes out text if exists"""
        xpath = self.metadata_formats[self.metadata_prefix][elname.split(':')[1]]
        if xpath.startswith('//'):
            value = element.xpath(xpath, namespaces=self.context.namespaces)
            if value:
                value = value[0].text
        else:  # bare string literal
            value = xpath
        el = etree.SubElement(parent,
                              util.nspath_eval(elname, self.context.namespaces))
        if value:
            if elname == 'oai:setSpec':
                value = None
                for k, v in self.metadata_sets.items():
                    if v[1] == elname:
                        value = k
                        break
            el.text = value
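# Illustrative usage sketch (not part of the original module): OAIPMH.request()
# maps an incoming OAI-PMH KVP dict onto a CSW GetRecords KVP dict.  The _Ctx
# class below is a stand-in for the pycsw core context (only its namespaces
# dict is touched by __init__), and config is unused by request(); both are
# assumptions made to keep the example self-contained.


class _Ctx(object):
    namespaces = {}


oaipmh = OAIPMH(_Ctx(), config=None)
cswkvp = oaipmh.request({
    'verb': 'ListRecords',
    'metadataprefix': 'oai_dc',
    'from': '2015-01-01',
    'until': '2015-12-31',
})
# Per the code above this yields request='GetRecords', typenames='csw:Record',
# constraintlanguage='CQL_TEXT' and a dc:date range constraint.
assert cswkvp['request'] == 'GetRecords'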
# -*- coding: utf-8 -*-
"""
kombu.async.timer
=================

Timer scheduling Python callbacks.

"""
from __future__ import absolute_import

import heapq
import sys

from collections import namedtuple
from datetime import datetime
from functools import wraps
from time import time
from weakref import proxy as weakrefproxy

from kombu.five import monotonic
from kombu.log import get_logger

try:
    from pytz import utc
except ImportError:
    utc = None

DEFAULT_MAX_INTERVAL = 2
EPOCH = datetime.utcfromtimestamp(0).replace(tzinfo=utc)
IS_PYPY = hasattr(sys, 'pypy_version_info')

logger = get_logger(__name__)

__all__ = ['Entry', 'Timer', 'to_timestamp']

scheduled = namedtuple('scheduled', ('eta', 'priority', 'entry'))


def to_timestamp(d, default_timezone=utc):
    if isinstance(d, datetime):
        if d.tzinfo is None:
            d = d.replace(tzinfo=default_timezone)
        return max((d - EPOCH).total_seconds(), 0)
    return d


class Entry(object):
    if not IS_PYPY:  # pragma: no cover
        __slots__ = (
            'fun', 'args', 'kwargs', 'tref', 'cancelled',
            '_last_run', '__weakref__',
        )

    def __init__(self, fun, args=None, kwargs=None):
        self.fun = fun
        self.args = args or []
        self.kwargs = kwargs or {}
        self.tref = weakrefproxy(self)
        self._last_run = None
        self.cancelled = False

    def __call__(self):
        return self.fun(*self.args, **self.kwargs)

    def cancel(self):
        try:
            self.tref.cancelled = True
        except ReferenceError:  # pragma: no cover
            pass

    def __repr__(self):
        return '<TimerEntry: {0}(*{1!r}, **{2!r})'.format(
            self.fun.__name__, self.args, self.kwargs)

    def __hash__(self):
        return hash((self.fun, repr(self.args), repr(self.kwargs)))

    # must not use hash() to order entries
    def __lt__(self, other):
        return id(self) < id(other)

    def __gt__(self, other):
        return id(self) > id(other)

    def __le__(self, other):
        return id(self) <= id(other)

    def __ge__(self, other):
        return id(self) >= id(other)

    def __eq__(self, other):
        return hash(self) == hash(other)

    def __ne__(self, other):
        return not self.__eq__(other)


class Timer(object):
    """ETA scheduler."""
    Entry = Entry

    on_error = None

    def __init__(self, max_interval=None, on_error=None, **kwargs):
        self.max_interval = float(max_interval or DEFAULT_MAX_INTERVAL)
        self.on_error = on_error or self.on_error
        self._queue = []

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.stop()

    def call_at(self, eta, fun, args=(), kwargs={}, priority=0):
        return self.enter_at(self.Entry(fun, args, kwargs), eta, priority)

    def call_after(self, secs, fun, args=(), kwargs={}, priority=0):
        return self.enter_after(secs, self.Entry(fun, args, kwargs), priority)

    def call_repeatedly(self, secs, fun, args=(), kwargs={}, priority=0):
        tref = self.Entry(fun, args, kwargs)

        @wraps(fun)
        def _reschedules(*args, **kwargs):
            last, now = tref._last_run, monotonic()
            lsince = (now - tref._last_run) if last else secs
            try:
                if lsince and lsince >= secs:
                    tref._last_run = now
                    return fun(*args, **kwargs)
            finally:
                if not tref.cancelled:
                    last = tref._last_run
                    next = secs - (now - last) if last else secs
                    self.enter_after(next, tref, priority)

        tref.fun = _reschedules
        tref._last_run = None
        return self.enter_after(secs, tref, priority)

    def enter_at(self, entry, eta=None, priority=0, time=time):
        """Enter function into the scheduler.

        :param entry: Item to enter.
        :keyword eta: Scheduled time as a :class:`datetime.datetime` object.
        :keyword priority: Unused.

        """
        if eta is None:
            eta = time()
        if isinstance(eta, datetime):
            try:
                eta = to_timestamp(eta)
            except Exception as exc:
                if not self.handle_error(exc):
                    raise
                return
        return self._enter(eta, priority, entry)

    def enter_after(self, secs, entry, priority=0, time=time):
        return self.enter_at(entry, time() + secs, priority)

    def _enter(self, eta, priority, entry, push=heapq.heappush):
        push(self._queue, scheduled(eta, priority, entry))
        return entry

    def apply_entry(self, entry):
        try:
            entry()
        except Exception as exc:
            if not self.handle_error(exc):
                logger.error('Error in timer: %r', exc, exc_info=True)

    def handle_error(self, exc_info):
        if self.on_error:
            self.on_error(exc_info)
            return True

    def stop(self):
        pass

    def __iter__(self, min=min, nowfun=time,
                 pop=heapq.heappop, push=heapq.heappush):
        """This iterator yields a tuple of ``(entry, wait_seconds)``,
        where if entry is :const:`None` the caller should wait
        for ``wait_seconds`` until it polls the schedule again."""
        max_interval = self.max_interval
        queue = self._queue

        while 1:
            if queue:
                eventA = queue[0]
                now, eta = nowfun(), eventA[0]

                if now < eta:
                    yield min(eta - now, max_interval), None
                else:
                    eventB = pop(queue)

                    if eventB is eventA:
                        entry = eventA[2]
                        if not entry.cancelled:
                            yield None, entry
                        continue
                    else:
                        push(queue, eventB)
            else:
                yield None, None

    def clear(self):
        self._queue[:] = []  # atomic, without creating a new list.

    def cancel(self, tref):
        tref.cancel()

    def __len__(self):
        return len(self._queue)

    def __nonzero__(self):
        return True

    @property
    def queue(self, _pop=heapq.heappop):
        """Snapshot of underlying datastructure."""
        events = list(self._queue)
        return [_pop(v) for v in [events] * len(events)]

    @property
    def schedule(self):
        return self
#! /usr/bin/env python # # Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # https://developers.google.com/protocol-buffers/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Test for google.protobuf.json_format.""" __author__ = '[email protected] (Jie Luo)' import json import math import sys try: import unittest2 as unittest #PY26 except ImportError: import unittest from google.protobuf import any_pb2 from google.protobuf import duration_pb2 from google.protobuf import field_mask_pb2 from google.protobuf import struct_pb2 from google.protobuf import timestamp_pb2 from google.protobuf import wrappers_pb2 from google.protobuf.internal import well_known_types from google.protobuf import json_format from google.protobuf.util import json_format_proto3_pb2 class JsonFormatBase(unittest.TestCase): def FillAllFields(self, message): message.int32_value = 20 message.int64_value = -20 message.uint32_value = 3120987654 message.uint64_value = 12345678900 message.float_value = float('-inf') message.double_value = 3.1415 message.bool_value = True message.string_value = 'foo' message.bytes_value = b'bar' message.message_value.value = 10 message.enum_value = json_format_proto3_pb2.BAR # Repeated message.repeated_int32_value.append(0x7FFFFFFF) message.repeated_int32_value.append(-2147483648) message.repeated_int64_value.append(9007199254740992) message.repeated_int64_value.append(-9007199254740992) message.repeated_uint32_value.append(0xFFFFFFF) message.repeated_uint32_value.append(0x7FFFFFF) message.repeated_uint64_value.append(9007199254740992) message.repeated_uint64_value.append(9007199254740991) message.repeated_float_value.append(0) message.repeated_double_value.append(1E-15) message.repeated_double_value.append(float('inf')) message.repeated_bool_value.append(True) message.repeated_bool_value.append(False) message.repeated_string_value.append('Few symbols!#$,;') message.repeated_string_value.append('bar') message.repeated_bytes_value.append(b'foo') message.repeated_bytes_value.append(b'bar') message.repeated_message_value.add().value = 10 
message.repeated_message_value.add().value = 11 message.repeated_enum_value.append(json_format_proto3_pb2.FOO) message.repeated_enum_value.append(json_format_proto3_pb2.BAR) self.message = message def CheckParseBack(self, message, parsed_message): json_format.Parse(json_format.MessageToJson(message), parsed_message) self.assertEqual(message, parsed_message) def CheckError(self, text, error_message): message = json_format_proto3_pb2.TestMessage() self.assertRaisesRegex( json_format.ParseError, error_message, json_format.Parse, text, message) class JsonFormatTest(JsonFormatBase): def testEmptyMessageToJson(self): message = json_format_proto3_pb2.TestMessage() self.assertEqual(json_format.MessageToJson(message), '{}') parsed_message = json_format_proto3_pb2.TestMessage() self.CheckParseBack(message, parsed_message) def testPartialMessageToJson(self): message = json_format_proto3_pb2.TestMessage( string_value='test', repeated_int32_value=[89, 4]) self.assertEqual(json.loads(json_format.MessageToJson(message)), json.loads('{"stringValue": "test", ' '"repeatedInt32Value": [89, 4]}')) parsed_message = json_format_proto3_pb2.TestMessage() self.CheckParseBack(message, parsed_message) def testAllFieldsToJson(self): message = json_format_proto3_pb2.TestMessage() text = ('{"int32Value": 20, ' '"int64Value": "-20", ' '"uint32Value": 3120987654,' '"uint64Value": "12345678900",' '"floatValue": "-Infinity",' '"doubleValue": 3.1415,' '"boolValue": true,' '"stringValue": "foo",' '"bytesValue": "YmFy",' '"messageValue": {"value": 10},' '"enumValue": "BAR",' '"repeatedInt32Value": [2147483647, -2147483648],' '"repeatedInt64Value": ["9007199254740992", "-9007199254740992"],' '"repeatedUint32Value": [268435455, 134217727],' '"repeatedUint64Value": ["9007199254740992", "9007199254740991"],' '"repeatedFloatValue": [0],' '"repeatedDoubleValue": [1e-15, "Infinity"],' '"repeatedBoolValue": [true, false],' '"repeatedStringValue": ["Few symbols!#$,;", "bar"],' '"repeatedBytesValue": ["Zm9v", "YmFy"],' '"repeatedMessageValue": [{"value": 10}, {"value": 11}],' '"repeatedEnumValue": ["FOO", "BAR"]' '}') self.FillAllFields(message) self.assertEqual( json.loads(json_format.MessageToJson(message)), json.loads(text)) parsed_message = json_format_proto3_pb2.TestMessage() json_format.Parse(text, parsed_message) self.assertEqual(message, parsed_message) def testJsonEscapeString(self): message = json_format_proto3_pb2.TestMessage() if sys.version_info[0] < 3: message.string_value = '&\n<\"\r>\b\t\f\\\001/\xe2\x80\xa8\xe2\x80\xa9' else: message.string_value = '&\n<\"\r>\b\t\f\\\001/' message.string_value += (b'\xe2\x80\xa8\xe2\x80\xa9').decode('utf-8') self.assertEqual( json_format.MessageToJson(message), '{\n "stringValue": ' '"&\\n<\\\"\\r>\\b\\t\\f\\\\\\u0001/\\u2028\\u2029"\n}') parsed_message = json_format_proto3_pb2.TestMessage() self.CheckParseBack(message, parsed_message) text = '{"int32Value": "\u0031"}' json_format.Parse(text, message) self.assertEqual(message.int32_value, 1) def testAlwaysSeriliaze(self): message = json_format_proto3_pb2.TestMessage( string_value='foo') self.assertEqual( json.loads(json_format.MessageToJson(message, True)), json.loads('{' '"repeatedStringValue": [],' '"stringValue": "foo",' '"repeatedBoolValue": [],' '"repeatedUint32Value": [],' '"repeatedInt32Value": [],' '"enumValue": "FOO",' '"int32Value": 0,' '"floatValue": 0,' '"int64Value": "0",' '"uint32Value": 0,' '"repeatedBytesValue": [],' '"repeatedUint64Value": [],' '"repeatedDoubleValue": [],' '"bytesValue": "",' '"boolValue": false,' 
'"repeatedEnumValue": [],' '"uint64Value": "0",' '"doubleValue": 0,' '"repeatedFloatValue": [],' '"repeatedInt64Value": [],' '"repeatedMessageValue": []}')) parsed_message = json_format_proto3_pb2.TestMessage() self.CheckParseBack(message, parsed_message) def testIntegersRepresentedAsFloat(self): message = json_format_proto3_pb2.TestMessage() json_format.Parse('{"int32Value": -2.147483648e9}', message) self.assertEqual(message.int32_value, -2147483648) json_format.Parse('{"int32Value": 1e5}', message) self.assertEqual(message.int32_value, 100000) json_format.Parse('{"int32Value": 1.0}', message) self.assertEqual(message.int32_value, 1) def testMapFields(self): message = json_format_proto3_pb2.TestMap() message.bool_map[True] = 1 message.bool_map[False] = 2 message.int32_map[1] = 2 message.int32_map[2] = 3 message.int64_map[1] = 2 message.int64_map[2] = 3 message.uint32_map[1] = 2 message.uint32_map[2] = 3 message.uint64_map[1] = 2 message.uint64_map[2] = 3 message.string_map['1'] = 2 message.string_map['null'] = 3 self.assertEqual( json.loads(json_format.MessageToJson(message, True)), json.loads('{' '"boolMap": {"false": 2, "true": 1},' '"int32Map": {"1": 2, "2": 3},' '"int64Map": {"1": 2, "2": 3},' '"uint32Map": {"1": 2, "2": 3},' '"uint64Map": {"1": 2, "2": 3},' '"stringMap": {"1": 2, "null": 3}' '}')) parsed_message = json_format_proto3_pb2.TestMap() self.CheckParseBack(message, parsed_message) def testOneofFields(self): message = json_format_proto3_pb2.TestOneof() # Always print does not affect oneof fields. self.assertEqual( json_format.MessageToJson(message, True), '{}') message.oneof_int32_value = 0 self.assertEqual( json_format.MessageToJson(message, True), '{\n' ' "oneofInt32Value": 0\n' '}') parsed_message = json_format_proto3_pb2.TestOneof() self.CheckParseBack(message, parsed_message) def testSurrogates(self): # Test correct surrogate handling. message = json_format_proto3_pb2.TestMessage() json_format.Parse('{"stringValue": "\\uD83D\\uDE01"}', message) self.assertEqual(message.string_value, b'\xF0\x9F\x98\x81'.decode('utf-8', 'strict')) # Error case: unpaired high surrogate. self.CheckError( '{"stringValue": "\\uD83D"}', r'Invalid \\uXXXX escape|Unpaired.*surrogate') # Unpaired low surrogate. 
self.CheckError( '{"stringValue": "\\uDE01"}', r'Invalid \\uXXXX escape|Unpaired.*surrogate') def testTimestampMessage(self): message = json_format_proto3_pb2.TestTimestamp() message.value.seconds = 0 message.value.nanos = 0 message.repeated_value.add().seconds = 20 message.repeated_value[0].nanos = 1 message.repeated_value.add().seconds = 0 message.repeated_value[1].nanos = 10000 message.repeated_value.add().seconds = 100000000 message.repeated_value[2].nanos = 0 # Maximum time message.repeated_value.add().seconds = 253402300799 message.repeated_value[3].nanos = 999999999 # Minimum time message.repeated_value.add().seconds = -62135596800 message.repeated_value[4].nanos = 0 self.assertEqual( json.loads(json_format.MessageToJson(message, True)), json.loads('{' '"value": "1970-01-01T00:00:00Z",' '"repeatedValue": [' ' "1970-01-01T00:00:20.000000001Z",' ' "1970-01-01T00:00:00.000010Z",' ' "1973-03-03T09:46:40Z",' ' "9999-12-31T23:59:59.999999999Z",' ' "0001-01-01T00:00:00Z"' ']' '}')) parsed_message = json_format_proto3_pb2.TestTimestamp() self.CheckParseBack(message, parsed_message) text = (r'{"value": "1970-01-01T00:00:00.01+08:00",' r'"repeatedValue":[' r' "1970-01-01T00:00:00.01+08:30",' r' "1970-01-01T00:00:00.01-01:23"]}') json_format.Parse(text, parsed_message) self.assertEqual(parsed_message.value.seconds, -8 * 3600) self.assertEqual(parsed_message.value.nanos, 10000000) self.assertEqual(parsed_message.repeated_value[0].seconds, -8.5 * 3600) self.assertEqual(parsed_message.repeated_value[1].seconds, 3600 + 23 * 60) def testDurationMessage(self): message = json_format_proto3_pb2.TestDuration() message.value.seconds = 1 message.repeated_value.add().seconds = 0 message.repeated_value[0].nanos = 10 message.repeated_value.add().seconds = -1 message.repeated_value[1].nanos = -1000 message.repeated_value.add().seconds = 10 message.repeated_value[2].nanos = 11000000 message.repeated_value.add().seconds = -315576000000 message.repeated_value.add().seconds = 315576000000 self.assertEqual( json.loads(json_format.MessageToJson(message, True)), json.loads('{' '"value": "1s",' '"repeatedValue": [' ' "0.000000010s",' ' "-1.000001s",' ' "10.011s",' ' "-315576000000s",' ' "315576000000s"' ']' '}')) parsed_message = json_format_proto3_pb2.TestDuration() self.CheckParseBack(message, parsed_message) def testFieldMaskMessage(self): message = json_format_proto3_pb2.TestFieldMask() message.value.paths.append('foo.bar') message.value.paths.append('bar') self.assertEqual( json_format.MessageToJson(message, True), '{\n' ' "value": "foo.bar,bar"\n' '}') parsed_message = json_format_proto3_pb2.TestFieldMask() self.CheckParseBack(message, parsed_message) def testWrapperMessage(self): message = json_format_proto3_pb2.TestWrapper() message.bool_value.value = False message.int32_value.value = 0 message.string_value.value = '' message.bytes_value.value = b'' message.repeated_bool_value.add().value = True message.repeated_bool_value.add().value = False message.repeated_int32_value.add() self.assertEqual( json.loads(json_format.MessageToJson(message, True)), json.loads('{\n' ' "int32Value": 0,' ' "boolValue": false,' ' "stringValue": "",' ' "bytesValue": "",' ' "repeatedBoolValue": [true, false],' ' "repeatedInt32Value": [0],' ' "repeatedUint32Value": [],' ' "repeatedFloatValue": [],' ' "repeatedDoubleValue": [],' ' "repeatedBytesValue": [],' ' "repeatedInt64Value": [],' ' "repeatedUint64Value": [],' ' "repeatedStringValue": []' '}')) parsed_message = json_format_proto3_pb2.TestWrapper() self.CheckParseBack(message, 
parsed_message) def testStructMessage(self): message = json_format_proto3_pb2.TestStruct() message.value['name'] = 'Jim' message.value['age'] = 10 message.value['attend'] = True message.value['email'] = None message.value.get_or_create_struct('address')['city'] = 'SFO' message.value['address']['house_number'] = 1024 struct_list = message.value.get_or_create_list('list') struct_list.extend([6, 'seven', True, False, None]) struct_list.add_struct()['subkey2'] = 9 message.repeated_value.add()['age'] = 11 message.repeated_value.add() self.assertEqual( json.loads(json_format.MessageToJson(message, False)), json.loads( '{' ' "value": {' ' "address": {' ' "city": "SFO", ' ' "house_number": 1024' ' }, ' ' "age": 10, ' ' "name": "Jim", ' ' "attend": true, ' ' "email": null, ' ' "list": [6, "seven", true, false, null, {"subkey2": 9}]' ' },' ' "repeatedValue": [{"age": 11}, {}]' '}')) parsed_message = json_format_proto3_pb2.TestStruct() self.CheckParseBack(message, parsed_message) def testValueMessage(self): message = json_format_proto3_pb2.TestValue() message.value.string_value = 'hello' message.repeated_value.add().number_value = 11.1 message.repeated_value.add().bool_value = False message.repeated_value.add().null_value = 0 self.assertEqual( json.loads(json_format.MessageToJson(message, False)), json.loads( '{' ' "value": "hello",' ' "repeatedValue": [11.1, false, null]' '}')) parsed_message = json_format_proto3_pb2.TestValue() self.CheckParseBack(message, parsed_message) # Can't parse back if the Value message is not set. message.repeated_value.add() self.assertEqual( json.loads(json_format.MessageToJson(message, False)), json.loads( '{' ' "value": "hello",' ' "repeatedValue": [11.1, false, null, null]' '}')) message.Clear() json_format.Parse('{"value": null}', message) self.assertEqual(message.value.WhichOneof('kind'), 'null_value') def testListValueMessage(self): message = json_format_proto3_pb2.TestListValue() message.value.values.add().number_value = 11.1 message.value.values.add().null_value = 0 message.value.values.add().bool_value = True message.value.values.add().string_value = 'hello' message.value.values.add().struct_value['name'] = 'Jim' message.repeated_value.add().values.add().number_value = 1 message.repeated_value.add() self.assertEqual( json.loads(json_format.MessageToJson(message, False)), json.loads( '{"value": [11.1, null, true, "hello", {"name": "Jim"}]\n,' '"repeatedValue": [[1], []]}')) parsed_message = json_format_proto3_pb2.TestListValue() self.CheckParseBack(message, parsed_message) def testAnyMessage(self): message = json_format_proto3_pb2.TestAny() value1 = json_format_proto3_pb2.MessageType() value2 = json_format_proto3_pb2.MessageType() value1.value = 1234 value2.value = 5678 message.value.Pack(value1) message.repeated_value.add().Pack(value1) message.repeated_value.add().Pack(value2) message.repeated_value.add() self.assertEqual( json.loads(json_format.MessageToJson(message, True)), json.loads( '{\n' ' "repeatedValue": [ {\n' ' "@type": "type.googleapis.com/proto3.MessageType",\n' ' "value": 1234\n' ' }, {\n' ' "@type": "type.googleapis.com/proto3.MessageType",\n' ' "value": 5678\n' ' },\n' ' {}],\n' ' "value": {\n' ' "@type": "type.googleapis.com/proto3.MessageType",\n' ' "value": 1234\n' ' }\n' '}\n')) parsed_message = json_format_proto3_pb2.TestAny() self.CheckParseBack(message, parsed_message) # Must print @type first test_message = json_format_proto3_pb2.TestMessage( bool_value=True, int32_value=20, int64_value=-20, uint32_value=20, uint64_value=20, 
double_value=3.14, string_value='foo') message.Clear() message.value.Pack(test_message) self.assertEqual( json_format.MessageToJson(message, False)[0:68], '{\n' ' "value": {\n' ' "@type": "type.googleapis.com/proto3.TestMessage"') def testWellKnownInAnyMessage(self): message = any_pb2.Any() int32_value = wrappers_pb2.Int32Value() int32_value.value = 1234 message.Pack(int32_value) self.assertEqual( json.loads(json_format.MessageToJson(message, True)), json.loads( '{\n' ' "@type": \"type.googleapis.com/google.protobuf.Int32Value\",\n' ' "value": 1234\n' '}\n')) parsed_message = any_pb2.Any() self.CheckParseBack(message, parsed_message) timestamp = timestamp_pb2.Timestamp() message.Pack(timestamp) self.assertEqual( json.loads(json_format.MessageToJson(message, True)), json.loads( '{\n' ' "@type": "type.googleapis.com/google.protobuf.Timestamp",\n' ' "value": "1970-01-01T00:00:00Z"\n' '}\n')) self.CheckParseBack(message, parsed_message) duration = duration_pb2.Duration() duration.seconds = 1 message.Pack(duration) self.assertEqual( json.loads(json_format.MessageToJson(message, True)), json.loads( '{\n' ' "@type": "type.googleapis.com/google.protobuf.Duration",\n' ' "value": "1s"\n' '}\n')) self.CheckParseBack(message, parsed_message) field_mask = field_mask_pb2.FieldMask() field_mask.paths.append('foo.bar') field_mask.paths.append('bar') message.Pack(field_mask) self.assertEqual( json.loads(json_format.MessageToJson(message, True)), json.loads( '{\n' ' "@type": "type.googleapis.com/google.protobuf.FieldMask",\n' ' "value": "foo.bar,bar"\n' '}\n')) self.CheckParseBack(message, parsed_message) struct_message = struct_pb2.Struct() struct_message['name'] = 'Jim' message.Pack(struct_message) self.assertEqual( json.loads(json_format.MessageToJson(message, True)), json.loads( '{\n' ' "@type": "type.googleapis.com/google.protobuf.Struct",\n' ' "value": {"name": "Jim"}\n' '}\n')) self.CheckParseBack(message, parsed_message) nested_any = any_pb2.Any() int32_value.value = 5678 nested_any.Pack(int32_value) message.Pack(nested_any) self.assertEqual( json.loads(json_format.MessageToJson(message, True)), json.loads( '{\n' ' "@type": "type.googleapis.com/google.protobuf.Any",\n' ' "value": {\n' ' "@type": "type.googleapis.com/google.protobuf.Int32Value",\n' ' "value": 5678\n' ' }\n' '}\n')) self.CheckParseBack(message, parsed_message) def testParseNull(self): message = json_format_proto3_pb2.TestMessage() parsed_message = json_format_proto3_pb2.TestMessage() self.FillAllFields(parsed_message) json_format.Parse('{"int32Value": null, ' '"int64Value": null, ' '"uint32Value": null,' '"uint64Value": null,' '"floatValue": null,' '"doubleValue": null,' '"boolValue": null,' '"stringValue": null,' '"bytesValue": null,' '"messageValue": null,' '"enumValue": null,' '"repeatedInt32Value": null,' '"repeatedInt64Value": null,' '"repeatedUint32Value": null,' '"repeatedUint64Value": null,' '"repeatedFloatValue": null,' '"repeatedDoubleValue": null,' '"repeatedBoolValue": null,' '"repeatedStringValue": null,' '"repeatedBytesValue": null,' '"repeatedMessageValue": null,' '"repeatedEnumValue": null' '}', parsed_message) self.assertEqual(message, parsed_message) # Null and {} should have different behavior for sub message. self.assertFalse(parsed_message.HasField('message_value')) json_format.Parse('{"messageValue": {}}', parsed_message) self.assertTrue(parsed_message.HasField('message_value')) # Null is not allowed to be used as an element in repeated field. 
    self.assertRaisesRegex(
        json_format.ParseError,
        'Failed to parse repeatedInt32Value field: '
        'null is not allowed to be used as an element in a repeated field.',
        json_format.Parse,
        '{"repeatedInt32Value":[1, null]}',
        parsed_message)

  def testNanFloat(self):
    message = json_format_proto3_pb2.TestMessage()
    message.float_value = float('nan')
    text = '{\n  "floatValue": "NaN"\n}'
    self.assertEqual(json_format.MessageToJson(message), text)
    parsed_message = json_format_proto3_pb2.TestMessage()
    json_format.Parse(text, parsed_message)
    self.assertTrue(math.isnan(parsed_message.float_value))

  def testParseEmptyText(self):
    self.CheckError('',
                    r'Failed to load JSON: (Expecting value)|(No JSON).')

  def testParseEnumValue(self):
    message = json_format_proto3_pb2.TestMessage()
    text = '{"enumValue": 0}'
    json_format.Parse(text, message)
    text = '{"enumValue": 1}'
    json_format.Parse(text, message)
    self.CheckError(
        '{"enumValue": "baz"}',
        'Failed to parse enumValue field: Invalid enum value baz '
        'for enum type proto3.EnumType.')

  def testParseBadIdentifer(self):
    self.CheckError('{int32Value: 1}',
                    (r'Failed to load JSON: Expecting property name'
                     r'( enclosed in double quotes)?: line 1'))
    self.CheckError('{"unknownName": 1}',
                    'Message type "proto3.TestMessage" has no field named '
                    '"unknownName".')

  def testIgnoreUnknownField(self):
    text = '{"unknownName": 1}'
    parsed_message = json_format_proto3_pb2.TestMessage()
    json_format.Parse(text, parsed_message, ignore_unknown_fields=True)
    text = ('{\n'
            ' "repeatedValue": [ {\n'
            ' "@type": "type.googleapis.com/proto3.MessageType",\n'
            ' "unknownName": 1\n'
            ' }]\n'
            '}\n')
    parsed_message = json_format_proto3_pb2.TestAny()
    json_format.Parse(text, parsed_message, ignore_unknown_fields=True)

  def testDuplicateField(self):
    # Duplicate key check is not supported for python2.6
    if sys.version_info < (2, 7):
      return
    self.CheckError('{"int32Value": 1,\n"int32Value":2}',
                    'Failed to load JSON: duplicate key int32Value.')

  def testInvalidBoolValue(self):
    self.CheckError('{"boolValue": 1}',
                    'Failed to parse boolValue field: '
                    'Expected true or false without quotes.')
    self.CheckError('{"boolValue": "true"}',
                    'Failed to parse boolValue field: '
                    'Expected true or false without quotes.')

  def testInvalidIntegerValue(self):
    message = json_format_proto3_pb2.TestMessage()
    text = '{"int32Value": 0x12345}'
    self.assertRaises(json_format.ParseError,
                      json_format.Parse, text, message)
    self.CheckError('{"int32Value": 1.5}',
                    'Failed to parse int32Value field: '
                    'Couldn\'t parse integer: 1.5.')
    self.CheckError('{"int32Value": 012345}',
                    (r'Failed to load JSON: Expecting \'?,\'? delimiter: '
                     r'line 1.'))
    self.CheckError('{"int32Value": " 1 "}',
                    'Failed to parse int32Value field: '
                    'Couldn\'t parse integer: " 1 ".')
    self.CheckError('{"int32Value": "1 "}',
                    'Failed to parse int32Value field: '
                    'Couldn\'t parse integer: "1 ".')
    self.CheckError('{"int32Value": 12345678901234567890}',
                    'Failed to parse int32Value field: Value out of range: '
                    '12345678901234567890.')
    self.CheckError('{"uint32Value": -1}',
                    'Failed to parse uint32Value field: '
                    'Value out of range: -1.')

  def testInvalidFloatValue(self):
    self.CheckError('{"floatValue": "nan"}',
                    'Failed to parse floatValue field: Couldn\'t '
                    'parse float "nan", use "NaN" instead.')

  def testInvalidBytesValue(self):
    self.CheckError('{"bytesValue": "AQI"}',
                    'Failed to parse bytesValue field: Incorrect padding.')
    self.CheckError('{"bytesValue": "AQI*"}',
                    'Failed to parse bytesValue field: Incorrect padding.')

  def testInvalidMap(self):
    message = json_format_proto3_pb2.TestMap()
    text = '{"int32Map": {"null": 2, "2": 3}}'
    self.assertRaisesRegex(
        json_format.ParseError,
        'Failed to parse int32Map field: invalid literal',
        json_format.Parse, text, message)
    text = '{"int32Map": {1: 2, "2": 3}}'
    self.assertRaisesRegex(
        json_format.ParseError,
        (r'Failed to load JSON: Expecting property name'
         r'( enclosed in double quotes)?: line 1'),
        json_format.Parse, text, message)
    text = '{"boolMap": {"null": 1}}'
    self.assertRaisesRegex(
        json_format.ParseError,
        'Failed to parse boolMap field: Expected "true" or "false", not null.',
        json_format.Parse, text, message)
    if sys.version_info < (2, 7):
      return
    text = r'{"stringMap": {"a": 3, "\u0061": 2}}'
    self.assertRaisesRegex(
        json_format.ParseError,
        'Failed to load JSON: duplicate key a',
        json_format.Parse, text, message)

  def testInvalidTimestamp(self):
    message = json_format_proto3_pb2.TestTimestamp()
    text = '{"value": "10000-01-01T00:00:00.00Z"}'
    self.assertRaisesRegex(
        json_format.ParseError,
        'time data \'10000-01-01T00:00:00\' does not match'
        ' format \'%Y-%m-%dT%H:%M:%S\'.',
        json_format.Parse, text, message)
    text = '{"value": "1970-01-01T00:00:00.0123456789012Z"}'
    self.assertRaisesRegex(
        well_known_types.ParseError,
        'nanos 0123456789012 more than 9 fractional digits.',
        json_format.Parse, text, message)
    text = '{"value": "1972-01-01T01:00:00.01+08"}'
    self.assertRaisesRegex(
        well_known_types.ParseError,
        (r'Invalid timezone offset value: \+08.'),
        json_format.Parse, text, message)
    # Time smaller than minimum time.
    text = '{"value": "0000-01-01T00:00:00Z"}'
    self.assertRaisesRegex(
        json_format.ParseError,
        'Failed to parse value field: year is out of range.',
        json_format.Parse, text, message)
    # Time bigger than maximum time.
    message.value.seconds = 253402300800
    self.assertRaisesRegex(
        OverflowError,
        'date value out of range',
        json_format.MessageToJson, message)

  def testInvalidOneof(self):
    message = json_format_proto3_pb2.TestOneof()
    text = '{"oneofInt32Value": 1, "oneofStringValue": "2"}'
    self.assertRaisesRegex(
        json_format.ParseError,
        'Message type "proto3.TestOneof"'
        ' should not have multiple "oneof_value" oneof fields.',
        json_format.Parse, text, message)

  def testInvalidListValue(self):
    message = json_format_proto3_pb2.TestListValue()
    text = '{"value": 1234}'
    self.assertRaisesRegex(
        json_format.ParseError,
        r'Failed to parse value field: ListValue must be in \[\] which is 1234',
        json_format.Parse, text, message)

  def testInvalidStruct(self):
    message = json_format_proto3_pb2.TestStruct()
    text = '{"value": 1234}'
    self.assertRaisesRegex(
        json_format.ParseError,
        'Failed to parse value field: Struct must be in a dict which is 1234',
        json_format.Parse, text, message)

  def testInvalidAny(self):
    message = any_pb2.Any()
    text = '{"@type": "type.googleapis.com/google.protobuf.Int32Value"}'
    self.assertRaisesRegex(
        KeyError,
        'value',
        json_format.Parse, text, message)
    text = '{"value": 1234}'
    self.assertRaisesRegex(
        json_format.ParseError,
        '@type is missing when parsing any message.',
        json_format.Parse, text, message)
    text = '{"@type": "type.googleapis.com/MessageNotExist", "value": 1234}'
    self.assertRaisesRegex(
        TypeError,
        'Can not find message descriptor by type_url: '
        'type.googleapis.com/MessageNotExist.',
        json_format.Parse, text, message)
    # Only last part is to be used: b/25630112
    text = (r'{"@type": "incorrect.googleapis.com/google.protobuf.Int32Value",'
            r'"value": 1234}')
    json_format.Parse(text, message)

  def testPreservingProtoFieldNames(self):
    message = json_format_proto3_pb2.TestMessage()
    message.int32_value = 12345
    self.assertEqual('{\n  "int32Value": 12345\n}',
                     json_format.MessageToJson(message))
    self.assertEqual('{\n  "int32_value": 12345\n}',
                     json_format.MessageToJson(message, False, True))
    # Parsers accept both original proto field names and lowerCamelCase names.
    message = json_format_proto3_pb2.TestMessage()
    json_format.Parse('{"int32Value": 54321}', message)
    self.assertEqual(54321, message.int32_value)
    json_format.Parse('{"int32_value": 12345}', message)
    self.assertEqual(12345, message.int32_value)

  def testParseDict(self):
    expected = 12345
    js_dict = {'int32Value': expected}
    message = json_format_proto3_pb2.TestMessage()
    json_format.ParseDict(js_dict, message)
    self.assertEqual(expected, message.int32_value)

  def testMessageToDict(self):
    message = json_format_proto3_pb2.TestMessage()
    message.int32_value = 12345
    expected = {'int32Value': 12345}
    self.assertEqual(expected,
                     json_format.MessageToDict(message))

  def testJsonName(self):
    message = json_format_proto3_pb2.TestCustomJsonName()
    message.value = 12345
    self.assertEqual('{\n  "@value": 12345\n}',
                     json_format.MessageToJson(message))
    parsed_message = json_format_proto3_pb2.TestCustomJsonName()
    self.CheckParseBack(message, parsed_message)


if __name__ == '__main__':
  unittest.main()
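

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the tests above
# exercise json_format round-trips through generated messages from
# json_format_proto3_pb2, which require compiled test protos.  The helper
# below shows the same MessageToJson/Parse/MessageToDict calls against the
# bundled google.protobuf.Struct well-known type, so it only assumes the
# standard protobuf runtime; the function name is hypothetical and it is not
# invoked by the tests.
def _json_format_round_trip_sketch():
  from google.protobuf import json_format
  from google.protobuf import struct_pb2

  message = struct_pb2.Struct()
  message['name'] = 'example'   # Struct supports dict-style assignment
  message['enabled'] = True

  text = json_format.MessageToJson(message)   # serialize to a JSON string
  parsed = struct_pb2.Struct()
  json_format.Parse(text, parsed)             # parse it back
  assert json_format.MessageToDict(parsed) == json_format.MessageToDict(message)
  return text

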
from django.db import models
from django.contrib.auth.models import User
import calendar
import ipaddress
import uuid
from django.core.exceptions import ValidationError
import string
from Crypto.PublicKey import RSA
from uwsgi_it_api.config import UWSGI_IT_BASE_UID
import random
import datetime
import os.path
from django.db.models.signals import post_delete

# Create your models here.


def generate_uuid():
    return str(uuid.uuid4())


def generate_rsa():
    return RSA.generate(2048).exportKey()


class Customer(models.Model):
    user = models.OneToOneField(User)
    vat = models.CharField(max_length=255, blank=True, null=True)
    company = models.CharField(max_length=255, blank=True, null=True)
    ctime = models.DateTimeField(auto_now_add=True)
    mtime = models.DateTimeField(auto_now=True)
    uuid = models.CharField(max_length=36, default=generate_uuid, unique=True)
    rsa_key = models.TextField(default=generate_rsa, unique=True)
    admin_note = models.TextField(blank=True, null=True)

    @property
    def rsa_key_lines(self):
        return self.rsa_key.split('\n')

    @property
    def rsa_pubkey(self):
        return RSA.importKey(self.rsa_key).publickey().exportKey()

    @property
    def rsa_pubkey_lines(self):
        return self.rsa_pubkey.split('\n')

    def __unicode__(self):
        return self.user.username

    class Meta:
        ordering = ['user__username']


class CustomerAttribute(models.Model):
    customer = models.ForeignKey(Customer)
    namespace = models.CharField(max_length=255)
    key = models.CharField(max_length=255)
    value = models.TextField(blank=True)

    class Meta:
        unique_together = ('customer', 'namespace', 'key')


class Datacenter(models.Model):
    name = models.CharField(max_length=255, unique=True)
    description = models.TextField(blank=True, null=True)
    note = models.TextField(blank=True, null=True)
    ctime = models.DateTimeField(auto_now_add=True)
    mtime = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.name


class PrivilegedClient(models.Model):
    name = models.CharField(max_length=255, unique=True)
    address = models.GenericIPAddressField()

    def __unicode__(self):
        return '{} - {}'.format(self.name, self.address)


class Server(models.Model):
    name = models.CharField(max_length=255, unique=True)
    address = models.GenericIPAddressField()
    hd = models.CharField(max_length=255)
    memory = models.PositiveIntegerField("Memory MB")
    storage = models.PositiveIntegerField("Storage MB")
    ctime = models.DateTimeField(auto_now_add=True)
    mtime = models.DateTimeField(auto_now=True)
    uuid = models.CharField(max_length=36, default=generate_uuid, unique=True)
    etc_resolv_conf = models.TextField("/etc/resolv.conf", default='', blank=True)
    etc_hosts = models.TextField("/etc/hosts", default='', blank=True)
    weight = models.PositiveIntegerField(default=9999)
    datacenter = models.ForeignKey('Datacenter', null=True, blank=True)
    note = models.TextField(blank=True, null=True)
    owner = models.ForeignKey(Customer, null=True, blank=True)
    ssd = models.BooleanField('SSD', default=False)
    portmappings_mtime = models.DateTimeField(auto_now=True)
    systemd = models.BooleanField('systemd', default=False)

    @property
    def used_memory(self):
        n = self.container_set.all().aggregate(models.Sum('memory'))[
            'memory__sum']
        if not n:
            return 0
        return n

    @property
    def used_storage(self):
        n = self.container_set.all().aggregate(models.Sum('storage'))[
            'storage__sum']
        if not n:
            return 0
        return n

    @property
    def free_memory(self):
        return self.memory - self.used_memory

    @property
    def free_storage(self):
        return self.storage - self.used_storage

    def __unicode__(self):
        features = []
        if self.ssd:
            features.append('SSD')
        if self.owner:
            features.append('dedicated')
        space = ''
        if features:
            space = ' '
        return "%s - %s%s%s" % (
            self.name, self.address, space, ','.join(features))

    @property
    def etc_resolv_conf_lines(self):
        return self.etc_resolv_conf.replace(
            '\r', '\n').replace('\n\n', '\n').split('\n')

    @property
    def etc_hosts_lines(self):
        return self.etc_hosts.replace('\r', '\n').replace(
            '\n\n', '\n').split('\n')

    @property
    def munix(self):
        return calendar.timegm(self.mtime.utctimetuple())

    @property
    def portmappings_munix(self):
        return calendar.timegm(self.portmappings_mtime.utctimetuple())


class ServerFileMetadata(models.Model):
    filename = models.CharField(max_length=255, unique=True)

    def __unicode__(self):
        return self.filename


class ServerMetadata(models.Model):
    server = models.ForeignKey(Server)
    metadata = models.ForeignKey(ServerFileMetadata)
    value = models.TextField(blank=True, null=True)
    ctime = models.DateTimeField(auto_now_add=True)
    mtime = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return "%s - %s " % (self.server.name, self.metadata.filename)

    class Meta:
        unique_together = ('server', 'metadata')


class Legion(models.Model):
    name = models.CharField(max_length=255, unique=True)
    address = models.GenericIPAddressField()
    ctime = models.DateTimeField(auto_now_add=True)
    mtime = models.DateTimeField(auto_now=True)
    uuid = models.CharField(max_length=36, default=generate_uuid, unique=True)
    note = models.TextField(blank=True, null=True)
    customer = models.ForeignKey(Customer, null=True, blank=True)
    key = models.CharField(max_length=64)
    nodes = models.ManyToManyField(Server, through='LegionNode')
    quorum = models.PositiveIntegerField(default=0)

    def __unicode__(self):
        return "%s - %s " % (self.name, self.address)


class LegionNode(models.Model):
    legion = models.ForeignKey(Legion)
    server = models.ForeignKey(Server)
    weight = models.PositiveIntegerField(default=9999)

    def __unicode__(self):
        return "%s on %s " % (self.server, self.legion)


class FloatingAddress(models.Model):
    address = models.GenericIPAddressField()
    customer = models.ForeignKey(Customer, null=True, blank=True)
    legion = models.ForeignKey(Legion, null=True, blank=True)
    mapped_to_server = models.ForeignKey(Server, null=True, blank=True)
    note = models.TextField(blank=True, null=True)

    def __unicode__(self):
        return "%s - %s" % (self.address, self.mapped_to_server)

    class Meta:
        verbose_name_plural = 'Floating Addresses'


class Distro(models.Model):
    name = models.CharField(max_length=255, unique=True)
    path = models.CharField(max_length=255, unique=True)
    ctime = models.DateTimeField(auto_now_add=True)
    mtime = models.DateTimeField(auto_now=True)
    uuid = models.CharField(max_length=36, default=generate_uuid, unique=True)
    note = models.TextField(blank=True, null=True)

    def __unicode__(self):
        return self.name


class CustomDistro(models.Model):
    container = models.ForeignKey('Container')
    name = models.CharField(max_length=255)
    path = models.CharField(max_length=255)
    uuid = models.CharField(max_length=36, default=generate_uuid, unique=True)
    note = models.TextField(blank=True, null=True)
    tags = models.ManyToManyField('Tag', blank=True)

    def __unicode__(self):
        return self.name

    def clean(self):
        allowed = string.ascii_letters + string.digits + '._-'
        for letter in self.path:
            if letter not in allowed:
                raise ValidationError(
                    'invalid path for custom distro, can contain only "%s"' % allowed)

    class Meta:
        unique_together = (('container', 'name'), ('container', 'path'))


def start_of_epoch():
    return datetime.datetime.fromtimestamp(1)


class Rule(models.Model):
    container = models.ForeignKey('Container')
    direction = models.CharField(max_length=17,
                                 choices=(('in', 'in'), ('out', 'out')),
                                 blank=False, null=False)
    src = models.CharField(max_length=30, blank=False, null=False)
    dst = models.CharField(max_length=30, blank=False, null=False)
    action = models.CharField(max_length=17,
                              choices=(('allow', 'allow'),
                                       ('deny', 'deny'),
                                       ('gateway', 'gateway')),
                              blank=False, null=False)
    target = models.CharField(max_length=17, blank=True, null=True)
    priority = models.IntegerField(default=0)
    ctime = models.DateTimeField(auto_now_add=True)
    mtime = models.DateTimeField(auto_now=True)
    uuid = models.CharField(max_length=36, default=generate_uuid, unique=True)
    note = models.TextField(blank=True, null=True)

    def __unicode__(self):
        return '{} {} {} {} {} {}'.format(self.container, self.direction,
                                          self.src, self.dst, self.action,
                                          self.priority)

    class Meta:
        ordering = ['-priority']


class Container(models.Model):
    name = models.CharField(max_length=255)
    ssh_keys_raw = models.TextField("SSH keys", blank=True, null=True)
    distro = models.ForeignKey(Distro, null=True, blank=True)
    server = models.ForeignKey(Server)
    # in megabytes
    memory = models.PositiveIntegerField("Memory MB")
    storage = models.PositiveIntegerField("Storage MB")
    customer = models.ForeignKey(Customer)
    ctime = models.DateTimeField(auto_now_add=True)
    mtime = models.DateTimeField(auto_now=True)
    uuid = models.CharField(max_length=36, default=generate_uuid, unique=True)
    jid = models.CharField(max_length=255, blank=True, null=True)
    jid_secret = models.CharField(max_length=255, blank=True, null=True)
    jid_destinations = models.CharField(max_length=255, blank=True, null=True)
    pushover_user = models.CharField(max_length=255, blank=True, null=True)
    pushover_token = models.CharField(max_length=255, blank=True, null=True)
    pushover_sound = models.CharField(max_length=255, blank=True, null=True)
    pushbullet_token = models.CharField(max_length=255, blank=True, null=True)
    slack_webhook = models.CharField(max_length=255, blank=True, null=True)
    quota_threshold = models.PositiveIntegerField("Quota threshold", default=90)
    tags = models.ManyToManyField('Tag', blank=True)
    nofollow = models.BooleanField(default=False)
    note = models.TextField(blank=True, null=True)
    accounted = models.BooleanField(default=False)
    last_reboot = models.DateTimeField(default=start_of_epoch)
    ssh_keys_mtime = models.DateTimeField(default=start_of_epoch)
    max_alarms = models.PositiveIntegerField(default=100)
    alarm_key = models.CharField(max_length=36, null=True, blank=True)
    alarm_freq = models.PositiveIntegerField(default=60)
    custom_distros_storage = models.BooleanField(default=False)
    custom_distro = models.ForeignKey(CustomDistro, null=True, blank=True,
                                      related_name='+')
    admin_note = models.TextField(blank=True, null=True)
    admin_order = models.CharField(max_length=255, blank=True, null=True)
    dmz = models.BooleanField(default=False)
    secret_uuid = models.CharField(max_length=36, blank=True, null=True)

    def regenerate_secret_uuid(self):
        self.secret_uuid = generate_uuid()
        self.save()

    def __unicode__(self):
        return "%d (%s)" % (self.uid, self.name)

    # do not allow over-allocating memory or storage
    def clean(self):
        if self.alarm_freq < 60:
            self.alarm_freq = 60
        # hack for empty server value
        try:
            if self.server is None:
                return
        except:
            return
        current_storage = \
            self.server.container_set.all().aggregate(models.Sum('storage'))[
                'storage__sum']
        current_memory = \
            self.server.container_set.all().aggregate(models.Sum('memory'))[
                'memory__sum']
        if not current_storage:
            current_storage = 0
        if not current_memory:
            current_memory = 0
        if self.pk:
            orig = Container.objects.get(pk=self.pk)
            current_storage -= orig.storage
            current_memory -= orig.memory
        if current_storage + self.storage > self.server.storage:
            raise ValidationError(
                'the requested storage size is not available on the specified server')
        if current_memory + self.memory > self.server.memory:
            raise ValidationError(
                'the requested memory size is not available on the specified server')

    # force a reboot if required
    def save(self, *args, **kwargs):
        interesting_fields = ('name', 'distro', 'server', 'memory', 'storage',
                              'customer', 'alarm_freq', 'jid', 'jid_secret',
                              'jid_destinations', 'pushover_user',
                              'pushover_token', 'pushover_sound',
                              'pushbullet_token', 'slack_webhook',
                              'quota_threshold', 'custom_distros_storage',
                              'custom_distro', 'nofollow', 'dmz')
        if self.pk is not None:
            orig = Container.objects.get(pk=self.pk)
            set_reboot = False
            for field in interesting_fields:
                if getattr(self, field) != getattr(orig, field):
                    set_reboot = True
                    break
            if set_reboot:
                self.last_reboot = datetime.datetime.now()
            if self.ssh_keys_raw != orig.ssh_keys_raw:
                self.ssh_keys_mtime = datetime.datetime.now()
        super(Container, self).save(*args, **kwargs)

    @property
    def combo_alarms(self):
        alarms = []
        if self.pushover_user and self.pushover_token:
            alarms.append('pushover')
        if self.pushbullet_token:
            alarms.append('pushbullet')
        if self.slack_webhook:
            alarms.append('slack')
        if self.jid and self.jid_secret and self.jid_destinations:
            alarms.append('xmpp')
        return ','.join(alarms)

    @property
    def rand_pid(self):
        return random.randrange(1, 32768)

    @property
    def uid(self):
        return UWSGI_IT_BASE_UID + self.pk

    @property
    def cgroup_uid(self):
        return self.uid

    @property
    def hostname(self):
        h = ''
        allowed = string.ascii_letters + string.digits + '-'
        for char in self.name:
            if char in allowed:
                h += char
            else:
                h += '-'
        return h

    @property
    def ip(self):
        # skip the first two addresses (10.0.0.1 for the gateway,
        # 10.0.0.2 for the api), so pk=1 maps to 10.0.0.3 and so on
        addr = self.pk + 2
        addr0 = 0x0a000000
        return ipaddress.IPv4Address(addr0 | (addr & 0x00ffffff))

    @property
    def munix(self):
        return calendar.timegm(self.last_reboot.utctimetuple())

    @property
    def ssh_keys_munix(self):
        return calendar.timegm(self.ssh_keys_mtime.utctimetuple())

    @property
    def ssh_keys(self):
        # try to generate a clean list of ssh keys
        if not self.ssh_keys_raw:
            return []
        cleaned = self.ssh_keys_raw.replace('\r', '\n').replace('\n\n', '\n')
        return cleaned.split('\n')

    @property
    def quota(self):
        return self.storage * (1024 * 1024)

    @property
    def memory_limit_in_bytes(self):
        return self.memory * (1024 * 1024)

    @property
    def links(self):
        l = []
        for link in self.containerlink_set.all():
            direction_in = {'direction': 'in',
                            'src': link.to.ip, 'src_mask': 32,
                            'dst': link.container.ip, 'dst_mask': 32,
                            'action': 'allow', 'target': ''}
            direction_out = {'direction': 'out',
                             'src': link.container.ip, 'src_mask': 32,
                             'dst': link.to.ip, 'dst_mask': 32,
                             'action': 'allow', 'target': ''}
            if link.container.server != link.to.server:
                direction_out['action'] = 'gateway'
                direction_out['target'] = "%s:999" % link.to.server.address
            l.append(direction_in)
            l.append(direction_out)
        return l

    @property
    def linked_to(self):
        return [l.to.uid for l in self.containerlink_set.all()]


class ContainerLink(models.Model):
    container = models.ForeignKey(Container)
    to = models.ForeignKey(Container, related_name='+')

    def __unicode__(self):
        return "%s --> %s" % (self.container, self.to)

    class Meta:
        unique_together = ('container', 'to')

    def clean(self):
        if self.container == self.to:
            raise ValidationError("cannot link with myself")


class Portmap(models.Model):
    proto = models.CharField(max_length=4,
                             choices=(('tcp',) * 2, ('udp',) * 2))
    public_port = models.PositiveIntegerField()
    container = models.ForeignKey(Container)
    private_port = models.PositiveIntegerField()
    ctime = models.DateTimeField(auto_now_add=True)
    mtime = models.DateTimeField(auto_now=True)

    def clean(self):
        if self.public_port < 1024 or self.public_port > 65535:
            raise ValidationError("invalid public port range")
        if self.public_port in (1999, 3022, 3026):
            raise ValidationError("invalid public port range")
        if self.private_port < 1024 or self.private_port > 65535:
            raise ValidationError("invalid private port range")

    @property
    def munix(self):
        return calendar.timegm(self.mtime.utctimetuple())

    class Meta:
        verbose_name_plural = 'Port Mapping'
        unique_together = (('proto', 'public_port', 'container'),
                           ('proto', 'private_port', 'container'))


def portmap_post_delete_handler(sender, instance, **kwargs):
    # update() bypasses save(), so the server's auto_now mtime is not touched
    Server.objects.filter(pk=instance.container.server.pk).update(
        portmappings_mtime=datetime.datetime.now())

post_delete.connect(portmap_post_delete_handler, Portmap)


class Loopbox(models.Model):
    container = models.ForeignKey(Container)
    filename = models.CharField(max_length=64)
    mountpoint = models.CharField(max_length=255)
    ro = models.BooleanField(default=False)
    tags = models.ManyToManyField('Tag', blank=True)

    def clean(self):
        checks = ('..', './', '/.', '//')
        starts = ('/',)
        ends = ('/',)
        equals = ('etc', 'logs', 'run', 'tmp', 'vassals')
        for check in checks:
            if check in self.filename:
                raise ValidationError("invalid filename")
            if check in self.mountpoint:
                raise ValidationError("invalid mountpoint")
        for start in starts:
            if self.filename.startswith(start):
                raise ValidationError("invalid filename")
            if self.mountpoint.startswith(start):
                raise ValidationError("invalid mountpoint")
        for end in ends:
            if self.filename.endswith(end):
                raise ValidationError("invalid filename")
            if self.mountpoint.endswith(end):
                raise ValidationError("invalid mountpoint")
        for equal in equals:
            if self.filename == equal:
                raise ValidationError("invalid filename")
            if self.mountpoint == equal:
                raise ValidationError("invalid mountpoint")

    class Meta:
        verbose_name_plural = 'Loopboxes'
        unique_together = (('container', 'filename'),
                           ('container', 'mountpoint'))


class Alarm(models.Model):
    container = models.ForeignKey(Container)
    unix = models.DateTimeField()
    level = models.PositiveIntegerField(choices=((0, 'system'),
                                                 (1, 'user'),
                                                 (2, 'exception'),
                                                 (3, 'traceback'),
                                                 (4, 'log')))
    # in the format #xxxxxx
    color = models.CharField(max_length=7, default='#ffffff')
    msg = models.TextField()
    line = models.PositiveIntegerField(null=True, blank=True)
    func = models.CharField(max_length=255, null=True, blank=True)
    filename = models.CharField(max_length=255, null=True, blank=True)
    _class = models.CharField('class', max_length=255, blank=True, null=True)
    vassal = models.CharField(max_length=255, blank=True, null=True)

    def save(self, *args, **kwargs):
        if len(self.color) != 7:
            raise ValidationError('invalid color')
        if not self.color.startswith('#'):
            raise ValidationError('invalid color')
        # how many alarms?
        alarms = self.container.alarm_set.count()
        if alarms + 1 > self.container.max_alarms:
            oldest = self.container.alarm_set.all().order_by('unix')[0]
            oldest.delete()
        super(Alarm, self).save(*args, **kwargs)

    class Meta:
        ordering = ['-unix']


class Domain(models.Model):
    """
    domains are mapped to customers, each container of the customer
    can subscribe to them
    """
    name = models.CharField(max_length=255, unique=True)
    customer = models.ForeignKey(Customer)
    ctime = models.DateTimeField(auto_now_add=True)
    mtime = models.DateTimeField(auto_now=True)
    uuid = models.CharField(max_length=36, default=generate_uuid, unique=True)
    note = models.TextField(blank=True, null=True)
    tags = models.ManyToManyField('Tag', blank=True)

    def __unicode__(self):
        return self.name

    @property
    def munix(self):
        return calendar.timegm(self.mtime.utctimetuple())


class Tag(models.Model):
    name = models.CharField(max_length=255)
    customer = models.ForeignKey(Customer)
    ctime = models.DateTimeField(auto_now_add=True)
    mtime = models.DateTimeField(auto_now=True)
    note = models.TextField(blank=True, null=True)

    def __unicode__(self):
        return self.name

    class Meta:
        unique_together = ('name', 'customer')


class News(models.Model):
    content = models.TextField()
    public = models.BooleanField(default=False)
    ctime = models.DateTimeField(auto_now_add=True)
    mtime = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ['-ctime']
        verbose_name_plural = 'News'


class CustomService(models.Model):
    """
    Pretty low level model for storing customer configurations
    out of the container concept (like rawrouter services or
    https non-sni proxies)
    """
    name = models.CharField(max_length=255, unique=True)
    customer = models.ForeignKey(Customer)
    server = models.ForeignKey(Server)
    config = models.TextField()
    ctime = models.DateTimeField(auto_now_add=True)
    mtime = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.name

    @property
    def munix(self):
        return calendar.timegm(self.mtime.utctimetuple())


class ContainerMetric(models.Model):
    """
    each metric is stored in a different table
    """
    container = models.ForeignKey(Container)
    year = models.PositiveIntegerField(null=True)
    month = models.PositiveIntegerField(null=True)
    day = models.PositiveIntegerField(null=True)
    # this is a blob containing raw metrics
    json = models.TextField(null=True)

    def __unicode__(self):
        return "%s-%s-%s" % (self.year, self.month, self.day)

    class Meta:
        abstract = True
        unique_together = ('container', 'year', 'month', 'day')


class DomainMetric(models.Model):
    domain = models.ForeignKey(Domain)
    container = models.ForeignKey(Container)
    year = models.PositiveIntegerField(null=True)
    month = models.PositiveIntegerField(null=True)
    day = models.PositiveIntegerField(null=True)
    # this is a blob containing raw metrics
    json = models.TextField(null=True)

    def __unicode__(self):
        return "%s-%s-%s" % (self.year, self.month, self.day)

    class Meta:
        abstract = True
        unique_together = ('domain', 'container', 'year', 'month', 'day')


# real metrics now

class NetworkRXContainerMetric(ContainerMetric):
    """
    stores values from the tuntap router
    """
    pass


class NetworkTXContainerMetric(ContainerMetric):
    """
    stores values from the tuntap router
    """
    pass


class CPUContainerMetric(ContainerMetric):
    """
    stores values from the container cgroup
    """
    pass


# stores values from the container cgroup
class MemoryContainerMetric(ContainerMetric):
    pass


# stores values from the container cgroup
class MemoryRSSContainerMetric(ContainerMetric):
    pass


class MemoryCacheContainerMetric(ContainerMetric):
    """
    stores values from the container cgroup
    """
    pass


class IOReadContainerMetric(ContainerMetric):
    """
    stores values from the container cgroup
    """
    pass


class IOWriteContainerMetric(ContainerMetric):
    """
    stores values from the container cgroup
    """
    pass


class QuotaContainerMetric(ContainerMetric):
    """
    uses perl Quota package
    """
    pass


class HitsDomainMetric(DomainMetric):
    pass


class NetworkRXDomainMetric(DomainMetric):
    pass


class NetworkTXDomainMetric(DomainMetric):
    pass