import string
from sympy import (
Symbol, symbols, Dummy, S, Sum, Rational, oo, zoo, pi, I, simplify,
expand_func, diff, EulerGamma, cancel, re, im)
from sympy.functions import (
bernoulli, harmonic, bell, fibonacci, lucas, euler, catalan, binomial,
gamma, sqrt, hyper, log, digamma, trigamma, polygamma, factorial, sin,
cos, cot, zeta)
from sympy.utilities.pytest import XFAIL, raises
x = Symbol('x')
def test_bernoulli():
assert bernoulli(0) == 1
assert bernoulli(1) == Rational(-1, 2)
assert bernoulli(2) == Rational(1, 6)
assert bernoulli(3) == 0
assert bernoulli(4) == Rational(-1, 30)
assert bernoulli(5) == 0
assert bernoulli(6) == Rational(1, 42)
assert bernoulli(7) == 0
assert bernoulli(8) == Rational(-1, 30)
assert bernoulli(10) == Rational(5, 66)
assert bernoulli(1000001) == 0
assert bernoulli(0, x) == 1
assert bernoulli(1, x) == x - Rational(1, 2)
assert bernoulli(2, x) == x**2 - x + Rational(1, 6)
assert bernoulli(3, x) == x**3 - (3*x**2)/2 + x/2
# Should be fast; computed with mpmath
b = bernoulli(1000)
assert b.p % 10**10 == 7950421099
assert b.q == 342999030
b = bernoulli(10**6, evaluate=False).evalf()
assert str(b) == '-2.23799235765713e+4767529'
def test_fibonacci():
assert [fibonacci(n) for n in range(-3, 5)] == [2, -1, 1, 0, 1, 1, 2, 3]
assert fibonacci(100) == 354224848179261915075
assert [lucas(n) for n in range(-3, 5)] == [-4, 3, -1, 2, 1, 3, 4, 7]
assert lucas(100) == 792070839848372253127
assert fibonacci(1, x) == 1
assert fibonacci(2, x) == x
assert fibonacci(3, x) == x**2 + 1
assert fibonacci(4, x) == x**3 + 2*x
def test_bell():
assert [bell(n) for n in range(8)] == [1, 1, 2, 5, 15, 52, 203, 877]
assert bell(0, x) == 1
assert bell(1, x) == x
assert bell(2, x) == x**2 + x
assert bell(5, x) == x**5 + 10*x**4 + 25*x**3 + 15*x**2 + x
X = symbols('x:6')
# X = (x0, x1, ..., x5)
# Conveniently, X[1] = x1, X[2] = x2, etc., matching the usual notation,
# but we must supply the zero-based sequence X[1:] = (x1, ..., x5).
assert bell(6, 2, X[1:]) == 6*X[5]*X[1] + 15*X[4]*X[2] + 10*X[3]**2
assert bell(
6, 3, X[1:]) == 15*X[4]*X[1]**2 + 60*X[3]*X[2]*X[1] + 15*X[2]**3
X = (1, 10, 100, 1000, 10000)
assert bell(6, 2, X) == (6 + 15 + 10)*10000
X = (1, 2, 3, 3, 5)
assert bell(6, 2, X) == 6*5 + 15*3*2 + 10*3**2
X = (1, 2, 3, 5)
assert bell(6, 3, X) == 15*5 + 60*3*2 + 15*2**3
def test_harmonic():
n = Symbol("n")
assert harmonic(n, 0) == n
assert harmonic(n).evalf() == harmonic(n)
assert harmonic(n, 1) == harmonic(n)
assert harmonic(1, n).evalf() == harmonic(1, n)
assert harmonic(0, 1) == 0
assert harmonic(1, 1) == 1
assert harmonic(2, 1) == Rational(3, 2)
assert harmonic(3, 1) == Rational(11, 6)
assert harmonic(4, 1) == Rational(25, 12)
assert harmonic(0, 2) == 0
assert harmonic(1, 2) == 1
assert harmonic(2, 2) == Rational(5, 4)
assert harmonic(3, 2) == Rational(49, 36)
assert harmonic(4, 2) == Rational(205, 144)
assert harmonic(0, 3) == 0
assert harmonic(1, 3) == 1
assert harmonic(2, 3) == Rational(9, 8)
assert harmonic(3, 3) == Rational(251, 216)
assert harmonic(4, 3) == Rational(2035, 1728)
assert harmonic(oo, -1) == S.NaN
assert harmonic(oo, 0) == oo
assert harmonic(oo, S.Half) == oo
assert harmonic(oo, 1) == oo
assert harmonic(oo, 2) == (pi**2)/6
assert harmonic(oo, 3) == zeta(3)
def test_harmonic_rational():
ne = S(6)
no = S(5)
pe = S(8)
po = S(9)
qe = S(10)
qo = S(13)
Heee = harmonic(ne + pe/qe)
Aeee = (-log(10) + 2*(-1/S(4) + sqrt(5)/4)*log(sqrt(-sqrt(5)/8 + 5/S(8)))
+ 2*(-sqrt(5)/4 - 1/S(4))*log(sqrt(sqrt(5)/8 + 5/S(8)))
+ pi*(1/S(4) + sqrt(5)/4)/(2*sqrt(-sqrt(5)/8 + 5/S(8)))
+ 13944145/S(4720968))
Heeo = harmonic(ne + pe/qo)
Aeeo = (-log(26) + 2*log(sin(3*pi/13))*cos(4*pi/13) + 2*log(sin(2*pi/13))*cos(32*pi/13)
+ 2*log(sin(5*pi/13))*cos(80*pi/13) - 2*log(sin(6*pi/13))*cos(5*pi/13)
- 2*log(sin(4*pi/13))*cos(pi/13) + pi*cot(5*pi/13)/2 - 2*log(sin(pi/13))*cos(3*pi/13)
+ 2422020029/S(702257080))
Heoe = harmonic(ne + po/qe)
Aeoe = (-log(20) + 2*(1/S(4) + sqrt(5)/4)*log(-1/S(4) + sqrt(5)/4)
+ 2*(-1/S(4) + sqrt(5)/4)*log(sqrt(-sqrt(5)/8 + 5/S(8)))
+ 2*(-sqrt(5)/4 - 1/S(4))*log(sqrt(sqrt(5)/8 + 5/S(8)))
+ 2*(-sqrt(5)/4 + 1/S(4))*log(1/S(4) + sqrt(5)/4)
+ 11818877030/S(4286604231) - pi*sqrt(sqrt(5)/8 + 5/S(8))/(-sqrt(5)/2 + 1/S(2)) )
Heoo = harmonic(ne + po/qo)
Aeoo = (-log(26) + 2*log(sin(3*pi/13))*cos(54*pi/13) + 2*log(sin(4*pi/13))*cos(6*pi/13)
+ 2*log(sin(6*pi/13))*cos(108*pi/13) - 2*log(sin(5*pi/13))*cos(pi/13)
- 2*log(sin(pi/13))*cos(5*pi/13) + pi*cot(4*pi/13)/2
- 2*log(sin(2*pi/13))*cos(3*pi/13) + 11669332571/S(3628714320))
Hoee = harmonic(no + pe/qe)
Aoee = (-log(10) + 2*(-1/S(4) + sqrt(5)/4)*log(sqrt(-sqrt(5)/8 + 5/S(8)))
+ 2*(-sqrt(5)/4 - 1/S(4))*log(sqrt(sqrt(5)/8 + 5/S(8)))
+ pi*(1/S(4) + sqrt(5)/4)/(2*sqrt(-sqrt(5)/8 + 5/S(8)))
+ 779405/S(277704))
Hoeo = harmonic(no + pe/qo)
Aoeo = (-log(26) + 2*log(sin(3*pi/13))*cos(4*pi/13) + 2*log(sin(2*pi/13))*cos(32*pi/13)
+ 2*log(sin(5*pi/13))*cos(80*pi/13) - 2*log(sin(6*pi/13))*cos(5*pi/13)
- 2*log(sin(4*pi/13))*cos(pi/13) + pi*cot(5*pi/13)/2
- 2*log(sin(pi/13))*cos(3*pi/13) + 53857323/S(16331560))
Hooe = harmonic(no + po/qe)
Aooe = (-log(20) + 2*(1/S(4) + sqrt(5)/4)*log(-1/S(4) + sqrt(5)/4)
+ 2*(-1/S(4) + sqrt(5)/4)*log(sqrt(-sqrt(5)/8 + 5/S(8)))
+ 2*(-sqrt(5)/4 - 1/S(4))*log(sqrt(sqrt(5)/8 + 5/S(8)))
+ 2*(-sqrt(5)/4 + 1/S(4))*log(1/S(4) + sqrt(5)/4)
+ 486853480/S(186374097) - pi*sqrt(sqrt(5)/8 + 5/S(8))/(2*(-sqrt(5)/4 + 1/S(4))))
Hooo = harmonic(no + po/qo)
Aooo = (-log(26) + 2*log(sin(3*pi/13))*cos(54*pi/13) + 2*log(sin(4*pi/13))*cos(6*pi/13)
+ 2*log(sin(6*pi/13))*cos(108*pi/13) - 2*log(sin(5*pi/13))*cos(pi/13)
- 2*log(sin(pi/13))*cos(5*pi/13) + pi*cot(4*pi/13)/2
- 2*log(sin(2*pi/13))*cos(3*pi/13) + 383693479/S(125128080))
H = [Heee, Heeo, Heoe, Heoo, Hoee, Hoeo, Hooe, Hooo]
A = [Aeee, Aeeo, Aeoe, Aeoo, Aoee, Aoeo, Aooe, Aooo]
for h, a in zip(H, A):
e = expand_func(h).doit()
assert cancel(e/a) == 1
assert h.n() == a.n()
def test_harmonic_evalf():
assert str(harmonic(1.5).evalf(n=10)) == '1.280372306'
assert str(harmonic(1.5, 2).evalf(n=10)) == '1.154576311' # issue 7443
def test_harmonic_rewrite_polygamma():
n = Symbol("n")
m = Symbol("m")
assert harmonic(n).rewrite(digamma) == polygamma(0, n + 1) + EulerGamma
assert harmonic(n).rewrite(trigamma) == polygamma(0, n + 1) + EulerGamma
assert harmonic(n).rewrite(polygamma) == polygamma(0, n + 1) + EulerGamma
assert harmonic(n,3).rewrite(polygamma) == polygamma(2, n + 1)/2 - polygamma(2, 1)/2
assert harmonic(n,m).rewrite(polygamma) == (-1)**m*(polygamma(m - 1, 1) - polygamma(m - 1, n + 1))/factorial(m - 1)
assert expand_func(harmonic(n+4)) == harmonic(n) + 1/(n + 4) + 1/(n + 3) + 1/(n + 2) + 1/(n + 1)
assert expand_func(harmonic(n-4)) == harmonic(n) - 1/(n - 1) - 1/(n - 2) - 1/(n - 3) - 1/n
assert harmonic(n, m).rewrite("tractable") == harmonic(n, m).rewrite(polygamma)
@XFAIL
def test_harmonic_limit_fail():
n = Symbol("n")
m = Symbol("m")
# For m > 1:
assert limit(harmonic(n, m), n, oo) == zeta(m)
@XFAIL
def test_harmonic_rewrite_sum_fail():
n = Symbol("n")
m = Symbol("m")
_k = Dummy("k")
assert harmonic(n).rewrite(Sum) == Sum(1/_k, (_k, 1, n))
assert harmonic(n, m).rewrite(Sum) == Sum(_k**(-m), (_k, 1, n))
def replace_dummy(expr, sym):
dum = expr.atoms(Dummy)
if not dum:
return expr
assert len(dum) == 1
return expr.xreplace({dum.pop(): sym})
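# Illustrative sketch of why replace_dummy is needed: two Dummy symbols never
# compare equal, even with the same name, so a rewrite that introduces a fresh
# Dummy cannot be compared against a hand-built Sum directly.
#
#   from sympy import Dummy
#   assert Dummy("k") != Dummy("k")   # distinct dummies, despite equal names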
def test_harmonic_rewrite_sum():
n = Symbol("n")
m = Symbol("m")
_k = Dummy("k")
assert replace_dummy(harmonic(n).rewrite(Sum), _k) == Sum(1/_k, (_k, 1, n))
assert replace_dummy(harmonic(n, m).rewrite(Sum), _k) == Sum(_k**(-m), (_k, 1, n))
def test_euler():
assert euler(0) == 1
assert euler(1) == 0
assert euler(2) == -1
assert euler(3) == 0
assert euler(4) == 5
assert euler(6) == -61
assert euler(8) == 1385
assert euler(20, evaluate=False) != 370371188237525
n = Symbol('n', integer=True)
assert euler(n) != -1
assert euler(n).subs(n, 2) == -1
assert euler(20).evalf() == 370371188237525.0
assert euler(20, evaluate=False).evalf() == 370371188237525.0
assert euler(n).rewrite(Sum) == euler(n)
# XXX: the original intent of the _j and _k dummy variables in this test is unclear
assert euler(2*n + 1).rewrite(Sum) == 0
@XFAIL
def test_euler_failing():
# depends on dummy variables being implemented https://github.com/sympy/sympy/issues/5665
assert euler(2*n).rewrite(Sum) == I*Sum(Sum((-1)**_j*2**(-_k)*I**(-_k)*(-2*_j + _k)**(2*n + 1)*binomial(_k, _j)/_k, (_j, 0, _k)), (_k, 1, 2*n + 1))
def test_catalan():
assert catalan(1) == 1
assert catalan(2) == 2
assert catalan(3) == 5
assert catalan(4) == 14
assert catalan(x) == catalan(x)
assert catalan(2*x).rewrite(binomial) == binomial(4*x, 2*x)/(2*x + 1)
assert catalan(Rational(1, 2)).rewrite(gamma) == 8/(3*pi)
assert catalan(3*x).rewrite(gamma) == 4**(
3*x)*gamma(3*x + Rational(1, 2))/(sqrt(pi)*gamma(3*x + 2))
assert catalan(x).rewrite(hyper) == hyper((-x + 1, -x), (2,), 1)
assert diff(catalan(x), x) == (polygamma(
0, x + Rational(1, 2)) - polygamma(0, x + 2) + log(4))*catalan(x)
assert catalan(x).evalf() == catalan(x)
c = catalan(S.Half).evalf()
assert str(c) == '0.848826363156775'
c = catalan(I).evalf(3)
assert str((re(c), im(c))) == '(0.398, -0.0209)'
def test_nC_nP_nT():
from sympy.utilities.iterables import (
multiset_permutations, multiset_combinations, multiset_partitions,
partitions, subsets, permutations)
from sympy.functions.combinatorial.numbers import (
nP, nC, nT, stirling, _multiset_histogram, _AOP_product)
from sympy.combinatorics.permutations import Permutation
from sympy.core.numbers import oo
from random import choice
c = string.ascii_lowercase
for i in range(100):
s = ''.join(choice(c) for i in range(7))
u = len(s) == len(set(s))
try:
tot = 0
for i in range(8):
check = nP(s, i)
tot += check
assert len(list(multiset_permutations(s, i))) == check
if u:
assert nP(len(s), i) == check
assert nP(s) == tot
except AssertionError:
print(s, i, 'failed perm test')
raise ValueError()
for i in range(100):
s = ''.join(choice(c) for i in range(7))
u = len(s) == len(set(s))
try:
tot = 0
for i in range(8):
check = nC(s, i)
tot += check
assert len(list(multiset_combinations(s, i))) == check
if u:
assert nC(len(s), i) == check
assert nC(s) == tot
if u:
assert nC(len(s)) == tot
except AssertionError:
print(s, i, 'failed combo test')
raise ValueError()
for i in range(1, 10):
tot = 0
for j in range(1, i + 2):
check = nT(i, j)
tot += check
assert sum(1 for p in partitions(i, j, size=True) if p[0] == j) == check
assert nT(i) == tot
for i in range(1, 10):
tot = 0
for j in range(1, i + 2):
check = nT(range(i), j)
tot += check
assert len(list(multiset_partitions(range(i), j))) == check
assert nT(range(i)) == tot
for i in range(100):
s = ''.join(choice(c) for i in range(7))
u = len(s) == len(set(s))
try:
tot = 0
for i in range(1, 8):
check = nT(s, i)
tot += check
assert len(list(multiset_partitions(s, i))) == check
if u:
assert nT(range(len(s)), i) == check
if u:
assert nT(range(len(s))) == tot
assert nT(s) == tot
except AssertionError:
print(s, i, 'failed partition test')
raise ValueError()
# tests for Stirling numbers of the first kind that are not tested in the
# above
assert [stirling(9, i, kind=1) for i in range(11)] == [
0, 40320, 109584, 118124, 67284, 22449, 4536, 546, 36, 1, 0]
perms = list(permutations(range(4)))
assert [sum(1 for p in perms if Permutation(p).cycles == i)
for i in range(5)] == [0, 6, 11, 6, 1] == [
stirling(4, i, kind=1) for i in range(5)]
# http://oeis.org/A008275
assert [stirling(n, k, signed=1)
for n in range(10) for k in range(1, n + 1)] == [
1, -1,
1, 2, -3,
1, -6, 11, -6,
1, 24, -50, 35, -10,
1, -120, 274, -225, 85, -15,
1, 720, -1764, 1624, -735, 175, -21,
1, -5040, 13068, -13132, 6769, -1960, 322, -28,
1, 40320, -109584, 118124, -67284, 22449, -4536, 546, -36, 1]
# http://en.wikipedia.org/wiki/Stirling_numbers_of_the_first_kind
assert [stirling(n, k, kind=1)
for n in range(10) for k in range(n+1)] == [
1,
0, 1,
0, 1, 1,
0, 2, 3, 1,
0, 6, 11, 6, 1,
0, 24, 50, 35, 10, 1,
0, 120, 274, 225, 85, 15, 1,
0, 720, 1764, 1624, 735, 175, 21, 1,
0, 5040, 13068, 13132, 6769, 1960, 322, 28, 1,
0, 40320, 109584, 118124, 67284, 22449, 4536, 546, 36, 1]
# http://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind
assert [stirling(n, k, kind=2)
for n in range(10) for k in range(n+1)] == [
1,
0, 1,
0, 1, 1,
0, 1, 3, 1,
0, 1, 7, 6, 1,
0, 1, 15, 25, 10, 1,
0, 1, 31, 90, 65, 15, 1,
0, 1, 63, 301, 350, 140, 21, 1,
0, 1, 127, 966, 1701, 1050, 266, 28, 1,
0, 1, 255, 3025, 7770, 6951, 2646, 462, 36, 1]
assert stirling(3, 4, kind=1) == stirling(3, 4, kind=1) == 0
raises(ValueError, lambda: stirling(-2, 2))
def delta(p):
if len(p) == 1:
return oo
return min(abs(i[0] - i[1]) for i in subsets(p, 2))
parts = multiset_partitions(range(5), 3)
d = 2
assert (sum(1 for p in parts if all(delta(i) >= d for i in p)) ==
stirling(5, 3, d=d) == 7)
# other coverage tests
assert nC('abb', 2) == nC('aab', 2) == 2
assert nP(3, 3, replacement=True) == nP('aabc', 3, replacement=True) == 27
assert nP(3, 4) == 0
assert nP('aabc', 5) == 0
assert nC(4, 2, replacement=True) == nC('abcdd', 2, replacement=True) == \
len(list(multiset_combinations('aabbccdd', 2))) == 10
assert nC('abcdd') == sum(nC('abcdd', i) for i in range(6)) == 24
assert nC(list('abcdd'), 4) == 4
assert nT('aaaa') == nT(4) == len(list(partitions(4))) == 5
assert nT('aaab') == len(list(multiset_partitions('aaab'))) == 7
assert nC('aabb'*3, 3) == 4 # aaa, bbb, abb, baa
assert dict(_AOP_product((4,1,1,1))) == {
0: 1, 1: 4, 2: 7, 3: 8, 4: 8, 5: 7, 6: 4, 7: 1}
# the following was the first t that showed a problem in a previous form of
# the function, so it's not as random as it may appear
t = (3, 9, 4, 6, 6, 5, 5, 2, 10, 4)
assert sum(_AOP_product(t)[i] for i in range(55)) == 58212000
raises(ValueError, lambda: _multiset_histogram({1:'a'}))
def test_issue_8496():
n = Symbol("n")
k = Symbol("k")
raises(TypeError, lambda: catalan(n, k))
raises(TypeError, lambda: euler(n, k))
#!/usr/bin/env python
# Copyright (c) 2013 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=F0401
import operator
import optparse
import os
import re
import shutil
import subprocess
import sys
sys.path.append('scripts/gyp')
from customize import ReplaceInvalidChars
from dex import AddExeExtensions
from manifest_json_parser import HandlePermissionList
from manifest_json_parser import ManifestJsonParser
def CleanDir(path):
if os.path.exists(path):
shutil.rmtree(path)
def RunCommand(command, shell=False):
"""Runs the command list, print the output, and propagate its result."""
proc = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=shell)
if not shell:
output = proc.communicate()[0]
result = proc.returncode
print output
if result != 0:
print ('Command "%s" exited with non-zero exit code %d'
% (' '.join(command), result))
sys.exit(result)
def Which(name):
"""Search PATH for executable files with the given name."""
result = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ.get('PATH', None)
if path is None:
return []
for p in os.environ.get('PATH', '').split(os.pathsep):
p = os.path.join(p, name)
if os.access(p, os.X_OK):
result.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
result.append(pext)
return result
def Find(name, path):
"""Find executable file with the given name
and maximum API level under specific path."""
result = {}
for root, _, files in os.walk(path):
if name in files:
key = os.path.join(root, name)
sdk_version = os.path.basename(os.path.dirname(key))
str_num = re.search(r'\d+', sdk_version)
if str_num:
result[key] = int(str_num.group())
else:
result[key] = 0
if not result:
raise Exception()
return max(result.iteritems(), key=operator.itemgetter(1))[0]
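# Illustrative behaviour (hypothetical layout, not taken from this script):
# given <sdk>/platforms/android-18/android.jar and
# <sdk>/platforms/android-19/android.jar, Find('android.jar', platforms_dir)
# returns the android-19 path, because the digits parsed from the parent
# directory name (the API level) are used to pick the maximum.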
def GetVersion(path):
"""Get the version of this python tool."""
version_str = 'XWalk packaging tool version '
file_handle = open(path, 'r')
src_content = file_handle.read()
version_nums = re.findall(r'\d+', src_content)
version_str += ('.').join(version_nums)
file_handle.close()
return version_str
def ParseManifest(options):
parser = ManifestJsonParser(os.path.expanduser(options.manifest))
if not options.package:
options.package = 'org.xwalk.' + parser.GetAppName().lower()
if not options.name:
options.name = parser.GetAppName()
if not options.app_version:
options.app_version = parser.GetVersion()
if parser.GetDescription():
options.description = parser.GetDescription()
if parser.GetPermissions():
options.permissions = parser.GetPermissions()
if parser.GetAppUrl():
options.app_url = parser.GetAppUrl()
elif parser.GetAppLocalPath():
options.app_local_path = parser.GetAppLocalPath()
else:
print 'Error: there is no app launch path defined in manifest.json.'
sys.exit(9)
if parser.GetAppRoot():
options.app_root = parser.GetAppRoot()
temp_dict = parser.GetIcons()
icon_dict = {}
try:
icon_dict = dict((int(k), v) for k, v in temp_dict.iteritems())
except ValueError:
print 'The keys of the icons in the manifest file must be numbers.'
# TODO(junmin): add multiple icons support.
if icon_dict:
icon_file = max(icon_dict.iteritems(), key=operator.itemgetter(0))[1]
options.icon = os.path.join(options.app_root, icon_file)
options.enable_remote_debugging = False
if parser.GetFullScreenFlag().lower() == 'true':
options.fullscreen = True
elif parser.GetFullScreenFlag().lower() == 'false':
options.fullscreen = False
def ParseXPK(options, out_dir):
cmd = ['python', 'parse_xpk.py',
'--file=%s' % os.path.expanduser(options.xpk),
'--out=%s' % out_dir]
RunCommand(cmd)
if options.manifest:
print ('The manifest inside the XPK is used '
'when the "--xpk" option is specified, so '
'the "--manifest" option will be ignored.')
sys.exit(7)
if os.path.isfile(os.path.join(out_dir, 'manifest.json')):
options.manifest = os.path.join(out_dir, 'manifest.json')
else:
print 'XPK doesn\'t contain manifest file.'
sys.exit(8)
def FindExtensionJars(root_path):
''' Find all .jar files for external extensions. '''
extension_jars = []
if not os.path.exists(root_path):
return extension_jars
for afile in os.listdir(root_path):
if os.path.isdir(os.path.join(root_path, afile)):
base_name = os.path.basename(afile)
extension_jar = os.path.join(root_path, afile, base_name + '.jar')
if os.path.isfile(extension_jar):
extension_jars.append(extension_jar)
return extension_jars
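# Expected layout (illustrative, inferred from the code above): each external
# extension lives in its own sub-directory under root_path and ships a jar
# named after that directory, e.g. xwalk-extensions/contacts/contacts.jar.
# Only jars matching this <dir>/<dir>.jar pattern are collected.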
def Customize(options):
package = '--package=org.xwalk.app.template'
if options.package:
package = '--package=%s' % options.package
name = '--name=AppTemplate'
if options.name:
name = '--name=%s' % options.name
app_version = '--app-version=1.0.0'
if options.app_version:
app_version = '--app-version=%s' % options.app_version
description = ''
if options.description:
description = '--description=%s' % options.description
permissions = ''
if options.permissions:
permissions = '--permissions=%s' % options.permissions
icon = ''
if options.icon:
icon = '--icon=%s' % os.path.expanduser(options.icon)
app_url = ''
if options.app_url:
app_url = '--app-url=%s' % options.app_url
app_root = ''
if options.app_root:
app_root = '--app-root=%s' % os.path.expanduser(options.app_root)
app_local_path = ''
if options.app_local_path:
app_local_path = '--app-local-path=%s' % options.app_local_path
remote_debugging = ''
if options.enable_remote_debugging:
remote_debugging = '--enable-remote-debugging'
fullscreen_flag = ''
if options.fullscreen:
fullscreen_flag = '-f'
extensions_list = ''
if options.extensions:
extensions_list = '--extensions=%s' % options.extensions
orientation = '--orientation=unspecified'
if options.orientation:
orientation = '--orientation=%s' % options.orientation
cmd = ['python', 'customize.py', package,
name, app_version, description, icon, permissions, app_url,
remote_debugging, app_root, app_local_path, fullscreen_flag,
extensions_list, orientation]
RunCommand(cmd)
def Execution(options, sanitized_name):
android_path_array = Which('android')
if not android_path_array:
print 'Please install Android SDK first.'
sys.exit(1)
sdk_root_path = os.path.dirname(os.path.dirname(android_path_array[0]))
try:
sdk_jar_path = Find('android.jar', os.path.join(sdk_root_path, 'platforms'))
except Exception:
print 'Your Android SDK may be broken; please reinstall it.'
sys.exit(2)
level_string = os.path.basename(os.path.dirname(sdk_jar_path))
api_level = int(re.search(r'\d+', level_string).group())
if api_level < 14:
print 'Please install Android API level (>=14) first.'
sys.exit(3)
if options.keystore_path:
key_store = os.path.expanduser(options.keystore_path)
if options.keystore_alias:
key_alias = options.keystore_alias
else:
print 'Please provide an alias name of the developer key.'
sys.exit(6)
if options.keystore_passcode:
key_code = options.keystore_passcode
else:
print 'Please provide the passcode of the developer key.'
sys.exit(6)
else:
print ('Using xwalk\'s debug keystore by default. '
'Please switch to your own keystore when distributing the application to an app market.')
key_store = 'scripts/ant/xwalk-debug.keystore'
key_alias = 'xwalkdebugkey'
key_code = 'xwalkdebug'
if not os.path.exists('out'):
os.mkdir('out')
# Make sure to use ant-tasks.jar correctly.
# Default Android SDK names it as ant-tasks.jar
# Chrome third party Android SDK names it as anttasks.jar
ant_tasks_jar_path = os.path.join(sdk_root_path,
'tools', 'lib', 'ant-tasks.jar')
if not os.path.exists(ant_tasks_jar_path):
ant_tasks_jar_path = os.path.join(sdk_root_path,
'tools', 'lib' ,'anttasks.jar')
aapt_path = ''
for aapt_str in AddExeExtensions('aapt'):
try:
aapt_path = Find(aapt_str, sdk_root_path)
print 'Use %s in %s.' % (aapt_str, sdk_root_path)
break
except Exception:
print 'Could not find %s in %s.' % (aapt_str, sdk_root_path)
if not aapt_path:
print 'Your Android SDK may be broken; please reinstall it.'
sys.exit(2)
# Check whether ant is installed.
try:
cmd = ['ant', '-version']
RunCommand(cmd, True)
except EnvironmentError:
print 'Please install ant first.'
sys.exit(4)
res_dirs = '-DADDITIONAL_RES_DIRS=\'\''
res_packages = '-DADDITIONAL_RES_PACKAGES=\'\''
res_r_text_files = '-DADDITIONAL_R_TEXT_FILES=\'\''
if options.mode == 'embedded':
# Prepare the .pak file for embedded mode.
pak_src_path = os.path.join('native_libs_res', 'xwalk.pak')
pak_des_path = os.path.join(sanitized_name, 'assets', 'xwalk.pak')
shutil.copy(pak_src_path, pak_des_path)
js_src_dir = os.path.join('native_libs_res', 'jsapi')
js_des_dir = os.path.join(sanitized_name, 'assets', 'jsapi')
if os.path.exists(js_des_dir):
shutil.rmtree(js_des_dir)
shutil.copytree(js_src_dir, js_des_dir)
res_ui_java = os.path.join('gen', 'ui_java')
res_content_java = os.path.join('gen', 'content_java')
res_xwalk_java = os.path.join('gen', 'xwalk_core_java')
res_dirs = ('-DADDITIONAL_RES_DIRS='
+ os.path.join(res_ui_java, 'res_crunched') + ' '
+ os.path.join(res_ui_java, 'res_v14_compatibility') + ' '
+ os.path.join(res_ui_java, 'res_grit') + ' '
+ os.path.join('libs_res', 'ui') + ' '
+ os.path.join(res_content_java, 'res_crunched') + ' '
+ os.path.join(res_content_java, 'res_v14_compatibility') + ' '
+ os.path.join('libs_res', 'content') + ' '
+ os.path.join(res_content_java, 'res_grit') + ' '
+ os.path.join(res_xwalk_java, 'res_crunched') + ' '
+ os.path.join(res_xwalk_java, 'res_v14_compatibility') + ' '
+ os.path.join('libs_res', 'runtime') + ' '
+ os.path.join(res_xwalk_java, 'res_grit'))
res_packages = ('-DADDITIONAL_RES_PACKAGES=org.chromium.ui '
'org.xwalk.core org.chromium.content')
res_r_text_files = ('-DADDITIONAL_R_TEXT_FILES='
+ os.path.join(res_ui_java, 'java_R', 'R.txt') + ' '
+ os.path.join(res_xwalk_java, 'java_R', 'R.txt') + ' '
+ os.path.join(res_content_java, 'java_R', 'R.txt'))
resource_dir = '-DRESOURCE_DIR=' + os.path.join(sanitized_name, 'res')
manifest_path = os.path.join(sanitized_name, 'AndroidManifest.xml')
cmd = ['python', os.path.join('scripts', 'gyp', 'ant.py'),
'-DAAPT_PATH=%s' % aapt_path,
res_dirs,
res_packages,
res_r_text_files,
'-DANDROID_MANIFEST=%s' % manifest_path,
'-DANDROID_SDK_JAR=%s' % sdk_jar_path,
'-DANDROID_SDK_ROOT=%s' % sdk_root_path,
'-DANDROID_SDK_VERSION=%d' % api_level,
'-DANT_TASKS_JAR=%s' % ant_tasks_jar_path,
'-DLIBRARY_MANIFEST_PATHS= ',
'-DOUT_DIR=out',
resource_dir,
'-DSTAMP=codegen.stamp',
'-Dbasedir=.',
'-buildfile',
os.path.join('scripts', 'ant', 'apk-codegen.xml')]
RunCommand(cmd)
# Check whether java is installed.
try:
cmd = ['java', '-version']
RunCommand(cmd, True)
except EnvironmentError:
print 'Please install Oracle JDK first.'
sys.exit(5)
# Compile App source code with app runtime code.
classpath = '--classpath='
classpath += os.path.join(os.getcwd(), 'libs',
'xwalk_app_runtime_java.jar')
classpath += ' ' + sdk_jar_path
src_dirs = '--src-dirs=' + os.path.join(os.getcwd(), sanitized_name, 'src') +\
' ' + os.path.join(os.getcwd(), 'out', 'gen')
cmd = ['python', os.path.join('scripts', 'gyp', 'javac.py'),
'--output-dir=%s' % os.path.join('out', 'classes'),
classpath,
src_dirs,
'--javac-includes=',
'--chromium-code=0',
'--stamp=compile.stam']
RunCommand(cmd)
# Package resources.
asset_dir = '-DASSET_DIR=%s' % os.path.join(sanitized_name, 'assets')
xml_path = os.path.join('scripts', 'ant', 'apk-package-resources.xml')
cmd = ['python', os.path.join('scripts', 'gyp', 'ant.py'),
'-DAAPT_PATH=%s' % aapt_path,
res_dirs,
res_packages,
res_r_text_files,
'-DANDROID_SDK_JAR=%s' % sdk_jar_path,
'-DANDROID_SDK_ROOT=%s' % sdk_root_path,
'-DANT_TASKS_JAR=%s' % ant_tasks_jar_path,
'-DAPK_NAME=%s' % sanitized_name,
'-DAPP_MANIFEST_VERSION_CODE=0',
'-DAPP_MANIFEST_VERSION_NAME=Developer Build',
asset_dir,
'-DCONFIGURATION_NAME=Release',
'-DOUT_DIR=out',
resource_dir,
'-DSTAMP=package_resources.stamp',
'-Dbasedir=.',
'-buildfile',
xml_path]
RunCommand(cmd)
dex_path = '--dex-path=' + os.path.join(os.getcwd(), 'out', 'classes.dex')
app_runtime_jar = os.path.join(os.getcwd(),
'libs', 'xwalk_app_runtime_java.jar')
# Check whether external extensions are included.
extensions_string = 'xwalk-extensions'
extensions_dir = os.path.join(os.getcwd(), sanitized_name, extensions_string)
external_extension_jars = FindExtensionJars(extensions_dir)
input_jars = []
if options.mode == 'embedded':
input_jars.append(os.path.join(os.getcwd(), 'libs',
'xwalk_core_embedded.dex.jar'))
dex_command_list = ['python', os.path.join('scripts', 'gyp', 'dex.py'),
dex_path,
'--android-sdk-root=%s' % sdk_root_path,
app_runtime_jar,
os.path.join(os.getcwd(), 'out', 'classes')]
dex_command_list.extend(external_extension_jars)
dex_command_list.extend(input_jars)
RunCommand(dex_command_list)
src_dir = '-DSOURCE_DIR=' + os.path.join(sanitized_name, 'src')
apk_path = '-DUNSIGNED_APK_PATH=' + os.path.join('out', 'app-unsigned.apk')
native_lib_path = '-DNATIVE_LIBS_DIR='
if options.mode == 'embedded':
if options.arch == 'x86':
x86_native_lib_path = os.path.join('native_libs', 'x86', 'libs',
'x86', 'libxwalkcore.so')
if os.path.isfile(x86_native_lib_path):
native_lib_path += os.path.join('native_libs', 'x86', 'libs')
else:
print 'Missing x86 native library for Crosswalk embedded APK. Abort!'
sys.exit(10)
elif options.arch == 'arm':
arm_native_lib_path = os.path.join('native_libs', 'armeabi-v7a', 'libs',
'armeabi-v7a', 'libxwalkcore.so')
if os.path.isfile(arm_native_lib_path):
native_lib_path += os.path.join('native_libs', 'armeabi-v7a', 'libs')
else:
print 'Missing ARM native library for Crosswalk embedded APK. Abort!'
sys.exit(10)
# A space is needed for Windows.
native_lib_path += ' '
cmd = ['python', 'scripts/gyp/ant.py',
'-DANDROID_SDK_ROOT=%s' % sdk_root_path,
'-DANT_TASKS_JAR=%s' % ant_tasks_jar_path,
'-DAPK_NAME=%s' % sanitized_name,
'-DCONFIGURATION_NAME=Release',
native_lib_path,
'-DOUT_DIR=out',
src_dir,
apk_path,
'-Dbasedir=.',
'-buildfile',
'scripts/ant/apk-package.xml']
RunCommand(cmd)
apk_path = '--unsigned-apk-path=' + os.path.join('out', 'app-unsigned.apk')
final_apk_path = '--final-apk-path=' + \
os.path.join('out', sanitized_name + '.apk')
cmd = ['python', 'scripts/gyp/finalize_apk.py',
'--android-sdk-root=%s' % sdk_root_path,
apk_path,
final_apk_path,
'--keystore-path=%s' % key_store,
'--keystore-alias=%s' % key_alias,
'--keystore-passcode=%s' % key_code]
RunCommand(cmd)
src_file = os.path.join('out', sanitized_name + '.apk')
if options.mode == 'shared':
dst_file = '%s.apk' % options.name
elif options.mode == 'embedded':
dst_file = '%s_%s.apk' % (options.name, options.arch)
shutil.copyfile(src_file, dst_file)
CleanDir('out')
if options.mode == 'embedded':
os.remove(pak_des_path)
def MakeApk(options, sanitized_name):
Customize(options)
if options.mode == 'shared':
Execution(options, sanitized_name)
print ('The cross-platform APK of the web application was '
'generated successfully at %s.apk, based on the shared '
'Crosswalk library.'
% sanitized_name)
elif options.mode == 'embedded':
if options.arch:
Execution(options, sanitized_name)
print ('The Crosswalk embedded APK of web application "%s" for '
'platform %s was generated successfully at %s_%s.apk.'
% (sanitized_name, options.arch, sanitized_name, options.arch))
else:
# If the arch option is unspecified, APKs for all available platforms
# will be generated.
platform_str = ''
apk_str = ''
valid_archs = ['x86', 'armeabi-v7a']
for arch in valid_archs:
if os.path.isfile(os.path.join('native_libs', arch, 'libs',
arch, 'libxwalkcore.so')):
if platform_str != '':
platform_str += ' and '
apk_str += ' and '
if arch.find('x86') != -1:
options.arch = 'x86'
elif arch.find('arm') != -1:
options.arch = 'arm'
platform_str += options.arch
apk_str += '%s_%s.apk' % (sanitized_name, options.arch)
Execution(options, sanitized_name)
if apk_str.find('and') != -1:
print ('The Crosswalk embedded APKs of web application "%s" for '
'platform %s were generated successfully at %s.'
% (sanitized_name, platform_str, apk_str))
else:
print ('The Crosswalk embedded APK of web application "%s" for '
'platform %s was generated successfully at %s.'
% (sanitized_name, platform_str, apk_str))
else:
print 'Unknown mode for packaging the application. Abort!'
sys.exit(11)
def main(argv):
parser = optparse.OptionParser()
parser.add_option('-v', '--version', action='store_true',
dest='version', default=False,
help='The version of this python tool.')
info = ('The packaging mode of the web application. The value \'shared\' '
'means that the runtime is shared across multiple application '
'instances and that the runtime needs to be distributed separately. '
'The value \'embedded\' means that the runtime is embedded into the '
'application itself and distributed along with it. '
'The default mode is \'embedded\'. For example: --mode=embedded')
parser.add_option('--mode', default='embedded', help=info)
info = ('The target architecture of the embedded runtime. Supported values '
'are \'x86\' and \'arm\'. Note that if undefined, APKs for all possible '
'architectures will be generated.')
parser.add_option('--arch', help=info)
group = optparse.OptionGroup(parser, 'Application Source Options',
'This packaging tool supports three kinds of web application sources: '
'1) an XPK package; 2) manifest.json; 3) various command line options, '
'for example, \'--app-url\' for a website, or \'--app-root\' and '
'\'--app-local-path\' for a local web application.')
info = ('The path of the XPK package. For example, --xpk=/path/to/xpk/file')
group.add_option('--xpk', help=info)
info = ('The manifest file with the detail description of the application. '
'For example, --manifest=/path/to/your/manifest/file')
group.add_option('--manifest', help=info)
info = ('The URL of the application. '
'This flag allows packaging a website as an APK. For example, '
'--app-url=http://www.intel.com')
group.add_option('--app-url', help=info)
info = ('The root path of the web app. '
'This flag allows packaging a local web app as an APK. For example, '
'--app-root=/root/path/of/the/web/app')
group.add_option('--app-root', help=info)
info = ('The path of the entry file, relative to the value of '
'\'--app-root\'. This flag must be used together with \'--app-root\'. '
'For example, --app-local-path=/relative/path/of/entry/file')
group.add_option('--app-local-path', help=info)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Mandatory arguments',
'They are used for describing the APK information through '
'command line options.')
info = ('The APK name. For example, --name=YourApplicationName')
group.add_option('--name', help=info)
info = ('The package name. For example, '
'--package=com.example.YourPackage')
group.add_option('--package', help=info)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Optional arguments',
'They are used for various settings for applications through '
'command line options.')
info = ('The version name of the application. '
'For example, --app-version=1.0.0')
group.add_option('--app-version', help=info)
info = ('The description of the application. For example, '
'--description=YourApplicationDescription')
group.add_option('--description', help=info)
group.add_option('--enable-remote-debugging', action='store_true',
dest='enable_remote_debugging', default=False,
help = 'Enable remote debugging.')
info = ('The list of external extension paths, split by the OS path separator: '
'\':\' on Linux and Mac OS, and \';\' on Windows. For example, '
'--extensions=/path/to/extension1:/path/to/extension2.')
group.add_option('--extensions', help=info)
group.add_option('-f', '--fullscreen', action='store_true',
dest='fullscreen', default=False,
help='Make application fullscreen.')
info = ('The path of the application icon. '
'For example, --icon=/path/to/your/customized/icon')
group.add_option('--icon', help=info)
info = ('The orientation of the web app\'s display on the device. '
'For example, --orientation=landscape. The default value is '
'\'unspecified\'. The permitted values are from Android: '
'http://developer.android.com/guide/topics/manifest/'
'activity-element.html#screen')
group.add_option('--orientation', help=info)
info = ('The list of permissions to be used by the web application. For example, '
'--permissions=\'geolocation:webgl\'')
group.add_option('--permissions', help=info)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Keystore Options',
'The keystore contains the developer\'s signing key; it is used when '
'the developer wants to distribute the applications.')
info = ('The path to the developer keystore. For example, '
'--keystore-path=/path/to/your/developer/keystore')
group.add_option('--keystore-path', help=info)
info = ('The alias name of keystore. For example, --keystore-alias=name')
group.add_option('--keystore-alias', help=info)
info = ('The passcode of keystore. For example, --keystore-passcode=code')
group.add_option('--keystore-passcode', help=info)
parser.add_option_group(group)
options, _ = parser.parse_args()
if len(argv) == 1:
parser.print_help()
return 0
if options.version:
if os.path.isfile('VERSION'):
print GetVersion('VERSION')
return 0
else:
parser.error('Can\'t get the version because the VERSION file is missing!')
xpk_temp_dir = ''
if options.xpk:
xpk_name = os.path.splitext(os.path.basename(options.xpk))[0]
xpk_temp_dir = xpk_name + '_xpk'
ParseXPK(options, xpk_temp_dir)
if not options.manifest:
if not options.package:
parser.error('The package name is required! '
'Please use "--package" option.')
if not options.name:
parser.error('The APK name is required! Please use the "--name" option.')
if not ((options.app_url and not options.app_root
and not options.app_local_path) or ((not options.app_url)
and options.app_root and options.app_local_path)):
parser.error('The entry is required. If the entry is a remote URL, '
'please use the "--app-url" option; if the entry is local, '
'please use "--app-root" and '
'"--app-local-path" options together!')
if options.permissions:
permission_list = options.permissions.split(':')
options.permissions = HandlePermissionList(permission_list)
else:
try:
ParseManifest(options)
except KeyError, ec:
print 'The manifest file contains syntax errors.'
return ec.code
options.name = ReplaceInvalidChars(options.name, 'apkname')
options.package = ReplaceInvalidChars(options.package)
sanitized_name = ReplaceInvalidChars(options.name)
try:
MakeApk(options, sanitized_name)
except SystemExit, ec:
CleanDir(sanitized_name)
CleanDir('out')
if os.path.exists(xpk_temp_dir):
CleanDir(xpk_temp_dir)
return ec.code
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
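# Example invocations (illustrative only; the script file name 'make_apk.py'
# is an assumption, and all paths and values are placeholders):
#
#   python make_apk.py --package=org.xwalk.example --name=Example \
#       --app-url=http://www.intel.com --mode=embedded --arch=x86
#
#   python make_apk.py --manifest=/path/to/manifest.json --mode=shared \
#       --keystore-path=/path/to/keystore --keystore-alias=name \
#       --keystore-passcode=code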
# SDT2Parser.py
#
# Callback target class for the ElementTree parser to parse an SDT2 document
from .SDT2Classes import *
class SDT2Parser:
# Define the element tags of the SDT2
includeTag = 'include'
domainTag = 'domain'
importsTag = 'imports'
modulesTag = 'modules'
moduleClassTag = 'moduleclass'
moduleTag = 'module'
extendsTag = 'extends'
rootDevicesTag = 'rootdevices'
rootDeviceTag = 'rootdevice'
devicesTag = 'devices'
deviceTag = 'device'
deviceInfoTag = 'deviceinfo'
deviceInfoNameTag = 'name'
deviceInfoVendorTag = 'vendor'
deviceInfoFirmwareVersionTag = 'firmwareversion'
deviceInfoVendorURLTag = 'vendorurl'
deviceInfoSerialNumberTag = 'serialnumber'
actionsTag = 'actions'
actionTag = 'action'
argTag = 'arg'
eventsTag = 'events'
eventTag = 'event'
dataTag = 'data'
dataPointTag = 'datapoint'
docTag = 'doc'
ttTag = 'tt'
emTag = 'em'
bTag = 'b'
pTag = 'p'
imgTag = 'img'
imgCaptionTag = 'caption'
def __init__(self):
self.elementStack = []
self.nameSpaces = []
self.domain = None
def start(self, tag, attrib):
# First add the name space to the list of used name spaces
uri, ignore, ntag = tag[1:].partition("}")
if (uri not in self.nameSpaces):
self.nameSpaces.append(uri)
ntag = ntag.lower()
# Handle all elements
# Domain, includes
if (ntag == SDT2Parser.domainTag):
domain = SDT2Domain()
domain.id = attrib['id'].strip() if 'id' in attrib else None
self.elementStack.append(domain)
elif (ntag == SDT2Parser.includeTag):
if (isinstance(self.elementStack[-1], SDT2Domain)):
domain = self.elementStack[-1]
include = SDT2Include()
include.parse = attrib['parse'].strip() if 'parse' in attrib else None
include.href = attrib['href'].strip() if 'href' in attrib else None
domain.includes.append(include)
else:
raise SyntaxError('<include> definition is only allowed in <domain> element')
# ModuleClass, Module, Extends
elif (ntag == SDT2Parser.moduleClassTag):
if (isinstance(self.elementStack[-1], SDT2Domain)):
domain = self.elementStack[-1]
module = SDT2ModuleClass()
module.name = attrib['name'].strip() if 'name' in attrib else None
#module.extends = attrib['extends'].strip() if 'extends' in attrib else None
domain.modules.append(module)
self.elementStack.append(module)
else:
raise SyntaxError('<ModuleClass> definition is only allowed in <Domain> element')
elif (ntag == SDT2Parser.moduleTag):
if (isinstance(self.elementStack[-1], SDT2RootDevice) or isinstance(self.elementStack[-1], SDT2Device)):
device = self.elementStack[-1]
module = SDT2Module()
module.name = attrib['name'].strip() if 'name' in attrib else None
#module.extends = attrib['extends'].strip() if 'extends' in attrib else None
device.modules.append(module)
self.elementStack.append(module)
else:
raise SyntaxError('<Module> definition is only allowed in <RootDevice> or <Device> element')
elif (ntag == SDT2Parser.extendsTag):
if (isinstance(self.elementStack[-1], SDT2Module) or isinstance(self.elementStack[-1], SDT2ModuleClass)):
moduleClass = self.elementStack[-1]
extends = SDT2Extends()
extends.domain = attrib['domain'].strip() if 'domain' in attrib else None
extends.clazz = attrib['class'].strip() if 'class' in attrib else None
moduleClass.extends = extends
else:
raise SyntaxError('<extends> definition is only allowed in <Module> or <ModuleClass> element')
# RootDevice, Device
elif (ntag == SDT2Parser.rootDeviceTag):
if (isinstance(self.elementStack[-1], SDT2Domain)):
domain = self.elementStack[-1]
rootDevice = SDT2RootDevice()
rootDevice.id = attrib['id'].strip() if 'id' in attrib else None
domain.rootDevices.append(rootDevice)
self.elementStack.append(rootDevice)
else:
raise SyntaxError('<RootDevice> definition is only allowed in <Domain> element')
elif (ntag == SDT2Parser.deviceTag):
if (isinstance(self.elementStack[-1], SDT2RootDevice)):
rootDevice = self.elementStack[-1]
device = SDT2Device()
device.id = attrib['id'].strip() if 'id' in attrib else None
rootDevice.devices.append(device)
self.elementStack.append(device)
else:
raise SyntaxError('<Device> definition is only allowed in <RootDevice> element')
# Action, Arg
elif (ntag == SDT2Parser.actionTag):
if (isinstance(self.elementStack[-1], SDT2Module) or isinstance(self.elementStack[-1], SDT2ModuleClass)):
moduleClass = self.elementStack[-1]
action = SDT2Action()
action.name = attrib['name'] if 'name' in attrib else None
action.type = attrib['type'].strip() if 'type' in attrib else None
moduleClass.actions.append(action)
self.elementStack.append(action)
else:
raise SyntaxError('<Action> definition is only allowed in <Module> or <ModuleClass> element')
elif (ntag == SDT2Parser.argTag):
if (isinstance(self.elementStack[-1], SDT2Action)):
action = self.elementStack[-1]
arg = SDT2Arg()
arg.name = attrib['name'].strip() if 'name' in attrib else None
arg.type = attrib['type'].strip() if 'type' in attrib else None
action.arg.append(arg)
else:
raise SyntaxError('<Arg> definition is only allowed in <Action> element')
# Event
elif (ntag == SDT2Parser.eventTag):
if (isinstance(self.elementStack[-1], SDT2Module) or isinstance(self.elementStack[-1], SDT2ModuleClass)):
moduleClass = self.elementStack[-1]
event = SDT2Event()
event.name = attrib['name'].strip() if 'name' in attrib else None
moduleClass.events.append(event)
self.elementStack.append(event)
else:
raise SyntaxError('<Event> definition is only allowed in <Module> or <ModuleClass> element')
# DataPoint
elif (ntag == SDT2Parser.dataPointTag):
if (isinstance(self.elementStack[-1], SDT2Event) or isinstance(self.elementStack[-1], SDT2ModuleClass) or isinstance(self.elementStack[-1], SDT2Module)):
dataPoint = SDT2DataPoint()
dataPoint.name = attrib['name'].strip() if 'name' in attrib else None
dataPoint.type = attrib['type'].strip() if 'type' in attrib else None
dataPoint.writable = attrib['writable'].strip() if 'writable' in attrib else None
dataPoint.readable = attrib['readable'].strip() if 'readable' in attrib else None
dataPoint.eventable = attrib['eventable'].strip() if 'eventable' in attrib else None
if (isinstance(self.elementStack[-1], SDT2Event)):
event = self.elementStack[-1]
event.data.append(dataPoint)
if (isinstance(self.elementStack[-1], SDT2ModuleClass) or isinstance(self.elementStack[-1], SDT2Module)):
module = self.elementStack[-1]
module.data.append(dataPoint)
self.elementStack.append(dataPoint)
else:
raise SyntaxError('<DataPoint> definition is only allowed in <Event>, <Module> or <ModuleClass> element')
# DeviceInfo & elements
elif (ntag == SDT2Parser.deviceInfoTag):
if (isinstance(self.elementStack[-1], SDT2RootDevice) or isinstance(self.elementStack[-1], SDT2Device)):
deviceInfo = SDT2DeviceInfo()
if (isinstance(self.elementStack[-1], SDT2RootDevice)):
rootDevice = self.elementStack[-1]
rootDevice.deviceInfo = deviceInfo
elif (isinstance(self.elementStack[-1], SDT2Device)):
device = self.elementStack[-1]
device.deviceInfo = deviceInfo
self.elementStack.append(deviceInfo)
else:
raise SyntaxError('<DeviceInfo> definition is only allowed in <RootDevice> or <Device> element')
# Doc & elements
elif (ntag == SDT2Parser.docTag):
if (isinstance(self.elementStack[-1], SDT2RootDevice) or isinstance(self.elementStack[-1], SDT2Device) or
isinstance(self.elementStack[-1], SDT2Module) or isinstance(self.elementStack[-1], SDT2ModuleClass) or
isinstance(self.elementStack[-1], SDT2Action) or isinstance(self.elementStack[-1], SDT2DataPoint) or
isinstance(self.elementStack[-1], SDT2Event)
):
doc = SDT2Doc()
elem = self.elementStack[-1]
elem.doc = doc
self.elementStack.append(doc)
else:
raise SyntaxError('<Doc> definition is only allowed in <RootDevice>, <Device>, <Module>' +
'<ModuleClass>, <Action>, <DataPoint> or <Event> element')
elif (ntag == SDT2Parser.ttTag):
if (isinstance(self.elementStack[-1], SDT2Doc) or isinstance(self.elementStack[-1], SDT2DocP)):
obj = self.elementStack[-1]
tt = SDT2DocTT()
tt.doc = obj.doc
self.elementStack.append(tt)
else:
raise SyntaxError('<tt> definition is only allowed in <Doc> or <p> element')
elif (ntag == SDT2Parser.emTag):
if (isinstance(self.elementStack[-1], SDT2Doc) or isinstance(self.elementStack[-1], SDT2DocP)):
obj = self.elementStack[-1]
em = SDT2DocEM()
em.doc = obj.doc
self.elementStack.append(em)
else:
raise SyntaxError('<em> definition is only allowed in <Doc> or <p> element')
elif (ntag == SDT2Parser.bTag):
if (isinstance(self.elementStack[-1], SDT2Doc) or isinstance(self.elementStack[-1], SDT2DocP)):
obj = self.elementStack[-1]
b = SDT2DocB()
b.doc = obj.doc
self.elementStack.append(b)
else:
raise SyntaxError('<b> definition is only allowed in <Doc> or <p> element')
elif (ntag == SDT2Parser.pTag):
if (isinstance(self.elementStack[-1], SDT2Doc) or isinstance(self.elementStack[-1], SDT2DocP)):
obj = self.elementStack[-1]
p = SDT2DocP()
p.doc = obj.doc
p.startParagraph()
self.elementStack.append(p)
else:
raise SyntaxError('<p> definition is only allowed in <Doc> or <p> element')
elif (ntag == SDT2Parser.imgTag):
if (isinstance(self.elementStack[-1], SDT2Doc) or isinstance(self.elementStack[-1], SDT2DocP)):
obj = self.elementStack[-1]
img = SDT2DocIMG()
img.doc = obj.doc
img.startImage(attrib['src'].strip() if 'src' in attrib else None)
self.elementStack.append(img)
else:
raise SyntaxError('<img> definition is only allowed in <Doc> or <p> element')
elif (ntag == SDT2Parser.imgCaptionTag):
if (isinstance(self.elementStack[-1], SDT2DocIMG)):
obj = self.elementStack[-1]
caption = SDT2DocCaption()
caption.doc = obj.doc
self.elementStack.append(caption)
else:
raise SyntaxError('<caption> definition is only allowed in <img> element')
# Other tags to ignore / just containers
elif (ntag == SDT2Parser.deviceInfoNameTag and isinstance(self.elementStack[-1], SDT2DeviceInfo)):
self.elementStack.append(SDT2DeviceInfoName())
elif (ntag == SDT2Parser.deviceInfoVendorTag and isinstance(self.elementStack[-1], SDT2DeviceInfo)):
self.elementStack.append(SDT2DeviceInfoVendor())
elif (ntag == SDT2Parser.deviceInfoFirmwareVersionTag and isinstance(self.elementStack[-1], SDT2DeviceInfo)):
self.elementStack.append(SDT2DeviceInfoFirmwareVersion())
elif (ntag == SDT2Parser.deviceInfoVendorURLTag and isinstance(self.elementStack[-1], SDT2DeviceInfo)):
self.elementStack.append(SDT2DeviceInfoVendorURL())
elif (ntag == SDT2Parser.deviceInfoSerialNumberTag and isinstance(self.elementStack[-1], SDT2DeviceInfo)):
self.elementStack.append(SDT2DeviceInfoSerialNumber())
elif (ntag == SDT2Parser.rootDevicesTag or
ntag == SDT2Parser.devicesTag or
ntag == SDT2Parser.modulesTag or
ntag == SDT2Parser.actionsTag or
ntag == SDT2Parser.eventsTag or
ntag == SDT2Parser.dataTag or
ntag == SDT2Parser.importsTag):
pass
else:
# print(tag, attrib)
pass
def end(self, tag):
uri, ignore, ntag = tag[1:].partition("}")
ntag = ntag.lower()
if (ntag == SDT2Parser.domainTag):
self.domain = self.elementStack.pop()
elif (ntag == SDT2Parser.moduleClassTag or
ntag == SDT2Parser.moduleTag or
ntag == SDT2Parser.rootDeviceTag or
ntag == SDT2Parser.deviceTag or
ntag == SDT2Parser.actionTag or
ntag == SDT2Parser.deviceInfoTag or
ntag == SDT2Parser.deviceInfoNameTag or
ntag == SDT2Parser.deviceInfoVendorTag or
ntag == SDT2Parser.deviceInfoFirmwareVersionTag or
ntag == SDT2Parser.deviceInfoVendorURLTag or
ntag == SDT2Parser.deviceInfoSerialNumberTag or
ntag == SDT2Parser.eventTag or
ntag == SDT2Parser.dataPointTag or
ntag == SDT2Parser.docTag or
ntag == SDT2Parser.ttTag or
ntag == SDT2Parser.emTag or
ntag == SDT2Parser.bTag or
ntag == SDT2Parser.imgCaptionTag):
self.elementStack.pop()
elif (ntag == SDT2Parser.pTag):
obj = self.elementStack.pop()
obj.endParagraph()
elif (ntag == SDT2Parser.imgTag):
obj = self.elementStack.pop()
obj.endImage()
def data(self, data):
if (isinstance(self.elementStack[-1], SDT2DeviceInfoName)):
deviceInfo = self.elementStack[-2]
deviceInfo.name = data
elif (isinstance(self.elementStack[-1], SDT2DeviceInfoVendor)):
deviceInfo = self.elementStack[-2]
deviceInfo.vendor = data
elif (isinstance(self.elementStack[-1], SDT2DeviceInfoFirmwareVersion)):
deviceInfo = self.elementStack[-2]
deviceInfo.firmwareVersion = data
elif (isinstance(self.elementStack[-1], SDT2DeviceInfoVendorURL)):
deviceInfo = self.elementStack[-2]
deviceInfo.vendorURL = data
elif (isinstance(self.elementStack[-1], SDT2DeviceInfoSerialNumber)):
deviceInfo = self.elementStack[-2]
deviceInfo.serialNumber = data
elif (isinstance(self.elementStack[-1], SDT2Doc)):
obj = self.elementStack[-1]
obj.addContent(' '.join(data.split()))
elif (isinstance(self.elementStack[-1], SDT2DocTT)):
obj = self.elementStack[-1]
obj.addContent(' '.join(data.split()))
elif (isinstance(self.elementStack[-1], SDT2DocEM)):
obj = self.elementStack[-1]
obj.addContent(' '.join(data.split()))
elif (isinstance(self.elementStack[-1], SDT2DocB)):
obj = self.elementStack[-1]
obj.addContent(' '.join(data.split()))
elif (isinstance(self.elementStack[-1], SDT2DocP)):
obj = self.elementStack[-1]
obj.addContent(' '.join(data.split()))
elif (isinstance(self.elementStack[-1], SDT2DocIMG)):
obj = self.elementStack[-1]
obj.addContent(' '.join(data.split()))
elif (isinstance(self.elementStack[-1], SDT2DocCaption)):
obj = self.elementStack[-1]
obj.addContent(' '.join(data.split()))
def close(self):
pass
def comment(self, data):
#print('comment' + data)
pass
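# Minimal usage sketch (an assumption about how the surrounding tool drives
# this class; the file name 'example.xml' is a placeholder). SDT2Parser is a
# target object for xml.etree.ElementTree.XMLParser, which calls start(),
# data() and end() while the document is fed in; the parsed domain ends up in
# target.domain:
#
#   import xml.etree.ElementTree as ET
#
#   target = SDT2Parser()
#   parser = ET.XMLParser(target=target)
#   with open('example.xml', 'rb') as f:
#       parser.feed(f.read())
#   parser.close()
#   domain = target.domain   # SDT2Domain instance, or None if no <Domain> was seen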
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <[email protected]>
# Tue May 31 16:55:10 2011 +0200
#
# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
"""Tests on the machine infrastructure.
"""
import os, sys
import nose.tools
import math
import numpy
from . import Machine, PCATrainer, FisherLDATrainer, CGLogRegTrainer, \
WhiteningTrainer, WCCNTrainer
import bob.io.base
from bob.learn.activation import HyperbolicTangent, Identity
from bob.io.base import HDF5File
from bob.io.base.test_utils import datafile
def F(f):
"""Returns the test file on the "data" subdirectory"""
return __import__('pkg_resources').resource_filename(__name__, os.path.join('data', f))
MACHINE = F('linear-test.hdf5')
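# Forward-model sketch, inferred from the presumed() helpers in the tests
# below (not quoted from the library docs): the linear Machine maps an input
# x to
#   y[i] = activation( sum_j weights[j, i] * (x[j] - input_subtract[j]) / input_divide[j] + biases[i] )
# which is exactly what presumed() recomputes by hand for comparison.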
def test_initialization():
# Two inputs and 1 output
m = Machine(2,1)
assert (m.weights == 0.0).all()
nose.tools.eq_( m.weights.shape, (2,1) )
assert (m.biases == 0.0).all()
nose.tools.eq_( m.biases.shape, (1,) )
# Start by providing the data
w = numpy.array([[0.4, 0.1], [0.4, 0.2], [0.2, 0.7]], 'float64')
m = Machine(w)
b = numpy.array([0.3, -3.0], 'float64')
isub = numpy.array([0., 0.5, 0.5], 'float64')
idiv = numpy.array([0.5, 1.0, 1.0], 'float64')
m.input_subtract = isub
m.input_divide = idiv
m.biases = b
m.activation = HyperbolicTangent()
assert (m.input_subtract == isub).all()
assert (m.input_divide == idiv).all()
assert (m.weights == w).all()
assert (m.biases == b). all()
nose.tools.eq_(m.activation, HyperbolicTangent())
# Save to file
# c = HDF5File("bla.hdf5", 'w')
# m.save(c)
# Start by reading data from a file
c = HDF5File(MACHINE)
m = Machine(c)
assert (m.weights == w).all()
assert (m.biases == b). all()
# Makes sure we cannot stuff incompatible data
w = numpy.array([[0.4, 0.4, 0.2], [0.1, 0.2, 0.7]], 'float64')
m = Machine(w)
b = numpy.array([0.3, -3.0, 2.7, -18, 52], 'float64') #wrong
nose.tools.assert_raises(RuntimeError, setattr, m, 'biases', b)
nose.tools.assert_raises(RuntimeError, setattr, m, 'input_subtract', b)
nose.tools.assert_raises(RuntimeError, setattr, m, 'input_divide', b)
def test_correctness():
# Tests the correctness of a linear machine
c = HDF5File(MACHINE)
m = Machine(c)
def presumed(ivalue):
"""Calculates, by hand, the presumed output given the input"""
# These are the supposed preloaded values from the file "MACHINE"
isub = numpy.array([0., 0.5, 0.5], 'float64')
idiv = numpy.array([0.5, 1.0, 1.0], 'float64')
w = numpy.array([[0.4, 0.4, 0.2], [0.1, 0.2, 0.7]], 'float64')
b = numpy.array([0.3, -3.0], 'float64')
act = math.tanh
return numpy.array([ act((w[i,:]*((ivalue-isub)/idiv)).sum() + b[i]) for i in range(w.shape[0]) ], 'float64')
testing = [
[1,1,1],
[0.5,0.2,200],
[-27,35.77,0],
[12,0,0],
]
# 1D case
maxerr = numpy.ndarray((2,), 'float64')
maxerr.fill(1e-10)
for k in testing:
input = numpy.array(k, 'float64')
assert (abs(presumed(input) - m(input)) < maxerr).all()
# 2D case
output = m(testing)
for i, k in enumerate(testing):
input = numpy.array(k, 'float64')
assert (abs(presumed(input) - output[i,:]) < maxerr).all()
def test_user_allocation():
# Tests the correctness of a linear machine
c = HDF5File(MACHINE)
m = Machine(c)
def presumed(ivalue):
"""Calculates, by hand, the presumed output given the input"""
# These are the supposed preloaded values from the file "MACHINE"
isub = numpy.array([0., 0.5, 0.5], 'float64')
idiv = numpy.array([0.5, 1.0, 1.0], 'float64')
w = numpy.array([[0.4, 0.4, 0.2], [0.1, 0.2, 0.7]], 'float64')
b = numpy.array([0.3, -3.0], 'float64')
act = math.tanh
return numpy.array([ act((w[i,:]*((ivalue-isub)/idiv)).sum() + b[i]) for i in range(w.shape[0]) ], 'float64')
testing = [
[1,1,1],
[0.5,0.2,200],
[-27,35.77,0],
[12,0,0],
]
# 1D case
maxerr = numpy.ndarray((2,), 'float64')
maxerr.fill(1e-10)
output = numpy.ndarray((2,), 'float64')
for k in testing:
input = numpy.array(k, 'float64')
m(input, output)
assert (abs(presumed(input) - output) < maxerr).all()
# 2D case
output = numpy.ndarray((len(testing), 2), 'float64')
m(testing, output)
for i, k in enumerate(testing):
input = numpy.array(k, 'float64')
assert (abs(presumed(input) - output[i,:]) < maxerr).all()
def test_comparisons():
# Start by creating the data
w1 = numpy.array([[0.4, 0.1], [0.4, 0.2], [0.2, 0.7]], 'float64')
w2 = numpy.array([[0.4, 1.1], [0.4, 0.2], [0.2, 0.7]], 'float64')
b1 = numpy.array([0.3, -3.0], 'float64')
b2 = numpy.array([0.3, 3.0], 'float64')
isub1 = numpy.array([0., 0.5, 0.5], 'float64')
isub2 = numpy.array([0.5, 0.5, 0.5], 'float64')
idiv1 = numpy.array([0.5, 1.0, 1.0], 'float64')
idiv2 = numpy.array([1.5, 1.0, 1.0], 'float64')
# Creates Machine's
m1 = Machine(w1)
m1.input_subtract = isub1
m1.input_divide = idiv1
m1.biases = b1
m1.activation = HyperbolicTangent()
m1b = Machine(m1)
m1c = Machine(w1)
m1c.input_subtract = isub1
m1c.input_divide = idiv1
m1c.biases = b1
m1c.activation = HyperbolicTangent()
m2 = Machine(w2)
m2.input_subtract = isub1
m2.input_divide = idiv1
m2.biases = b1
m2.activation = HyperbolicTangent()
m3 = Machine(w1)
m3.input_subtract = isub2
m3.input_divide = idiv1
m3.biases = b1
m3.activation = HyperbolicTangent()
m4 = Machine(w1)
m4.input_subtract = isub1
m4.input_divide = idiv2
m4.biases = b1
m4.activation = HyperbolicTangent()
m5 = Machine(w1)
m5.input_subtract = isub1
m5.input_divide = idiv1
m5.biases = b2
m5.activation = HyperbolicTangent()
m6 = Machine(w1)
m6.input_subtract = isub1
m6.input_divide = idiv1
m6.biases = b1
m6.activation = Identity()
# Compares them using the overloaded operators == and !=
assert m1 == m1b
assert not m1 != m1b
assert m1.is_similar_to(m1b)
assert m1 == m1c
assert not m1 != m1c
assert m1.is_similar_to(m1c)
assert not m1 == m2
assert m1 != m2
assert not m1.is_similar_to(m2)
assert not m1 == m3
assert m1 != m3
assert not m1.is_similar_to(m3)
assert not m1 == m4
assert m1 != m4
assert not m1.is_similar_to(m4)
assert not m1 == m5
assert m1 != m5
assert not m1.is_similar_to(m5)
assert not m1 == m6
assert m1 != m6
assert not m1.is_similar_to(m6)
def test_pca_settings():
T = PCATrainer()
assert T.use_svd == True
T.use_svd = False
assert T.use_svd == False
T = PCATrainer(False)
assert T.use_svd == False
T.use_svd = True
assert T.use_svd == True
def test_pca_versus_matlab_princomp():
# Tests our SVD/PCA extractor.
data = numpy.array([
[2.5, 2.4],
[0.5, 0.7],
[2.2, 2.9],
[1.9, 2.2],
[3.1, 3.0],
[2.3, 2.7],
[2., 1.6],
[1., 1.1],
[1.5, 1.6],
[1.1, 0.9],
], dtype='float64')
# Expected results (from Matlab's princomp) - a good ground truth?
eig_val_correct = numpy.array([1.28402771, 0.0490834], 'float64')
eig_vec_correct = numpy.array([[-0.6778734, -0.73517866], [-0.73517866, 0.6778734]], 'float64')
T = PCATrainer()
machine_svd, eig_vals_svd = T.train(data)
assert numpy.allclose(abs(machine_svd.weights/eig_vec_correct), 1.0)
assert numpy.allclose(eig_vals_svd, eig_val_correct)
assert machine_svd.weights.shape == (2,2)
def test_pca_versus_matlab_princomp_safe():
# Tests our SVD/PCA extractor.
data = numpy.array([
[2.5, 2.4],
[0.5, 0.7],
[2.2, 2.9],
[1.9, 2.2],
[3.1, 3.0],
[2.3, 2.7],
[2., 1.6],
[1., 1.1],
[1.5, 1.6],
[1.1, 0.9],
], dtype='float64')
# Expected results (from Matlab's princomp) - a good ground truth?
eig_val_correct = numpy.array([1.28402771, 0.0490834], 'float64')
eig_vec_correct = numpy.array([[-0.6778734, -0.73517866], [-0.73517866, 0.6778734]], 'float64')
T = PCATrainer()
T.safe_svd = True
machine_safe_svd, eig_vals_safe_svd = T.train(data)
assert numpy.allclose(abs(machine_safe_svd.weights/eig_vec_correct), 1.0)
assert numpy.allclose(eig_vals_safe_svd, eig_val_correct)
assert machine_safe_svd.weights.shape == (2,2)
def test_pca_versus_matlab_princomp_cov():
# Tests our SVD/PCA extractor.
data = numpy.array([
[2.5, 2.4],
[0.5, 0.7],
[2.2, 2.9],
[1.9, 2.2],
[3.1, 3.0],
[2.3, 2.7],
[2., 1.6],
[1., 1.1],
[1.5, 1.6],
[1.1, 0.9],
], dtype='float64')
# Expected results (from Matlab's princomp) - a good ground truth?
eig_val_correct = numpy.array([1.28402771, 0.0490834], 'float64')
eig_vec_correct = numpy.array([[-0.6778734, -0.73517866], [-0.73517866, 0.6778734]], 'float64')
T = PCATrainer()
T.use_svd = False  # make it use the covariance method
machine_cov, eig_vals_cov = T.train(data)
assert numpy.allclose(abs(machine_cov.weights/eig_vec_correct), 1.0)
assert numpy.allclose(eig_vals_cov, eig_val_correct)
assert machine_cov.weights.shape == (2,2)
def test_pca_versus_matlab_princomp_2():
# Tests our SVD/PCA extractor.
data = numpy.array([
[1,2, 3,5,7],
[2,4,19,0,2],
[3,6, 5,3,3],
[4,8,13,4,2],
], dtype='float64')
# Expected results (from Matlab's princomp) - a good ground truth?
eig_val_correct = numpy.array([61.9870996, 9.49613738, 1.85009634], 'float64')
# Train method 1
T = PCATrainer()
machine_svd, eig_vals_svd = T.train(data)
assert numpy.allclose(eig_vals_svd, eig_val_correct)
assert machine_svd.weights.shape == (5,3)
machine_safe_svd, eig_vals_safe_svd = T.train(data)
assert numpy.allclose(eig_vals_safe_svd, eig_val_correct)
assert machine_safe_svd.weights.shape == (5,3)
T.use_svd = False  # make it use the covariance method
machine_cov, eig_vals_cov = T.train(data)
assert numpy.allclose(eig_vals_cov, eig_val_correct)
assert machine_cov.weights.shape == (5,3)
def test_pca_trainer_comparisons():
# Constructors and comparison operators
t1 = PCATrainer()
t2 = PCATrainer()
t3 = PCATrainer(t2)
t4 = t3
assert t1 == t2
assert t1.is_similar_to(t2)
assert t1 == t3
assert t1.is_similar_to(t3)
assert t1 == t4
assert t1.is_similar_to(t4)
t5 = PCATrainer(False)
t6 = PCATrainer(False)
assert t5 == t6
assert t5.is_similar_to(t6)
assert t5 != t1
t7 = PCATrainer(t1)
assert t1 == t7
def test_pca_trainer_comparisons_safe():
t1 = PCATrainer()
t7 = PCATrainer(t1)
t7.safe_svd = True
assert t1 != t7
t7.safe_svd = False
assert t1 == t7
def test_pca_svd_vs_cov_random_1():
# Tests our SVD/PCA extractor.
data = numpy.random.rand(1000,4)
T = PCATrainer()
machine_svd, eig_vals_svd = T.train(data)
T.use_svd = False  # make it use the covariance method
machine_cov, eig_vals_cov = T.train(data)
assert numpy.allclose(eig_vals_svd, eig_vals_cov)
assert machine_svd.weights.shape == (4,4)
assert numpy.allclose(machine_svd.input_subtract, machine_cov.input_subtract)
assert numpy.allclose(machine_svd.input_divide, machine_cov.input_divide)
assert numpy.allclose(abs(machine_svd.weights/machine_cov.weights), 1.0)
def test_pca_svd_vs_cov_random_1_safe():
# Tests our SVD/PCA extractor.
data = numpy.random.rand(1000,4)
# Train method 1
T = PCATrainer()
T.safe_svd = True
machine_svd, eig_vals_svd = T.train(data)
T.use_svd = False  # make it use the covariance method
machine_cov, eig_vals_cov = T.train(data)
assert numpy.allclose(eig_vals_svd, eig_vals_cov)
assert machine_svd.weights.shape == (4,4)
assert numpy.allclose(machine_svd.input_subtract, machine_cov.input_subtract)
assert numpy.allclose(machine_svd.input_divide, machine_cov.input_divide)
assert numpy.allclose(abs(machine_svd.weights/machine_cov.weights), 1.0)
def test_pca_svd_vs_cov_random_2():
# Tests our SVD/PCA extractor.
data = numpy.random.rand(15,60)
# Train method 1
T = PCATrainer()
machine_svd, eig_vals_svd = T.train(data)
T.use_svd = False  # make it use the covariance method
machine_cov, eig_vals_cov = T.train(data)
assert numpy.allclose(eig_vals_svd, eig_vals_cov)
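# with only 15 samples in 60 dimensions, at most 14 (= n_samples - 1) components carry variance, hence the (60, 14) shape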
assert machine_svd.weights.shape == (60,14)
assert numpy.allclose(machine_svd.input_subtract, machine_cov.input_subtract)
assert numpy.allclose(machine_svd.input_divide, machine_cov.input_divide)
assert numpy.allclose(abs(machine_svd.weights/machine_cov.weights), 1.0)
def test_pca_svd_vs_cov_random_2_safe():
# Tests our SVD/PCA extractor.
data = numpy.random.rand(15,60)
# Train method 1
T = PCATrainer()
T.safe_svd = True
machine_svd, eig_vals_svd = T.train(data)
T.use_svd = False  # make it use the covariance method
machine_cov, eig_vals_cov = T.train(data)
assert numpy.allclose(eig_vals_svd, eig_vals_cov)
assert machine_svd.weights.shape == (60,14)
assert numpy.allclose(machine_svd.input_subtract, machine_cov.input_subtract)
assert numpy.allclose(machine_svd.input_divide, machine_cov.input_divide)
assert numpy.allclose(abs(machine_svd.weights/machine_cov.weights), 1.0)
def test_fisher_lda_settings():
t = FisherLDATrainer()
assert t.use_pinv == False
assert t.strip_to_rank == True
t.use_pinv = True
assert t.use_pinv
t.strip_to_rank = False
assert t.strip_to_rank == False
t = FisherLDATrainer(use_pinv=True)
assert t.use_pinv
assert t.strip_to_rank
t = FisherLDATrainer(strip_to_rank=False)
assert t.use_pinv == False
assert t.strip_to_rank == False
def test_fisher_lda():
# Tests our Fisher/LDA trainer for linear machines for a simple 2-class
# "fake" problem:
data = [
numpy.array([
[2.5, 2.4],
[2.2, 2.9],
[1.9, 2.2],
[3.1, 3.0],
[2.3, 2.7],
], dtype='float64'),
numpy.array([
[0.5, 0.7],
[2., 1.6],
[1., 1.1],
[1.5, 1.6],
[1.1, 0.9],
], dtype='float64'),
]
# Expected results
exp_trans_data = [
[1.0019, 3.1205, 0.9405, 2.4962, 2.2949],
[-2.9042, -1.3179, -2.0172, -0.7720, -2.8428]
]
exp_mean = numpy.array([1.8100, 1.9100])
exp_val = numpy.array([5.394526])
exp_mach = numpy.array([[-0.291529], [0.956562]])
T = FisherLDATrainer()
machine, eig_vals = T.train(data)
# Makes sure results are good
assert numpy.alltrue(abs(machine.input_subtract - exp_mean) < 1e-6)
assert numpy.alltrue(abs(machine.weights - exp_mach) < 1e-6)
assert numpy.alltrue(abs(eig_vals - exp_val) < 1e-6)
# Use the pseudo-inverse method
T.use_pinv = True
machine_pinv, eig_vals_pinv = T.train(data)
# Makes sure results are good
assert numpy.alltrue(abs(machine_pinv.input_subtract - exp_mean) < 1e-6)
assert numpy.alltrue(abs(eig_vals_pinv - exp_val) < 1e-6)
# Eigenvectors could differ by a constant factor between the two methods
weight_ratio = machine_pinv.weights[0] / machine.weights[0]
normalized_weights = (machine_pinv.weights.T/weight_ratio).T
assert numpy.allclose(machine.weights, normalized_weights)
def test_fisher_lda_bis():
# Tests our Fisher/LDA trainer for linear machines for a simple 2-class
# "fake" problem:
data = [
numpy.array([
[2.5, 2.4, 2.5],
[2.2, 2.9, 3.],
[1.9, 2.2, 2.],
[3.1, 3.0, 3.1],
[2.3, 2.7, 2.4],
], dtype='float64'),
numpy.array([
[-0.5, -0.7, -1.],
[-2., -1.6, -2.],
[-1., -1.1, -1.],
[-1.5, -1.6, -1.6],
[-1.1, -0.9, -1.],
], dtype='float64'),
]
# Expected results after resizing
exp_mean = numpy.array([0.59, 0.73, 0.64])
exp_val = numpy.array([33.9435556])
exp_mach = numpy.array([[0.14322439], [-0.98379062], [0.10790173]])
T = FisherLDATrainer()
machine, eig_vals = T.train(data)
# Makes sure results are good
machine.resize(3,1)  # eigenvalues close to 0 are not significant (keep only the first one)
assert numpy.alltrue(abs(machine.input_subtract - exp_mean) < 1e-6)
assert numpy.alltrue(abs(eig_vals[0:1] - exp_val[0:1]) < 1e-6)
assert numpy.alltrue(abs(machine.weights[:,0] - exp_mach[:,0]) < 1e-6)
# Use the pseudo-inverse method
T.use_pinv = True
machine_pinv, eig_vals_pinv = T.train(data)
# Makes sure results are good
machine_pinv.resize(3,1)  # eigenvalues close to 0 are not significant (keep only the first one)
assert numpy.alltrue(abs(machine_pinv.input_subtract - exp_mean) < 1e-6)
assert numpy.alltrue(abs(eig_vals_pinv[0:1] - exp_val[0:1]) < 1e-6)
# Eigenvectors could differ by a constant factor between the two methods
weight_ratio = machine_pinv.weights[0] / machine.weights[0]
normalized_weights = (machine_pinv.weights.T/weight_ratio).T
assert numpy.allclose(machine.weights, normalized_weights)
def test_fisher_lda_comparisons():
# Constructors and comparison operators
t1 = FisherLDATrainer()
t2 = FisherLDATrainer()
t3 = FisherLDATrainer(t2)
t4 = t3
assert t1 == t2
assert t1.is_similar_to(t2)
assert t1 == t3
assert t1.is_similar_to(t3)
assert t1 == t4
assert t1.is_similar_to(t4)
t3 = FisherLDATrainer(use_pinv=True)
t4 = FisherLDATrainer(use_pinv=True)
assert t3 == t4
assert t3.is_similar_to(t4)
assert t3 != t1
assert not t3.is_similar_to(t2)
def test_whitening_initialization():
# Constructors and comparison operators
t1 = WhiteningTrainer()
t2 = WhiteningTrainer()
t3 = WhiteningTrainer(t2)
t4 = t3
assert t1 == t2
assert t1.is_similar_to(t2)
assert t1 == t3
assert t1.is_similar_to(t3)
assert t1 == t4
assert t1.is_similar_to(t4)
def test_whitening_train():
# Tests our Whitening extractor.
data = numpy.array([[ 1.2622, -1.6443, 0.1889],
[ 0.4286, -0.8922, 1.3020],
[-0.6613, 0.0430, 0.6377],
[-0.8718, -0.4788, 0.3988],
[-0.0098, -0.3121,-0.1807],
[ 0.4301, 0.4886, -0.1456]])
sample = numpy.array([1, 2, 3.])
# Expected results (from matlab)
mean_ref = numpy.array([0.096324163333333, -0.465965438333333, 0.366839091666667])
whit_ref = numpy.array([[1.608410253685985, 0, 0],
[1.079813355720326, 1.411083365535711, 0],
[0.693459921529905, 0.571417184139332, 1.800117179839927]])
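# (the reference whitening transform is lower-triangular, consistent with a Cholesky-based whitening)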
sample_whitened_ref = numpy.array([5.942255453628436, 4.984316201643742, 4.739998188373740])
# Runs whitening (first method)
t = WhiteningTrainer()
m = Machine(3,3)
t.train(data, m)
s = m.forward(sample)
# Makes sure results are good
eps = 1e-4
assert numpy.allclose(m.input_subtract, mean_ref, eps, eps)
assert numpy.allclose(m.weights, whit_ref, eps, eps)
assert numpy.allclose(s, sample_whitened_ref, eps, eps)
# Runs whitening (second method)
m2 = t.train(data)
s2 = m2.forward(sample)
# Makes sure results are good
eps = 1e-4
assert numpy.allclose(m2.input_subtract, mean_ref, eps, eps)
assert numpy.allclose(m2.weights, whit_ref, eps, eps)
assert numpy.allclose(s2, sample_whitened_ref, eps, eps)
def test_wccn_initialization():
# Constructors and comparison operators
t1 = WCCNTrainer()
t2 = WCCNTrainer()
t3 = WCCNTrainer(t2)
t4 = t3
assert t1 == t2
assert t1.is_similar_to(t2)
assert t1 == t3
assert t1.is_similar_to(t3)
assert t1 == t4
assert t1.is_similar_to(t4)
def test_wccn_train():
# Tests our WCCN trainer.
data = [numpy.array([[ 1.2622, -1.6443, 0.1889], [ 0.4286, -0.8922, 1.3020]]),
numpy.array([[-0.6613, 0.0430, 0.6377], [-0.8718, -0.4788, 0.3988]]),
numpy.array([[-0.0098, -0.3121,-0.1807], [ 0.4301, 0.4886, -0.1456]])]
sample = numpy.array([1, 2, 3.])
# Expected results
mean_ref = numpy.array([ 0., 0., 0.])
weight_ref = numpy.array([[ 15.8455444 , 0. , 0. ],
[-10.7946764 , 2.87942129, 0. ],
[ 18.76762201, -2.19719292, 2.1505817]])
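# (the reference WCCN transform is also lower-triangular, presumably a Cholesky factor related to the within-class covariance)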
sample_wccn_ref = numpy.array([50.55905765, -0.83273618, 6.45174511])
# Runs WCCN (first method)
t = WCCNTrainer()
m = Machine(3,3)
t.train(data, m)
s = m.forward(sample)
# Makes sure results are good
eps = 1e-4
assert numpy.allclose(m.input_subtract, mean_ref, eps, eps)
assert numpy.allclose(m.weights, weight_ref, eps, eps)
assert numpy.allclose(s, sample_wccn_ref, eps, eps)
# Runs WCCN (second method)
m2 = t.train(data)
s2 = m2.forward(sample)
# Makes sure results are good
eps = 1e-4
assert numpy.allclose(m2.input_subtract, mean_ref, eps, eps)
assert numpy.allclose(m2.weights, weight_ref, eps, eps)
assert numpy.allclose(s2, sample_wccn_ref, eps, eps)
def test_cglogreg():
# Tests our LLR Trainer.
positives = numpy.array([
[1.,1.2,-1.],
[2.,2.1,2.2],
[3.,2.9,3.1],
[4.,3.7,4.],
[5.,5.2,4.9],
[6.,6.1,5.9],
[7.,7.,7.3],
], dtype='float64')
negatives = numpy.array([
[-10.,-9.2,-1.],
[-5.,-4.1,-0.5],
[-10.,-9.9,-1.8],
[-5.,-5.4,-0.3],
[-10.,-9.3,-0.7],
[-5.,-4.5,-0.5],
[-10.,-9.7,-1.2],
[-5.,-4.8,-0.2],
], dtype='float64')
# Expected trained machine
#weights_ref= numpy.array([[13.5714], [19.3997], [-0.6432]])
weights_ref = numpy.array([[1.75536], [2.69297], [-0.54142]])
#bias_ref = numpy.array([55.3255])
bias_ref = numpy.array([7.26999])
# Features and expected outputs of the trained machine
feat1 = numpy.array([1.,2.,3.])
#out1 = 105.7668
out1 = 12.78703
feat2 = numpy.array([2.,3.,4.])
#out2 = 138.0947
out2 = 16.69394
# Trains a machine (method 1)
T = CGLogRegTrainer(0.5, 1e-5, 30)
machine1 = T.train(negatives,positives)
# Makes sure results are good
assert (abs(machine1.weights - weights_ref) < 2e-4).all()
assert (abs(machine1.biases - bias_ref) < 2e-4).all()
assert abs(machine1(feat1) - out1) < 2e-4
assert abs(machine1(feat2) - out2) < 2e-4
# Trains a machine (method 2)
machine2 = Machine()
T.train(negatives, positives, machine2)
# Makes sure results are good
assert (abs(machine2.weights - weights_ref) < 2e-4).all()
assert (abs(machine2.biases - bias_ref) < 2e-4).all()
assert abs(machine2(feat1) - out1) < 2e-4
assert abs(machine2(feat2) - out2) < 2e-4
# Expected trained machine (with regularization)
weights_ref = numpy.array([[0.54926], [0.58304], [0.06558]])
bias_ref = numpy.array([0.27897])
# Trains a machine (method 1)
T = CGLogRegTrainer(0.5, 1e-5, 30, 1.)
machine1 = T.train(negatives, positives)
# Makes sure results are good
assert (abs(machine1.weights - weights_ref) < 2e-4).all()
assert (abs(machine1.biases - bias_ref) < 2e-4).all()
def test_cglogreg_keywordargs():
# Tests our LLR Trainer.
positives = numpy.array([
[1.,1.2,-1.],
[2.,2.1,2.2],
[3.,2.9,3.1],
[4.,3.7,4.],
[5.,5.2,4.9],
[6.,6.1,5.9],
[7.,7.,7.3],
], dtype='float64')
negatives = numpy.array([
[-10.,-9.2,-1.],
[-5.,-4.1,-0.5],
[-10.,-9.9,-1.8],
[-5.,-5.4,-0.3],
[-10.,-9.3,-0.7],
[-5.,-4.5,-0.5],
[-10.,-9.7,-1.2],
[-5.,-4.8,-0.2],
], dtype='float64')
# Expected trained machine
#weights_ref= numpy.array([[13.5714], [19.3997], [-0.6432]])
weights_ref = numpy.array([[1.75536], [2.69297], [-0.54142]])
#bias_ref = numpy.array([55.3255])
bias_ref = numpy.array([7.26999])
# Features and expected outputs of the trained machine
feat1 = numpy.array([1.,2.,3.])
#out1 = 105.7668
out1 = 12.78703
feat2 = numpy.array([2.,3.,4.])
#out2 = 138.0947
out2 = 16.69394
# Trains a machine (method 1)
T = CGLogRegTrainer(prior=0.5, convergence_threshold=1e-5, max_iterations=30)
machine1 = T.train(negatives,positives)
# Makes sure results are good
assert (abs(machine1.weights - weights_ref) < 2e-4).all()
assert (abs(machine1.biases - bias_ref) < 2e-4).all()
assert abs(machine1(feat1) - out1) < 2e-4
assert abs(machine1(feat2) - out2) < 2e-4
# Trains a machine (method 2)
machine2 = Machine()
T.train(negatives, positives, machine2)
# Makes sure results are good
assert (abs(machine2.weights - weights_ref) < 2e-4).all()
assert (abs(machine2.biases - bias_ref) < 2e-4).all()
assert abs(machine2(feat1) - out1) < 2e-4
assert abs(machine2(feat2) - out2) < 2e-4
# Expected trained machine (with regularization)
weights_ref = numpy.array([[0.54926], [0.58304], [0.06558]])
bias_ref = numpy.array([0.27897])
# Trains a machine (method 1)
T = CGLogRegTrainer(0.5, 1e-5, 30, 1.)
machine1 = T.train(negatives, positives)
# Makes sure results are good
assert (abs(machine1.weights - weights_ref) < 2e-4).all()
assert (abs(machine1.biases - bias_ref) < 2e-4).all()
def test_cglogreg_norm():
# read some real test data;
# for toy examples the results are quite different...
pos1 = bob.io.base.load(datafile('positives_isv.hdf5', __name__))
neg1 = bob.io.base.load(datafile('negatives_isv.hdf5', __name__))
pos2 = bob.io.base.load(datafile('positives_lda.hdf5', __name__))
neg2 = bob.io.base.load(datafile('negatives_lda.hdf5', __name__))
negatives = numpy.vstack((neg1, neg2)).T
positives = numpy.vstack((pos1, pos2)).T
# Train the machine after mean-std norm
T = CGLogRegTrainer(0.5, 1e-10, 10000, mean_std_norm=True)
machine = T.train(negatives,positives)
# assert that mean and variance are correct
mean = numpy.mean(numpy.vstack((positives, negatives)), 0)
std = numpy.std(numpy.vstack((positives, negatives)), 0)
assert (abs(machine.input_subtract - mean) < 1e-10).all()
assert (abs(machine.input_divide - std) < 1e-10).all()
# apply it to test data
test1 = [1., -50.]
test2 = [0.5, -86.]
res1 = machine(test1)
res2 = machine(test2)
# normalize training data
pos = numpy.vstack([(positives[i] - mean) / std for i in range(len(positives))])
neg = numpy.vstack([(negatives[i] - mean) / std for i in range(len(negatives))])
# re-train the machine; should give identical results
T.mean_std_norm = False
machine = T.train(neg, pos)
machine.input_subtract = mean
machine.input_divide = std
# assert that the result is the same
assert abs(machine(test1) - res1) < 1e-10
assert abs(machine(test2) - res2) < 1e-10
def test_cglogreg_norm_keyword():
# read some real test data;
# for toy examples the results are quite different...
pos1 = bob.io.base.load(datafile('positives_isv.hdf5', __name__))
neg1 = bob.io.base.load(datafile('negatives_isv.hdf5', __name__))
pos2 = bob.io.base.load(datafile('positives_lda.hdf5', __name__))
neg2 = bob.io.base.load(datafile('negatives_lda.hdf5', __name__))
negatives = numpy.vstack((neg1, neg2)).T
positives = numpy.vstack((pos1, pos2)).T
# Train the machine after mean-std norm
T = CGLogRegTrainer(0.5, 1e-10, 10000, reg=0.0001, mean_std_norm=True)
machine = T.train(negatives,positives)
# assert that mean and variance are correct
mean = numpy.mean(numpy.vstack((positives, negatives)), 0)
std = numpy.std(numpy.vstack((positives, negatives)), 0)
assert (abs(machine.input_subtract - mean) < 1e-10).all()
assert (abs(machine.input_divide - std) < 1e-10).all()
# apply it to test data
test1 = [1., -50.]
test2 = [0.5, -86.]
res1 = machine(test1)
res2 = machine(test2)
# normalize training data
pos = numpy.vstack([(positives[i] - mean) / std for i in range(len(positives))])
neg = numpy.vstack([(negatives[i] - mean) / std for i in range(len(negatives))])
# re-train the machine; should give identical results
T.mean_std_norm = False
machine = T.train(neg, pos)
machine.input_subtract = mean
machine.input_divide = std
# assert that the result is the same
assert abs(machine(test1) - res1) < 1e-10
assert abs(machine(test2) - res2) < 1e-10
@nose.tools.nottest
def test_cglogreg_norm_slow():
pos1 = bob.io.base.load(datafile('positives_isv.hdf5', __name__))
neg1 = bob.io.base.load(datafile('negatives_isv.hdf5', __name__))
pos2 = bob.io.base.load(datafile('positives_lda.hdf5', __name__))
neg2 = bob.io.base.load(datafile('negatives_lda.hdf5', __name__))
negatives = numpy.vstack((neg1, neg2)).T
positives = numpy.vstack((pos1, pos2)).T
T = CGLogRegTrainer(0.5, 1e-10, 10000, mean_std_norm=True)
# train a first machine with the built-in mean/std normalization
machine = T.train(negatives, positives)
# apply it to test data
test1 = [1., -50.]
test2 = [0.5, -86.]
res1 = machine(test1)
res2 = machine(test2)
# try the training without normalization
T.mean_std_norm = False
machine = T.train(negatives, positives)
# check that the results are at least approximately equal
# Note: lower values for epsilon and a higher number of iterations improve the stability
assert abs(machine(test1) - res1) < 1e-3
assert abs(machine(test2) - res2) < 1e-3
|
|
import docker
import pytest
from . import fake_api
from docker import auth
from .api_test import (
BaseAPIClientTest, fake_request, DEFAULT_TIMEOUT_SECONDS, url_prefix,
fake_resolve_authconfig
)
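# prefer the standard-library unittest.mock (Python 3); fall back to the external mock package otherwise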
try:
from unittest import mock
except ImportError:
import mock
class ImageTest(BaseAPIClientTest):
def test_image_viz(self):
with pytest.raises(Exception):
self.client.images('busybox', viz=True)
self.fail('Viz output should not be supported!')
def test_images(self):
self.client.images(all=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 0, 'all': 1},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_images_quiet(self):
self.client.images(all=True, quiet=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 1, 'all': 1},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_ids(self):
self.client.images(quiet=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 1, 'all': 0},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_images_filters(self):
self.client.images(filters={'dangling': True})
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 0, 'all': 0,
'filters': '{"dangling": ["true"]}'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_pull(self):
self.client.pull('joffrey/test001')
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'images/create'
)
self.assertEqual(
args[1]['params'],
{'tag': None, 'fromImage': 'joffrey/test001'}
)
self.assertFalse(args[1]['stream'])
def test_pull_stream(self):
self.client.pull('joffrey/test001', stream=True)
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'images/create'
)
self.assertEqual(
args[1]['params'],
{'tag': None, 'fromImage': 'joffrey/test001'}
)
self.assertTrue(args[1]['stream'])
def test_commit(self):
self.client.commit(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'commit',
data='{}',
headers={'Content-Type': 'application/json'},
params={
'repo': None,
'comment': None,
'tag': None,
'container': '3cc2351ab11b',
'author': None,
'changes': None
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_image(self):
self.client.remove_image(fake_api.FAKE_IMAGE_ID)
fake_request.assert_called_with(
'DELETE',
url_prefix + 'images/e9aa60c60128',
params={'force': False, 'noprune': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_history(self):
self.client.history(fake_api.FAKE_IMAGE_NAME)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/test_image/history',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image(self):
self.client.import_image(
fake_api.FAKE_TARBALL_PATH,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromSrc': fake_api.FAKE_TARBALL_PATH
},
data=None,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image_from_bytes(self):
stream = (i for i in range(0, 100))
self.client.import_image(
stream,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromSrc': '-',
},
headers={
'Content-Type': 'application/tar',
},
data=stream,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image_from_image(self):
self.client.import_image(
image=fake_api.FAKE_IMAGE_NAME,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromImage': fake_api.FAKE_IMAGE_NAME
},
data=None,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_image(self):
self.client.inspect_image(fake_api.FAKE_IMAGE_NAME)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/test_image/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_image_undefined_id(self):
for arg in None, '', {True: True}:
with pytest.raises(docker.errors.NullResource) as excinfo:
self.client.inspect_image(arg)
self.assertEqual(
excinfo.value.args[0], 'Resource ID was not provided'
)
def test_insert_image(self):
try:
self.client.insert(fake_api.FAKE_IMAGE_NAME,
fake_api.FAKE_URL, fake_api.FAKE_PATH)
except docker.errors.DeprecatedMethod:
self.assertTrue(
docker.utils.compare_version('1.12', self.client._version) >= 0
)
return
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/insert',
params={
'url': fake_api.FAKE_URL,
'path': fake_api.FAKE_PATH
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image(self):
with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/push',
params={
'tag': None
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_with_tag(self):
with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(
fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/push',
params={
'tag': fake_api.FAKE_TAG_NAME,
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_with_auth(self):
auth_config = {
'username': "test_user",
'password': "test_password",
'serveraddress': "test_server",
}
encoded_auth = auth.encode_header(auth_config)
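# encode_header serializes the credentials to JSON and base64-encodes them for the X-Registry-Auth header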
self.client.push(
fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME,
auth_config=auth_config
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/push',
params={
'tag': fake_api.FAKE_TAG_NAME,
},
data='{}',
headers={'Content-Type': 'application/json',
'X-Registry-Auth': encoded_auth},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_stream(self):
with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME, stream=True)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/push',
params={
'tag': None
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image(self):
self.client.tag(fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': None,
'repo': 'repo',
'force': 0
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image_tag(self):
self.client.tag(
fake_api.FAKE_IMAGE_ID,
fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': 'tag',
'repo': 'repo',
'force': 0
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image_force(self):
self.client.tag(
fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME, force=True)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': None,
'repo': 'repo',
'force': 1
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_get_image(self):
self.client.get_image(fake_api.FAKE_IMAGE_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/e9aa60c60128/get',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_load_image(self):
self.client.load_image('Byte Stream....')
fake_request.assert_called_with(
'POST',
url_prefix + 'images/load',
data='Byte Stream....',
stream=True,
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_load_image_quiet(self):
self.client.load_image('Byte Stream....', quiet=True)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/load',
data='Byte Stream....',
stream=True,
params={'quiet': True},
timeout=DEFAULT_TIMEOUT_SECONDS
)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from keystone import config
from keystone import controllers
from keystone.openstack.common.fixture import moxstubout
from keystone.openstack.common import jsonutils
from keystone import tests
CONF = config.CONF
v2_MEDIA_TYPES = [
{
"base": "application/json",
"type": "application/"
"vnd.openstack.identity-v2.0+json"
}, {
"base": "application/xml",
"type": "application/"
"vnd.openstack.identity-v2.0+xml"
}
]
v2_HTML_DESCRIPTION = {
"rel": "describedby",
"type": "text/html",
"href": "http://docs.openstack.org/api/"
"openstack-identity-service/2.0/"
"content/"
}
v2_PDF_DESCRIPTION = {
"rel": "describedby",
"type": "application/pdf",
"href": "http://docs.openstack.org/api/"
"openstack-identity-service/2.0/"
"identity-dev-guide-2.0.pdf"
}
v2_EXPECTED_RESPONSE = {
"id": "v2.0",
"status": "stable",
"updated": "2013-03-06T00:00:00Z",
"links": [
{
"rel": "self",
"href": "", # Will get filled in after initialization
},
v2_HTML_DESCRIPTION,
v2_PDF_DESCRIPTION
],
"media-types": v2_MEDIA_TYPES
}
v2_VERSION_RESPONSE = {
"version": v2_EXPECTED_RESPONSE
}
v3_MEDIA_TYPES = [
{
"base": "application/json",
"type": "application/"
"vnd.openstack.identity-v3+json"
}, {
"base": "application/xml",
"type": "application/"
"vnd.openstack.identity-v3+xml"
}
]
v3_EXPECTED_RESPONSE = {
"id": "v3.0",
"status": "stable",
"updated": "2013-03-06T00:00:00Z",
"links": [
{
"rel": "self",
"href": "", # Will get filled in after initialization
}
],
"media-types": v3_MEDIA_TYPES
}
v3_VERSION_RESPONSE = {
"version": v3_EXPECTED_RESPONSE
}
VERSIONS_RESPONSE = {
"versions": {
"values": [
v3_EXPECTED_RESPONSE,
v2_EXPECTED_RESPONSE
]
}
}
class VersionTestCase(tests.TestCase):
def setUp(self):
super(VersionTestCase, self).setUp()
self.load_backends()
self.public_app = self.loadapp('keystone', 'main')
self.admin_app = self.loadapp('keystone', 'admin')
port = random.randint(10000, 30000)
self.opt(public_port=port, admin_port=port)
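# both endpoints share the same random port, so the URL assertions below hold regardless of which app served the request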
fixture = self.useFixture(moxstubout.MoxStubout())
self.stubs = fixture.stubs
def _paste_in_port(self, response, port):
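# fill in the placeholder 'self' link with the expected endpoint URL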
for link in response['links']:
if link['rel'] == 'self':
link['href'] = port
def test_public_versions(self):
client = self.client(self.public_app)
resp = client.get('/')
self.assertEqual(resp.status_int, 300)
data = jsonutils.loads(resp.body)
expected = VERSIONS_RESPONSE
for version in expected['versions']['values']:
if version['id'] == 'v3.0':
self._paste_in_port(
version, 'http://localhost:%s/v3/' % CONF.public_port)
elif version['id'] == 'v2.0':
self._paste_in_port(
version, 'http://localhost:%s/v2.0/' % CONF.public_port)
self.assertEqual(data, expected)
def test_admin_versions(self):
client = self.client(self.admin_app)
resp = client.get('/')
self.assertEqual(resp.status_int, 300)
data = jsonutils.loads(resp.body)
expected = VERSIONS_RESPONSE
for version in expected['versions']['values']:
if version['id'] == 'v3.0':
self._paste_in_port(
version, 'http://localhost:%s/v3/' % CONF.admin_port)
elif version['id'] == 'v2.0':
self._paste_in_port(
version, 'http://localhost:%s/v2.0/' % CONF.admin_port)
self.assertEqual(data, expected)
def test_public_version_v2(self):
client = self.client(self.public_app)
resp = client.get('/v2.0/')
self.assertEqual(resp.status_int, 200)
data = jsonutils.loads(resp.body)
expected = v2_VERSION_RESPONSE
self._paste_in_port(expected['version'],
'http://localhost:%s/v2.0/' % CONF.public_port)
self.assertEqual(data, expected)
def test_admin_version_v2(self):
client = self.client(self.admin_app)
resp = client.get('/v2.0/')
self.assertEqual(resp.status_int, 200)
data = jsonutils.loads(resp.body)
expected = v2_VERSION_RESPONSE
self._paste_in_port(expected['version'],
'http://localhost:%s/v2.0/' % CONF.admin_port)
self.assertEqual(data, expected)
def test_public_version_v3(self):
client = self.client(self.public_app)
resp = client.get('/v3/')
self.assertEqual(resp.status_int, 200)
data = jsonutils.loads(resp.body)
expected = v3_VERSION_RESPONSE
self._paste_in_port(expected['version'],
'http://localhost:%s/v3/' % CONF.public_port)
self.assertEqual(data, expected)
def test_admin_version_v3(self):
client = self.client(self.public_app)
resp = client.get('/v3/')
self.assertEqual(resp.status_int, 200)
data = jsonutils.loads(resp.body)
expected = v3_VERSION_RESPONSE
self._paste_in_port(expected['version'],
'http://localhost:%s/v3/' % CONF.admin_port)
self.assertEqual(data, expected)
def test_v2_disabled(self):
self.stubs.Set(controllers, '_VERSIONS', ['v3'])
client = self.client(self.public_app)
# request to /v2.0 should fail
resp = client.get('/v2.0/')
self.assertEqual(resp.status_int, 404)
# request to /v3 should pass
resp = client.get('/v3/')
self.assertEqual(resp.status_int, 200)
data = jsonutils.loads(resp.body)
expected = v3_VERSION_RESPONSE
self._paste_in_port(expected['version'],
'http://localhost:%s/v3/' % CONF.public_port)
self.assertEqual(data, expected)
# only v3 information should be displayed by requests to /
v3_only_response = {
"versions": {
"values": [
v3_EXPECTED_RESPONSE
]
}
}
self._paste_in_port(v3_only_response['versions']['values'][0],
'http://localhost:%s/v3/' % CONF.public_port)
resp = client.get('/')
self.assertEqual(resp.status_int, 300)
data = jsonutils.loads(resp.body)
self.assertEqual(data, v3_only_response)
def test_v3_disabled(self):
self.stubs.Set(controllers, '_VERSIONS', ['v2.0'])
client = self.client(self.public_app)
# request to /v3 should fail
resp = client.get('/v3/')
self.assertEqual(resp.status_int, 404)
# request to /v2.0 should pass
resp = client.get('/v2.0/')
self.assertEqual(resp.status_int, 200)
data = jsonutils.loads(resp.body)
expected = v2_VERSION_RESPONSE
self._paste_in_port(expected['version'],
'http://localhost:%s/v2.0/' % CONF.public_port)
self.assertEqual(data, expected)
# only v2 information should be displayed by requests to /
v2_only_response = {
"versions": {
"values": [
v2_EXPECTED_RESPONSE
]
}
}
self._paste_in_port(v2_only_response['versions']['values'][0],
'http://localhost:%s/v2.0/' % CONF.public_port)
resp = client.get('/')
self.assertEqual(resp.status_int, 300)
data = jsonutils.loads(resp.body)
self.assertEqual(data, v2_only_response)
class XmlVersionTestCase(tests.TestCase):
REQUEST_HEADERS = {'Accept': 'application/xml'}
DOC_INTRO = '<?xml version="1.0" encoding="UTF-8"?>'
XML_NAMESPACE_ATTR = 'xmlns="http://docs.openstack.org/identity/api/v2.0"'
v2_VERSION_DATA = """
<version %(v2_namespace)s status="stable" updated="2013-03-06T00:00:00Z"
id="v2.0">
<media-types>
<media-type base="application/json" type="application/\
vnd.openstack.identity-v2.0+json"/>
<media-type base="application/xml" type="application/\
vnd.openstack.identity-v2.0+xml"/>
</media-types>
<links>
<link href="http://localhost:%%(port)s/v2.0/" rel="self"/>
<link href="http://docs.openstack.org/api/openstack-identity-service/\
2.0/content/" type="text/html" rel="describedby"/>
<link href="http://docs.openstack.org/api/openstack-identity-service/\
2.0/identity-dev-guide-2.0.pdf" type="application/pdf" rel="describedby"/>
</links>
<link href="http://localhost:%%(port)s/v2.0/" rel="self"/>
<link href="http://docs.openstack.org/api/openstack-identity-service/\
2.0/content/" type="text/html" rel="describedby"/>
<link href="http://docs.openstack.org/api/openstack-identity-service/\
2.0/identity-dev-guide-2.0.pdf" type="application/pdf" rel="describedby"/>
</version>
"""
v2_VERSION_RESPONSE = ((DOC_INTRO + v2_VERSION_DATA) %
dict(v2_namespace=XML_NAMESPACE_ATTR))
v3_VERSION_DATA = """
<version %(v3_namespace)s status="stable" updated="2013-03-06T00:00:00Z"
id="v3.0">
<media-types>
<media-type base="application/json" type="application/\
vnd.openstack.identity-v3+json"/>
<media-type base="application/xml" type="application/\
vnd.openstack.identity-v3+xml"/>
</media-types>
<links>
<link href="http://localhost:%%(port)s/v3/" rel="self"/>
</links>
</version>
"""
v3_VERSION_RESPONSE = ((DOC_INTRO + v3_VERSION_DATA) %
dict(v3_namespace=XML_NAMESPACE_ATTR))
VERSIONS_RESPONSE = ((DOC_INTRO + """
<versions %(namespace)s>
""" +
v3_VERSION_DATA +
v2_VERSION_DATA + """
</versions>
""") % dict(namespace=XML_NAMESPACE_ATTR, v3_namespace='', v2_namespace=''))
def setUp(self):
super(XmlVersionTestCase, self).setUp()
self.load_backends()
self.public_app = self.loadapp('keystone', 'main')
self.admin_app = self.loadapp('keystone', 'admin')
port = random.randint(10000, 30000)
self.opt(public_port=port, admin_port=port)
fixture = self.useFixture(moxstubout.MoxStubout())
self.stubs = fixture.stubs
def test_public_versions(self):
client = self.client(self.public_app)
resp = client.get('/', headers=self.REQUEST_HEADERS)
self.assertEqual(resp.status_int, 300)
data = resp.body
expected = self.VERSIONS_RESPONSE % dict(port=CONF.public_port)
self.assertEqualXML(data, expected)
def test_admin_versions(self):
client = self.client(self.admin_app)
resp = client.get('/', headers=self.REQUEST_HEADERS)
self.assertEqual(resp.status_int, 300)
data = resp.body
expected = self.VERSIONS_RESPONSE % dict(port=CONF.admin_port)
self.assertEqualXML(data, expected)
def test_public_version_v2(self):
client = self.client(self.public_app)
resp = client.get('/v2.0/', headers=self.REQUEST_HEADERS)
self.assertEqual(resp.status_int, 200)
data = resp.body
expected = self.v2_VERSION_RESPONSE % dict(port=CONF.public_port)
self.assertEqualXML(data, expected)
def test_admin_version_v2(self):
client = self.client(self.admin_app)
resp = client.get('/v2.0/', headers=self.REQUEST_HEADERS)
self.assertEqual(resp.status_int, 200)
data = resp.body
expected = self.v2_VERSION_RESPONSE % dict(port=CONF.admin_port)
self.assertEqualXML(data, expected)
def test_public_version_v3(self):
client = self.client(self.public_app)
resp = client.get('/v3/', headers=self.REQUEST_HEADERS)
self.assertEqual(resp.status_int, 200)
data = resp.body
expected = self.v3_VERSION_RESPONSE % dict(port=CONF.public_port)
self.assertEqualXML(data, expected)
def test_admin_version_v3(self):
client = self.client(self.public_app)
resp = client.get('/v3/', headers=self.REQUEST_HEADERS)
self.assertEqual(resp.status_int, 200)
data = resp.body
expected = self.v3_VERSION_RESPONSE % dict(port=CONF.admin_port)
self.assertEqualXML(data, expected)
def test_v2_disabled(self):
self.stubs.Set(controllers, '_VERSIONS', ['v3'])
client = self.client(self.public_app)
# request to /v3 should pass
resp = client.get('/v3/', headers=self.REQUEST_HEADERS)
self.assertEqual(resp.status_int, 200)
data = resp.body
expected = self.v3_VERSION_RESPONSE % dict(port=CONF.public_port)
self.assertEqualXML(data, expected)
# only v3 information should be displayed by requests to /
v3_only_response = ((self.DOC_INTRO + '<versions %(namespace)s>' +
self.v3_VERSION_DATA + '</versions>') %
dict(namespace=self.XML_NAMESPACE_ATTR,
v3_namespace='') %
dict(port=CONF.public_port))
resp = client.get('/', headers=self.REQUEST_HEADERS)
self.assertEqual(resp.status_int, 300)
data = resp.body
self.assertEqualXML(data, v3_only_response)
def test_v3_disabled(self):
self.stubs.Set(controllers, '_VERSIONS', ['v2.0'])
client = self.client(self.public_app)
# request to /v2.0 should pass
resp = client.get('/v2.0/', headers=self.REQUEST_HEADERS)
self.assertEqual(resp.status_int, 200)
data = resp.body
expected = self.v2_VERSION_RESPONSE % dict(port=CONF.public_port)
self.assertEqualXML(data, expected)
# only v2 information should be displayed by requests to /
v2_only_response = ((self.DOC_INTRO + '<versions %(namespace)s>' +
self.v2_VERSION_DATA + '</versions>') %
dict(namespace=self.XML_NAMESPACE_ATTR,
v2_namespace='') %
dict(port=CONF.public_port))
resp = client.get('/', headers=self.REQUEST_HEADERS)
self.assertEqual(resp.status_int, 300)
data = resp.body
self.assertEqualXML(data, v2_only_response)
|
|
# -*- coding: utf-8 -*-
"""Admin forms."""
from flask_wtf import Form
from wtforms.validators import DataRequired, Email, EqualTo, Length
from wtforms.widgets import html_params
from wtforms import BooleanField, StringField, TextAreaField, DateTimeField, PasswordField, SelectMultipleField
from flask_wtf.file import FileField, FileAllowed
from innovator.user.models import User, Reviewer
import json
class NewsComposeForm(Form):
"""
Compose new news.
:cvar StringField title: Handles the title of the news. Data is required.
:cvar TextAreaField summary: Handles news summary. Data is required.
:cvar TextAreaField content: Handles raw Markdown for news content. Data is required.
:cvar FileField image: Handles image upload. Only jpg, png, gif or bmp files are allowed, and the file is optional. If no file is provided, a default image will be used as the news image.
"""
title = StringField('Title', validators=[DataRequired()])
summary = TextAreaField('Summary', validators=[DataRequired()])
content = TextAreaField('Content', validators=[DataRequired()])
image = FileField('News Image (Optional)', validators=[FileAllowed(['jpg', 'png', 'gif', 'bmp'], 'Images only.')])
def validate(self):
initial_validation = super(NewsComposeForm, self).validate()
if not initial_validation:
return False
return True
class NewsEditForm(Form):
"""
Edit existing news.
:cvar StringField title: Handles the title of the news. Data is required.
:cvar TextAreaField summary: Handles news summary. Data is required.
:cvar TextAreaField content: Handles raw Markdown for news content. Data is required.
:cvar FileField image: Handles image upload. Only jpg, png, gif or bmp files are allowed, and the file is optional. If no file is provided, the existing image is kept unchanged.
"""
title = StringField('Title', validators=[DataRequired()])
summary = TextAreaField('Summary', validators=[DataRequired()])
content = TextAreaField('Content', validators=[DataRequired()])
image = FileField('News Image (Optional)', validators=[FileAllowed(['jpg', 'png', 'gif', 'bmp'], 'Images only.')])
def validate(self):
initial_validation = super(NewsEditForm, self).validate()
if not initial_validation:
return False
return True
class NewsDeleteForm(Form):
"""
An empty form for deleting an existing news item. Used for CSRF protection.
"""
class EventDeleteForm(Form):
"""
An empty form for deleting an event. Used for CSRF protection.
"""
class EventEditForm(Form):
"""
Event edit/create form.
:cvar StringField name: Name of event. Data is required.
:cvar TextAreaField description: Event description. Data is required.
:cvar DateTimeField start: Start time of the event. It displays as a string field on the web page, but only accepts a formatted time string. Data is required in this field.
:cvar DateTimeField end: End time of the event. It behaves the same as the ``start`` field. Data is required in this field.
.. note::
``DateTimeField`` displays like a string field, but only accepts a formatted date-time string.
Format: ``YYYY-MM-DD HH:MM:SS``
"""
name = StringField('Name', validators=[DataRequired()])
description = TextAreaField('Description', validators=[DataRequired()])
start = DateTimeField('Start Time', validators=[DataRequired()])
end = DateTimeField('End Time', validators=[DataRequired()])
extra = StringField('Extra Information', validators=[DataRequired()])
score_detail = TextAreaField('Scoring Detail', validators=[DataRequired()])
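# Expected formats, inferred from validate() below (illustrative examples only):
#   extra:        a JSON list, e.g. ["Team size", "Advisor"]
#   score_detail: a JSON object mapping section names (strings) to lists of
#                 strings, e.g. {"Innovation": ["Originality", "Feasibility"]}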
def validate(self):
initial_validation = super(EventEditForm, self).validate()
if not initial_validation:
return False
try:
extra_list = json.loads(self.extra.data)
if type(extra_list) is not list:
self.extra.errors.append('Bad syntax.')
return False
except (TypeError, ValueError):
self.extra.errors.append('Bad syntax.')
return False
try:
score_detail_dict = json.loads(self.score_detail.data)
if type(score_detail_dict) is dict:
for key in score_detail_dict:
if type(key) is str:
content = score_detail_dict[key]
if type(content) is list:
for item in content:
if type(item) is str:
return True
else:
self.score_detail.errors.append('Subsection name must be string.')
return False
else:
self.score_detail.errors.append('Section content must be list.')
return False
else:
self.score_detail.errors.append('Section name must be string.')
return False
else:
self.score_detail.errors.append('Bad syntax.')
return False
except (TypeError, ValueError):
self.score_detail.errors.append('Bad syntax.')
return False
class ReviewerRegisterForm(Form):
"""Reviewer Register form."""
username = StringField('Username',
validators=[DataRequired(), Length(min=3, max=25)])
reviewer_name = StringField('Name',
validators=[DataRequired(), Length(min=1, max=50)])
reviewer_department = StringField('Department', validators=[DataRequired()])
email = StringField('Email',
validators=[DataRequired(), Email(), Length(min=6, max=40)])
password = PasswordField('Password',
validators=[DataRequired(), Length(min=6, max=40)])
confirm = PasswordField('Confirm Password',
[DataRequired(), EqualTo('password', message='Passwords must match')])
def __init__(self, *args, **kwargs):
"""Create instance."""
super(ReviewerRegisterForm, self).__init__(*args, **kwargs)
self.user = None
def validate(self):
"""Validate the form."""
initial_validation = super(ReviewerRegisterForm, self).validate()
if not initial_validation:
return False
user = User.query.filter_by(username=self.username.data).first()
if user:
self.username.errors.append('Username already registered')
return False
reviewer = Reviewer.query.filter_by(name=self.reviewer_name.data).first()
if reviewer:
self.reviewer_name.errors.append('This person already registered')
return False
user = User.query.filter_by(email=self.email.data).first()
if user:
self.email.errors.append('Email already registered')
return False
return True
class ReviewerDeleteForm(Form):
"""
An empty form for deleting an existing reviewer. Used for CSRF protection.
"""
class WorksAssignForm(Form):
"""
A multi-select form to assign one work to many reviewers. No specific limitations for now.
"""
reviewers = SelectMultipleField("Assigned Reviewers", coerce=int)
def __init__(self, *args, **kwargs):
"""Create instance."""
super(WorksAssignForm, self).__init__(*args, **kwargs)
def validate(self):
initial_validation = super(WorksAssignForm, self).validate()
if not initial_validation:
return False
return True
def select_multi_checkbox(self, table_class='table', **kwargs):
field = self.reviewers
kwargs.setdefault('type', 'checkbox')
field_id = kwargs.pop('id', field.id)
html = ['<table %s>' % html_params(id=field_id, class_=table_class)]
html.append("""<tr>
<th>Name</th>
<th>Department</th>
<th>Selected</th>
</tr>""")
for value, label, checked in field.iter_choices():
reviewer_info = Reviewer.get_by_id(value)
html.append("<tr>\n")
choice_id = '%s-%s' % (field_id, value)
options = dict(kwargs, name=field.name, value=value, id=choice_id)
if checked:
options['checked'] = 'checked'
html.append('<td><label for="%s">%s</label></td>\n' % (choice_id, label))
html.append('<td>' + reviewer_info.department + '</td>\n')
html.append('<td><input %s /></td>\n' % html_params(**options))
html.append('</tr>')
html.append('</table>')
return ''.join(html)
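# A possible usage from a Jinja2 template, assuming the form is exposed as ``form``:
#   {{ form.select_multi_checkbox() | safe }}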
|
|
import json
import os
from unittest import mock
import pytest
from click.testing import CliRunner
from great_expectations import DataContext
from great_expectations.cli.v012 import cli
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.util import filter_properties_dict
from tests.cli.utils import (
VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
assert_no_logging_messages_or_tracebacks,
)
from tests.test_utils import set_directory
def test_suite_help_output(caplog):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(cli, ["suite"], catch_exceptions=False)
assert result.exit_code == 0
assert (
"""Commands:
delete Delete an expectation suite from the expectation store.
demo Create a new demo Expectation Suite.
edit Generate a Jupyter notebook for editing an existing Expectation...
list Lists available Expectation Suites.
new Create a new empty Expectation Suite.
scaffold Scaffold a new Expectation Suite."""
in result.stdout
)
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
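# Note: stacked mock.patch decorators are applied bottom-up, so the webbrowser mock is the first
# test argument and the subprocess mock the second.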
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_demo_on_context_with_no_datasources(
mock_webbrowser, mock_subprocess, caplog, empty_data_context
):
"""
We call the "suite demo" command on a data context that has no datasources
configured.
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
"""
project_root_dir = empty_data_context.root_directory
root_dir = project_root_dir
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir],
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 1
assert "No datasources found in the context" in stdout
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_demo_enter_existing_suite_name_as_arg(
mock_webbrowser,
mock_subprocess,
caplog,
data_context_parameterized_expectation_suite,
):
"""
We call the "suite demo" command with the name of an existing expectation
suite in the --suite argument
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
"""
not_so_empty_data_context = data_context_parameterized_expectation_suite
project_root_dir = not_so_empty_data_context.root_directory
os.mkdir(os.path.join(project_root_dir, "uncommitted"))
context = DataContext(project_root_dir)
existing_suite_name = "my_dag_node.default"
assert context.list_expectation_suite_names() == [existing_suite_name]
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
[
"suite",
"demo",
"-d",
project_root_dir,
"--suite",
existing_suite_name,
"--no-view",
],
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 1
assert (
f"An expectation suite named `{existing_suite_name}` already exists." in stdout
)
assert (
f"If you intend to edit the suite please use `great_expectations suite edit {existing_suite_name}`"
in stdout
)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
)
@pytest.mark.filterwarnings(
"ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_demo_answer_suite_name_prompts_with_name_of_existing_suite(
mock_webbrowser,
mock_subprocess,
caplog,
data_context_parameterized_expectation_suite,
filesystem_csv_2,
):
"""
We call the "suite demo" command without the suite name argument
The command should:
- prompt us to enter the name of the expectation suite that will be
created. We answer the prompt with the name of an existing expectation suite.
- display an error message and let us retry until we answer
with a name that is not "taken".
- create an example suite
- NOT open jupyter
- open DataDocs to the new example suite page
"""
not_so_empty_data_context = data_context_parameterized_expectation_suite
root_dir = not_so_empty_data_context.root_directory
os.mkdir(os.path.join(root_dir, "uncommitted"))
runner = CliRunner(mix_stderr=False)
csv_path = os.path.join(filesystem_csv_2, "f1.csv")
existing_suite_name = "my_dag_node.default"
context = DataContext(root_dir)
assert context.list_expectation_suite_names() == [existing_suite_name]
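# piped prompt answers: data path, an already-taken suite name (rejected), a new unique name, then Enter to confirm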
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir],
input=f"{csv_path}\n{existing_suite_name}\nmy_new_suite\n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert (
f"An expectation suite named `{existing_suite_name}` already exists." in stdout
)
assert (
f"If you intend to edit the suite please use `great_expectations suite edit {existing_suite_name}`"
in stdout
)
assert "Enter the path" in stdout
assert "Name the new Expectation Suite [f1.warning]" in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "Building" in stdout
assert "The following Data Docs sites will be built" in stdout
assert (
"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here"
in stdout
)
assert "open a notebook for you now" not in stdout
expected_suite_path = os.path.join(root_dir, "expectations", "my_new_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_subprocess.call_count == 0
assert mock_webbrowser.call_count == 1
foo = os.path.join(
root_dir, "uncommitted/data_docs/local_site/validations/my_new_suite/"
)
assert f"file://{foo}" in mock_webbrowser.call_args[0][0]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_new_creates_empty_suite(
mock_webbrowser,
mock_subprocess,
caplog,
data_context_parameterized_expectation_suite,
filesystem_csv_2,
):
"""
Running "suite new" should:
- make an empty suite
- open jupyter
- NOT open data docs
"""
project_root_dir = data_context_parameterized_expectation_suite.root_directory
os.mkdir(os.path.join(project_root_dir, "uncommitted"))
root_dir = project_root_dir
with set_directory(root_dir):
runner = CliRunner(mix_stderr=False)
csv = os.path.join(filesystem_csv_2, "f1.csv")
result = runner.invoke(
cli,
["suite", "new", "-d", root_dir, "--suite", "foo"],
input=f"{csv}\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert "Enter the path" in stdout
assert "Name the new expectation suite" not in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations"
not in stdout
)
assert "Generating example Expectation Suite..." not in stdout
assert "The following Data Docs sites were built" not in stdout
assert (
"Great Expectations will create a new Expectation Suite 'foo' and store it here"
in stdout
)
assert (
"Because you requested an empty suite, we'll open a notebook for you now to edit it!"
in stdout
)
expected_suite_path = os.path.join(root_dir, "expectations", "foo.json")
assert os.path.isfile(expected_suite_path)
expected_notebook = os.path.join(root_dir, "uncommitted", "edit_foo.ipynb")
assert os.path.isfile(expected_notebook)
context = DataContext(root_dir)
assert "foo" in context.list_expectation_suite_names()
suite = context.get_expectation_suite("foo")
assert suite.expectations == []
citations = suite.get_citations()
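# drop fields that vary between runs before comparing the citation contents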
citations[0].pop("citation_date", None)
citations[0].pop("interactive", None)
assert filter_properties_dict(properties=citations[0], clean_falsy=True) == {
"batch_kwargs": {
"data_asset_name": "f1",
"datasource": "mydatasource",
"path": csv,
"reader_method": "read_csv",
},
"comment": "New suite added via CLI",
}
assert mock_subprocess.call_count == 1
call_args = mock_subprocess.call_args[0][0]
assert call_args[0] == "jupyter"
assert call_args[1] == "notebook"
assert expected_notebook in call_args[2]
assert mock_webbrowser.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_new_empty_with_no_jupyter(
mock_webbrowser,
mock_subprocess,
caplog,
data_context_parameterized_expectation_suite,
filesystem_csv_2,
):
"""
Running "suite new --no-jupyter" should:
- make an empty suite
- NOT open jupyter
- NOT open data docs
"""
os.mkdir(
os.path.join(
data_context_parameterized_expectation_suite.root_directory, "uncommitted"
)
)
root_dir = data_context_parameterized_expectation_suite.root_directory
runner = CliRunner(mix_stderr=False)
csv = os.path.join(filesystem_csv_2, "f1.csv")
result = runner.invoke(
cli,
["suite", "new", "-d", root_dir, "--suite", "foo", "--no-jupyter"],
input=f"{csv}\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert "Enter the path" in stdout
assert "Name the new expectation suite" not in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations"
not in stdout
)
assert "Generating example Expectation Suite..." not in stdout
assert "The following Data Docs sites were built" not in stdout
assert (
"Great Expectations will create a new Expectation Suite 'foo' and store it here"
in stdout
)
assert "open a notebook for you now" not in stdout
expected_suite_path = os.path.join(root_dir, "expectations", "foo.json")
assert os.path.isfile(expected_suite_path)
expected_notebook = os.path.join(root_dir, "uncommitted", "edit_foo.ipynb")
assert os.path.isfile(expected_notebook)
context = DataContext(root_dir)
assert "foo" in context.list_expectation_suite_names()
suite = context.get_expectation_suite("foo")
assert suite.expectations == []
citations = suite.get_citations()
citations[0].pop("citation_date", None)
citations[0].pop("interactive", None)
assert filter_properties_dict(properties=citations[0], clean_falsy=True) == {
"batch_kwargs": {
"data_asset_name": "f1",
"datasource": "mydatasource",
"path": csv,
"reader_method": "read_csv",
},
"comment": "New suite added via CLI",
}
assert mock_subprocess.call_count == 0
    assert mock_webbrowser.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
)
@pytest.mark.filterwarnings(
"ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_demo_one_datasource_without_generator_without_suite_name_argument(
mock_webbrowser, mock_subprocess, caplog, empty_data_context, filesystem_csv_2
):
"""
We call the "suite demo" command without the suite name argument
The command should:
- NOT prompt us to choose a datasource (because there is only one)
    - prompt us only to enter the path (the datasource has no generator
    configured, so there is no list of available data assets to choose from).
- We enter the path of the file we want the command to use as the batch to
create the expectation suite.
- prompt us to enter the name of the expectation suite that will be
created
- open Data Docs
- NOT open jupyter
"""
empty_data_context.add_datasource(
"my_datasource",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
)
context = empty_data_context
root_dir = context.root_directory
context = DataContext(root_dir)
runner = CliRunner(mix_stderr=False)
csv_path = os.path.join(filesystem_csv_2, "f1.csv")
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir],
input=f"{csv_path}\nmy_new_suite\n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert "Enter the path" in stdout
assert "Name the new Expectation Suite [f1.warning]" in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations"
in stdout
)
assert (
"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here:"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "Building" in stdout
assert "The following Data Docs sites will be built" in stdout
obs_urls = context.get_docs_sites_urls()
assert len(obs_urls) == 1
assert (
"great_expectations/uncommitted/data_docs/local_site/index.html"
in obs_urls[0]["site_url"]
)
expected_index_path = os.path.join(
root_dir, "uncommitted", "data_docs", "local_site", "index.html"
)
assert os.path.isfile(expected_index_path)
expected_suite_path = os.path.join(root_dir, "expectations", "my_new_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 1
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@pytest.mark.filterwarnings(
"ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_demo_multiple_datasources_with_generator_without_suite_name_argument(
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_with_html_store_titanic_random,
):
"""
We call the "suite demo" command without the suite name argument
- The data context has two datasources - we choose one of them.
- It has a generator configured. We choose to use the generator and select a
generator asset from the list.
- The command should prompt us to enter the name of the expectation suite
that will be created.
- open Data Docs
- NOT open jupyter
"""
root_dir = site_builder_data_context_with_html_store_titanic_random.root_directory
with set_directory(root_dir):
context = DataContext(root_dir)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir],
input="\n1\n1\n1\nmy_new_suite\n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert (
"""Select a datasource
1. mydatasource
2. random
3. titanic"""
in stdout
)
assert (
"""Which data would you like to use?
1. random (directory)
2. titanic (directory)"""
in stdout
)
assert "Name the new Expectation Suite [random.warning]" in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations"
in stdout
)
assert (
"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here:"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "Building" in stdout
assert "The following Data Docs sites will be built" in stdout
obs_urls = context.get_docs_sites_urls()
assert len(obs_urls) == 2
assert (
"great_expectations/uncommitted/data_docs/local_site/index.html"
in obs_urls[0]["site_url"]
)
expected_index_path = os.path.join(
root_dir, "uncommitted", "data_docs", "local_site", "index.html"
)
assert os.path.isfile(expected_index_path)
expected_suite_path = os.path.join(
root_dir, "expectations", "my_new_suite.json"
)
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 2
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@pytest.mark.filterwarnings(
"ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_demo_multiple_datasources_with_generator_with_suite_name_argument(
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_with_html_store_titanic_random,
):
"""
We call the "suite demo" command with the suite name argument
- The data context has two datasources - we choose one of them.
- It has a generator configured. We choose to use the generator and select
a generator asset from the list.
- open Data Docs
- NOT open jupyter
"""
root_dir = site_builder_data_context_with_html_store_titanic_random.root_directory
with set_directory(root_dir):
context = DataContext(root_dir)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir, "--suite", "foo_suite"],
input="\n2\n1\n1\n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert "Select a datasource" in stdout
assert "Which data would you like to use" in stdout
assert (
"Great Expectations will choose a couple of columns and generate expectations"
in stdout
)
assert "Generating example Expectation Suite..." in stdout
assert "Building" in stdout
assert "The following Data Docs sites will be built" in stdout
assert (
"Great Expectations will store these expectations in a new Expectation Suite 'foo_suite' here:"
in stdout
)
obs_urls = context.get_docs_sites_urls()
assert len(obs_urls) == 2
assert (
"great_expectations/uncommitted/data_docs/local_site/index.html"
in obs_urls[0]["site_url"]
)
expected_index_path = os.path.join(
root_dir, "uncommitted", "data_docs", "local_site", "index.html"
)
assert os.path.isfile(expected_index_path)
expected_suite_path = os.path.join(root_dir, "expectations", "foo_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 2
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
def test_suite_edit_without_suite_name_raises_error():
"""This is really only testing click missing arguments"""
runner = CliRunner(mix_stderr=False)
result = runner.invoke(cli, "suite edit", catch_exceptions=False)
assert result.exit_code == 2
assert (
'Error: Missing argument "SUITE".' in result.stderr
or "Error: Missing argument 'SUITE'." in result.stderr
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_with_invalid_json_batch_kwargs_raises_helpful_error(
mock_webbrowser, mock_subprocess, caplog, empty_data_context
):
"""
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
"""
project_dir = empty_data_context.root_directory
context = DataContext(project_dir)
context.create_expectation_suite("foo")
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "edit", "foo", "-d", project_dir, "--batch-kwargs", "'{foobar}'"],
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 1
assert "Please check that your batch_kwargs are valid JSON." in stdout
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_with_batch_kwargs_unable_to_load_a_batch_raises_helpful_error(
mock_webbrowser, mock_subprocess, caplog, empty_data_context
):
"""
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
"""
project_dir = empty_data_context.root_directory
context = DataContext(project_dir)
context.create_expectation_suite("foo")
context.add_datasource("source", class_name="PandasDatasource")
runner = CliRunner(mix_stderr=False)
batch_kwargs = '{"table": "fake", "datasource": "source"}'
result = runner.invoke(
cli,
["suite", "edit", "foo", "-d", project_dir, "--batch-kwargs", batch_kwargs],
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 1
assert "To continue editing this suite" not in stdout
assert "Please check that your batch_kwargs are able to load a batch." in stdout
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_with_non_existent_suite_name_raises_error(
mock_webbrowser, mock_subprocess, caplog, empty_data_context
):
"""
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
"""
project_dir = empty_data_context.root_directory
assert not empty_data_context.list_expectation_suites()
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
f"suite edit not_a_real_suite -d {project_dir}",
catch_exceptions=False,
)
assert result.exit_code == 1
assert "Could not find a suite named `not_a_real_suite`." in result.output
assert "by running `great_expectations suite list`" in result.output
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_with_non_existent_datasource_shows_helpful_error_message(
mock_webbrowser, mock_subprocess, caplog, empty_data_context
):
"""
The command should:
- exit with a clear error message
- NOT open Data Docs
- NOT open jupyter
"""
project_dir = empty_data_context.root_directory
context = DataContext(project_dir)
context.create_expectation_suite("foo")
assert context.list_expectation_suites()[0].expectation_suite_name == "foo"
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
f"suite edit foo -d {project_dir} --datasource not_real",
catch_exceptions=False,
)
assert result.exit_code == 1
assert (
"Unable to load datasource `not_real` -- no configuration found or invalid configuration."
in result.output
)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@pytest.mark.filterwarnings(
"ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_generator_with_no_additional_args_with_suite_without_citations(
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_with_html_store_titanic_random,
):
"""
Here we verify that the "suite edit" command helps the user to specify the batch
kwargs when it is called without the optional arguments that specify the batch.
    First, we call the "suite demo" command to create the expectation suite our test
    will edit - this step is just setup.
We call the "suite edit" command without any optional arguments. This means that
the command will help us specify the batch kwargs interactively.
The data context has two datasources - we choose one of them. It has a generator
configured. We choose to use the generator and select a generator asset from the list.
The command should:
- NOT open Data Docs
- open jupyter
"""
root_dir = site_builder_data_context_with_html_store_titanic_random.root_directory
with set_directory(root_dir):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir, "--suite", "foo_suite"],
input="\n2\n1\n1\n\n",
catch_exceptions=False,
)
assert result.exit_code == 0
assert mock_webbrowser.call_count == 2
assert mock_subprocess.call_count == 0
mock_webbrowser.reset_mock()
mock_subprocess.reset_mock()
# remove the citations from the suite
context = DataContext(root_dir)
suite = context.get_expectation_suite("foo_suite")
assert isinstance(suite, ExpectationSuite)
suite.meta.pop("citations")
context.save_expectation_suite(suite)
# Actual testing really starts here
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
[
"suite",
"edit",
"foo_suite",
"-d",
root_dir,
],
input="\n2\n1\n1\n\n",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout = result.stdout
assert "A batch of data is required to edit the suite" in stdout
assert "Select a datasource" in stdout
assert "Which data would you like to use" in stdout
expected_notebook_path = os.path.join(
root_dir, "uncommitted", "edit_foo_suite.ipynb"
)
assert os.path.isfile(expected_notebook_path)
expected_suite_path = os.path.join(root_dir, "expectations", "foo_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 1
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@pytest.mark.filterwarnings(
"ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_generator_with_no_additional_args_with_suite_containing_citations(
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_with_html_store_titanic_random,
):
"""
Here we verify that the "suite edit" command uses the batch kwargs found in
citations in the existing suite when it is called without the optional
arguments that specify the batch.
    First, we call the "suite demo" command to create the expectation suite our
    test will edit - this step is just setup.
We call the "suite edit" command without any optional arguments.
The command should:
- NOT open Data Docs
- NOT open jupyter
"""
root_dir = site_builder_data_context_with_html_store_titanic_random.root_directory
with set_directory(root_dir):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir, "--suite", "foo_suite"],
input="\n2\n1\n1\n\n",
catch_exceptions=False,
)
assert mock_webbrowser.call_count == 2
assert mock_subprocess.call_count == 0
mock_subprocess.reset_mock()
mock_webbrowser.reset_mock()
assert result.exit_code == 0
context = DataContext(root_dir)
suite = context.get_expectation_suite("foo_suite")
assert isinstance(suite, ExpectationSuite)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "edit", "foo_suite", "-d", root_dir],
input="\n2\n1\n1\n\n",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout = result.stdout
assert "Select a datasource" not in stdout
assert "Which data would you like to use" not in stdout
expected_notebook_path = os.path.join(
root_dir, "uncommitted", "edit_foo_suite.ipynb"
)
assert os.path.isfile(expected_notebook_path)
expected_suite_path = os.path.join(root_dir, "expectations", "foo_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 1
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@pytest.mark.filterwarnings(
"ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_multiple_datasources_with_generator_with_batch_kwargs_arg(
mock_webbrowser,
mock_subprocess,
caplog,
site_builder_data_context_with_html_store_titanic_random,
):
"""
Here we verify that when the "suite edit" command is called with batch_kwargs arg
that specifies the batch that will be used as a sample for editing the suite,
the command processes the batch_kwargs correctly and skips all the prompts
that help users to specify the batch (when called without batch_kwargs).
    First, we call the "suite demo" command to create the expectation suite our test
    will edit - this step is just setup.
    Then we call the "suite edit" command with the --batch-kwargs option, so the
    command does not need to prompt us for the batch kwargs interactively.
The command should:
- NOT open Data Docs
- open jupyter
"""
root_dir = site_builder_data_context_with_html_store_titanic_random.root_directory
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir, "--suite", "foo_suite", "--no-view"],
input="\n2\n1\n1\n\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
mock_subprocess.reset_mock()
mock_webbrowser.reset_mock()
assert (
"Great Expectations will store these expectations in a new Expectation Suite 'foo_suite' here:"
in stdout
)
batch_kwargs = {
"datasource": "random",
"path": str(
os.path.join(
os.path.abspath(os.path.join(root_dir, os.pardir)),
"data",
"random",
"f1.csv",
)
),
}
batch_kwargs_arg_str = json.dumps(batch_kwargs)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
[
"suite",
"edit",
"foo_suite",
"-d",
root_dir,
"--batch-kwargs",
batch_kwargs_arg_str,
],
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert "Select a datasource" not in stdout
assert "Which data would you like to use" not in stdout
expected_notebook_path = os.path.join(
root_dir, "uncommitted", "edit_foo_suite.ipynb"
)
assert os.path.isfile(expected_notebook_path)
expected_suite_path = os.path.join(root_dir, "expectations", "foo_suite.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 1
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_on_existing_suite_one_datasources_with_batch_kwargs_without_datasource_raises_helpful_error(
mock_webbrowser,
mock_subprocess,
caplog,
titanic_data_context,
):
"""
Given:
- the suite foo exists
    - a datasource exists
    - and the user runs this
great_expectations suite edit foo --batch-kwargs '{"path": "data/10k.csv"}'
Then:
- The user should see a nice error and the program halts before notebook
compilation.
- NOT open Data Docs
- NOT open jupyter
    """
project_dir = titanic_data_context.root_directory
context = DataContext(project_dir)
context.create_expectation_suite("foo")
runner = CliRunner(mix_stderr=False)
batch_kwargs = {"path": "../data/Titanic.csv"}
result = runner.invoke(
cli,
[
"suite",
"edit",
"foo",
"-d",
project_dir,
"--batch-kwargs",
json.dumps(batch_kwargs),
],
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 1
assert "Please check that your batch_kwargs are able to load a batch." in stdout
assert "Unable to load datasource `None`" in stdout
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_on_existing_suite_one_datasources_with_datasource_arg_and_batch_kwargs(
mock_webbrowser,
mock_subprocess,
caplog,
titanic_data_context,
):
"""
Given:
- the suite foo exists
    - a datasource bar exists
    - and the user runs this
great_expectations suite edit foo --datasource bar --batch-kwargs '{"path": "data/10k.csv"}'
Then:
- The user gets a working notebook
- NOT open Data Docs
- open jupyter
"""
project_dir = titanic_data_context.root_directory
context = DataContext(project_dir)
context.create_expectation_suite("foo")
runner = CliRunner(mix_stderr=False)
batch_kwargs = {"path": os.path.join(project_dir, "../", "data", "Titanic.csv")}
result = runner.invoke(
cli,
[
"suite",
"edit",
"foo",
"-d",
project_dir,
"--batch-kwargs",
json.dumps(batch_kwargs),
"--datasource",
"mydatasource",
],
catch_exceptions=False,
)
stdout = result.output
assert stdout == ""
assert result.exit_code == 0
expected_notebook_path = os.path.join(project_dir, "uncommitted", "edit_foo.ipynb")
assert os.path.isfile(expected_notebook_path)
expected_suite_path = os.path.join(project_dir, "expectations", "foo.json")
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 1
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
)
@pytest.mark.filterwarnings(
"ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_suite_edit_one_datasources_no_generator_with_no_additional_args_and_no_citations(
mock_webbrowser, mock_subprocess, caplog, empty_data_context, filesystem_csv_2
):
"""
Here we verify that the "suite edit" command helps the user to specify the batch
kwargs when it is called without the optional arguments that specify the batch.
    First, we call the "suite demo" command to create the expectation suite our test
    will edit - this step is just setup.
We call the "suite edit" command without any optional arguments. This means that
the command will help us specify the batch kwargs interactively.
The data context has one datasource. The datasource has no generators
configured. The command prompts us to enter the file path.
"""
empty_data_context.add_datasource(
"my_datasource",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
)
not_so_empty_data_context = empty_data_context
project_root_dir = not_so_empty_data_context.root_directory
root_dir = project_root_dir
with set_directory(root_dir):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "demo", "-d", root_dir],
input="{:s}\nmy_new_suite\n\n".format(
os.path.join(filesystem_csv_2, "f1.csv")
),
catch_exceptions=False,
)
stdout = result.stdout
assert mock_webbrowser.call_count == 1
assert mock_subprocess.call_count == 0
mock_subprocess.reset_mock()
mock_webbrowser.reset_mock()
assert result.exit_code == 0
assert (
"Great Expectations will store these expectations in a new Expectation Suite 'my_new_suite' here:"
in stdout
)
# remove the citations from the suite
context = DataContext(project_root_dir)
suite = context.get_expectation_suite("my_new_suite")
suite.meta.pop("citations")
context.save_expectation_suite(suite)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "edit", "my_new_suite", "-d", root_dir],
input="{:s}\n\n".format(os.path.join(filesystem_csv_2, "f1.csv")),
catch_exceptions=False,
)
assert result.exit_code == 0
stdout = result.stdout
assert "Select a datasource" not in stdout
assert "Which data would you like to use" not in stdout
assert "Enter the path" in stdout
expected_notebook_path = os.path.join(
root_dir, "uncommitted", "edit_my_new_suite.ipynb"
)
assert os.path.isfile(expected_notebook_path)
expected_suite_path = os.path.join(
root_dir, "expectations", "my_new_suite.json"
)
assert os.path.isfile(expected_suite_path)
assert mock_webbrowser.call_count == 0
assert mock_subprocess.call_count == 1
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
def test_suite_list_with_zero_suites(caplog, empty_data_context):
project_dir = empty_data_context.root_directory
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
f"suite list -d {project_dir}",
catch_exceptions=False,
)
assert result.exit_code == 0
assert "No Expectation Suites found" in result.output
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
def test_suite_list_with_one_suite(caplog, empty_data_context):
project_dir = empty_data_context.root_directory
context = DataContext(project_dir)
context.create_expectation_suite("a.warning")
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
f"suite list -d {project_dir}",
catch_exceptions=False,
)
assert result.exit_code == 0
assert "1 Expectation Suite found" in result.output
assert "a.warning" in result.output
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
def test_suite_list_with_multiple_suites(caplog, empty_data_context):
project_dir = empty_data_context.root_directory
context = DataContext(project_dir)
context.create_expectation_suite("a.warning")
context.create_expectation_suite("b.warning")
context.create_expectation_suite("c.warning")
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
f"suite list -d {project_dir}",
catch_exceptions=False,
)
output = result.output
assert result.exit_code == 0
assert "3 Expectation Suites found:" in output
assert "a.warning" in output
assert "b.warning" in output
assert "c.warning" in output
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_with_zero_suites(
mock_emit, caplog, empty_data_context_stats_enabled
):
project_dir = empty_data_context_stats_enabled.root_directory
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
f"suite delete not_a_suite -d {project_dir}",
catch_exceptions=False,
)
assert result.exit_code == 1
assert "No expectation suites found in the project" in result.output
assert mock_emit.call_count == 2
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.suite.delete",
"event_payload": {"api_version": "v2"},
"success": False,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_with_non_existent_suite(
mock_emit, caplog, empty_data_context_stats_enabled
):
context = empty_data_context_stats_enabled
project_dir = context.root_directory
suite = context.create_expectation_suite("foo")
context.save_expectation_suite(suite)
mock_emit.reset_mock()
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
f"suite delete not_a_suite -d {project_dir}",
catch_exceptions=False,
)
assert result.exit_code == 1
assert "No expectation suite named not_a_suite found" in result.output
assert mock_emit.call_count == 2
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.suite.delete",
"event_payload": {"api_version": "v2"},
"success": False,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_delete_with_one_suite(
mock_emit, caplog, empty_data_context_stats_enabled
):
project_dir = empty_data_context_stats_enabled.root_directory
context = DataContext(project_dir)
suite = context.create_expectation_suite("a.warning")
context.save_expectation_suite(suite)
mock_emit.reset_mock()
suite_dir = os.path.join(project_dir, "expectations", "a")
suite_path = os.path.join(suite_dir, "warning.json")
assert os.path.isfile(suite_path)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
f"suite delete a.warning -d {project_dir}",
catch_exceptions=False,
)
assert result.exit_code == 0
assert "Deleted the expectation suite named: a.warning" in result.output
# assert not os.path.isdir(suite_dir)
assert not os.path.isfile(suite_path)
assert mock_emit.call_count == 2
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.suite.delete",
"event_payload": {"api_version": "v2"},
"success": True,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
def test_suite_scaffold_on_context_with_no_datasource_raises_error(
mock_subprocess, mock_emit, caplog, empty_data_context_stats_enabled
):
"""
We call the "suite scaffold" command on a context with no datasource
The command should:
- exit with a clear error message
- send a DataContext init success message
- send a scaffold fail message
"""
context = empty_data_context_stats_enabled
root_dir = context.root_directory
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "scaffold", "foop", "-d", root_dir],
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 1
assert (
"No datasources found in the context. To add a datasource, run `great_expectations datasource new`"
in stdout
)
assert mock_subprocess.call_count == 0
assert mock_emit.call_count == 2
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.suite.scaffold",
"event_payload": {"api_version": "v2"},
"success": False,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_suite_scaffold_on_existing_suite_raises_error(
mock_emit, caplog, empty_data_context_stats_enabled
):
"""
We call the "suite scaffold" command with an existing suite
The command should:
- exit with a clear error message
- send a DataContext init success message
- send a scaffold fail message
"""
context = empty_data_context_stats_enabled
root_dir = context.root_directory
suite = context.create_expectation_suite("foop")
context.save_expectation_suite(suite)
assert context.list_expectation_suite_names() == ["foop"]
mock_emit.reset_mock()
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "scaffold", "foop", "-d", root_dir],
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 1
assert "An expectation suite named `foop` already exists." in stdout
assert (
"If you intend to edit the suite please use `great_expectations suite edit foop`."
in stdout
)
assert mock_emit.call_count == 2
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.suite.scaffold",
"event_payload": {"api_version": "v2"},
"success": False,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
def test_suite_scaffold_creates_notebook_and_opens_jupyter(
mock_subprocess, mock_emit, caplog, titanic_data_context_stats_enabled
):
"""
We call the "suite scaffold" command
The command should:
- create a new notebook
- open the notebook in jupyter
- send a DataContext init success message
- send a scaffold success message
"""
context = titanic_data_context_stats_enabled
root_dir = context.root_directory
suite_name = "foop"
expected_notebook_path = os.path.join(
root_dir, context.GE_EDIT_NOTEBOOK_DIR, f"scaffold_{suite_name}.ipynb"
)
assert not os.path.isfile(expected_notebook_path)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "scaffold", suite_name, "-d", root_dir],
input="1\n1\n",
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 0
assert os.path.isfile(expected_notebook_path)
assert mock_subprocess.call_count == 1
assert mock_subprocess.call_args_list == [
mock.call(["jupyter", "notebook", expected_notebook_path])
]
assert mock_emit.call_count == 2
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.suite.scaffold",
"event_payload": {"api_version": "v2"},
"success": True,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
def test_suite_scaffold_creates_notebook_with_no_jupyter_flag(
mock_subprocess, mock_emit, caplog, titanic_data_context_stats_enabled
):
"""
We call the "suite scaffold --no-jupyter"
The command should:
- create a new notebook
- NOT open the notebook in jupyter
- tell the user to open the notebook
- send a DataContext init success message
- send a scaffold success message
"""
context = titanic_data_context_stats_enabled
root_dir = context.root_directory
suite_name = "foop"
expected_notebook_path = os.path.join(
root_dir, context.GE_EDIT_NOTEBOOK_DIR, f"scaffold_{suite_name}.ipynb"
)
assert not os.path.isfile(expected_notebook_path)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["suite", "scaffold", suite_name, "-d", root_dir, "--no-jupyter"],
input="1\n1\n",
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 0
assert os.path.isfile(expected_notebook_path)
assert (
f"To continue scaffolding this suite, run `jupyter notebook {expected_notebook_path}`"
in stdout
)
assert mock_subprocess.call_count == 0
assert mock_emit.call_count == 2
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.suite.scaffold",
"event_payload": {"api_version": "v2"},
"success": True,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
'''
Custom filters for use in openshift-master
'''
import copy
import sys
from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error
from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.filter.core import to_bool as ansible_bool
# ansible.compat.six goes away with Ansible 2.4
try:
from ansible.compat.six import string_types, u
except ImportError:
from ansible.module_utils.six import string_types, u
import yaml
class IdentityProviderBase(object):
""" IdentityProviderBase
Attributes:
name (str): Identity provider Name
login (bool): Is this identity provider a login provider?
challenge (bool): Is this identity provider a challenge provider?
provider (dict): Provider specific config
_idp (dict): internal copy of the IDP dict passed in
_required (list): List of lists of strings for required attributes
_optional (list): List of lists of strings for optional attributes
_allow_additional (bool): Does this provider support attributes
not in _required and _optional
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
    # disabling this check since the large number of instance attributes is
    # necessary for this class
# pylint: disable=too-many-instance-attributes
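    # Illustrative only: a minimal idp dict of the shape this class consumes.
    # The key names follow the attributes documented above; the provider name
    # below is a made-up example value, not something defined by this module.
    #
    #     {'name': 'htpasswd_auth',
    #      'kind': 'HTPasswdPasswordIdentityProvider',
    #      'login': True,
    #      'challenge': True,
    #      'filename': '/etc/origin/htpasswd'}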
def __init__(self, api_version, idp):
if api_version not in ['v1']:
raise errors.AnsibleFilterError("|failed api version {0} unknown".format(api_version))
self._idp = copy.deepcopy(idp)
if 'name' not in self._idp:
raise errors.AnsibleFilterError("|failed identity provider missing a name")
if 'kind' not in self._idp:
raise errors.AnsibleFilterError("|failed identity provider missing a kind")
self.name = self._idp.pop('name')
self.login = ansible_bool(self._idp.pop('login', False))
self.challenge = ansible_bool(self._idp.pop('challenge', False))
self.provider = dict(apiVersion=api_version, kind=self._idp.pop('kind'))
mm_keys = ('mappingMethod', 'mapping_method')
mapping_method = None
for key in mm_keys:
if key in self._idp:
mapping_method = self._idp.pop(key)
if mapping_method is None:
mapping_method = self.get_default('mappingMethod')
self.mapping_method = mapping_method
valid_mapping_methods = ['add', 'claim', 'generate', 'lookup']
if self.mapping_method not in valid_mapping_methods:
raise errors.AnsibleFilterError("|failed unknown mapping method "
"for provider {0}".format(self.__class__.__name__))
self._required = []
self._optional = []
self._allow_additional = True
@staticmethod
def validate_idp_list(idp_list, openshift_version, deployment_type):
''' validates a list of idps '''
login_providers = [x.name for x in idp_list if x.login]
multiple_logins_unsupported = False
if len(login_providers) > 1:
if deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']:
if LooseVersion(openshift_version) < LooseVersion('3.2'):
multiple_logins_unsupported = True
if deployment_type in ['origin']:
if LooseVersion(openshift_version) < LooseVersion('1.2'):
multiple_logins_unsupported = True
if multiple_logins_unsupported:
raise errors.AnsibleFilterError("|failed multiple providers are "
"not allowed for login. login "
"providers: {0}".format(', '.join(login_providers)))
names = [x.name for x in idp_list]
if len(set(names)) != len(names):
raise errors.AnsibleFilterError("|failed more than one provider configured with the same name")
for idp in idp_list:
idp.validate()
def validate(self):
''' validate an instance of this idp class '''
pass
@staticmethod
def get_default(key):
''' get a default value for a given key '''
if key == 'mappingMethod':
return 'claim'
else:
return None
def set_provider_item(self, items, required=False):
''' set a provider item based on the list of item names provided. '''
for item in items:
provider_key = items[0]
if item in self._idp:
self.provider[provider_key] = self._idp.pop(item)
break
else:
default = self.get_default(provider_key)
if default is not None:
self.provider[provider_key] = default
elif required:
raise errors.AnsibleFilterError("|failed provider {0} missing "
"required key {1}".format(self.__class__.__name__, provider_key))
def set_provider_items(self):
''' set the provider items for this idp '''
for items in self._required:
self.set_provider_item(items, True)
for items in self._optional:
self.set_provider_item(items)
if self._allow_additional:
for key in self._idp.keys():
self.set_provider_item([key])
else:
if len(self._idp) > 0:
raise errors.AnsibleFilterError("|failed provider {0} "
"contains unknown keys "
"{1}".format(self.__class__.__name__, ', '.join(self._idp.keys())))
def to_dict(self):
''' translate this idp to a dictionary '''
return dict(name=self.name, challenge=self.challenge,
login=self.login, mappingMethod=self.mapping_method,
provider=self.provider)
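    # For the made-up HTPasswd idp sketched above, after set_provider_items()
    # this would yield roughly:
    #     {'name': 'htpasswd_auth', 'challenge': True, 'login': True,
    #      'mappingMethod': 'claim',
    #      'provider': {'apiVersion': 'v1',
    #                   'kind': 'HTPasswdPasswordIdentityProvider',
    #                   'file': '/etc/origin/htpasswd'}}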
class LDAPPasswordIdentityProvider(IdentityProviderBase):
""" LDAPPasswordIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
super(LDAPPasswordIdentityProvider, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['attributes'], ['url'], ['insecure']]
self._optional += [['ca'],
['bindDN', 'bind_dn'],
['bindPassword', 'bind_password']]
self._idp['insecure'] = ansible_bool(self._idp.pop('insecure', False))
if 'attributes' in self._idp and 'preferred_username' in self._idp['attributes']:
pref_user = self._idp['attributes'].pop('preferred_username')
self._idp['attributes']['preferredUsername'] = pref_user
def validate(self):
''' validate this idp instance '''
if not isinstance(self.provider['attributes'], dict):
raise errors.AnsibleFilterError("|failed attributes for provider "
"{0} must be a dictionary".format(self.__class__.__name__))
attrs = ['id', 'email', 'name', 'preferredUsername']
for attr in attrs:
if attr in self.provider['attributes'] and not isinstance(self.provider['attributes'][attr], list):
raise errors.AnsibleFilterError("|failed {0} attribute for "
"provider {1} must be a list".format(attr, self.__class__.__name__))
unknown_attrs = set(self.provider['attributes'].keys()) - set(attrs)
if len(unknown_attrs) > 0:
raise errors.AnsibleFilterError("|failed provider {0} has unknown "
"attributes: {1}".format(self.__class__.__name__, ', '.join(unknown_attrs)))
class KeystonePasswordIdentityProvider(IdentityProviderBase):
""" KeystoneIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
super(KeystonePasswordIdentityProvider, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['url'], ['domainName', 'domain_name']]
self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
class RequestHeaderIdentityProvider(IdentityProviderBase):
""" RequestHeaderIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
super(RequestHeaderIdentityProvider, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['headers']]
self._optional += [['challengeURL', 'challenge_url'],
['loginURL', 'login_url'],
['clientCA', 'client_ca'],
['clientCommonNames', 'client_common_names'],
['emailHeaders', 'email_headers'],
['nameHeaders', 'name_headers'],
['preferredUsernameHeaders', 'preferred_username_headers']]
def validate(self):
''' validate this idp instance '''
if not isinstance(self.provider['headers'], list):
raise errors.AnsibleFilterError("|failed headers for provider {0} "
"must be a list".format(self.__class__.__name__))
class AllowAllPasswordIdentityProvider(IdentityProviderBase):
""" AllowAllPasswordIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
super(AllowAllPasswordIdentityProvider, self).__init__(api_version, idp)
self._allow_additional = False
class DenyAllPasswordIdentityProvider(IdentityProviderBase):
""" DenyAllPasswordIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
super(DenyAllPasswordIdentityProvider, self).__init__(api_version, idp)
self._allow_additional = False
class HTPasswdPasswordIdentityProvider(IdentityProviderBase):
""" HTPasswdPasswordIdentity
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
super(HTPasswdPasswordIdentityProvider, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['file', 'filename', 'fileName', 'file_name']]
@staticmethod
def get_default(key):
if key == 'file':
return '/etc/origin/htpasswd'
else:
return IdentityProviderBase.get_default(key)
class BasicAuthPasswordIdentityProvider(IdentityProviderBase):
""" BasicAuthPasswordIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
super(BasicAuthPasswordIdentityProvider, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['url']]
self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
class IdentityProviderOauthBase(IdentityProviderBase):
""" IdentityProviderOauthBase
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
super(IdentityProviderOauthBase, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']]
def validate(self):
''' validate this idp instance '''
if self.challenge:
raise errors.AnsibleFilterError("|failed provider {0} does not "
"allow challenge authentication".format(self.__class__.__name__))
class OpenIDIdentityProvider(IdentityProviderOauthBase):
""" OpenIDIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
IdentityProviderOauthBase.__init__(self, api_version, idp)
self._required += [['claims'], ['urls']]
self._optional += [['ca'],
['extraScopes'],
['extraAuthorizeParameters']]
if 'claims' in self._idp and 'preferred_username' in self._idp['claims']:
pref_user = self._idp['claims'].pop('preferred_username')
self._idp['claims']['preferredUsername'] = pref_user
if 'urls' in self._idp and 'user_info' in self._idp['urls']:
user_info = self._idp['urls'].pop('user_info')
self._idp['urls']['userInfo'] = user_info
if 'extra_scopes' in self._idp:
self._idp['extraScopes'] = self._idp.pop('extra_scopes')
if 'extra_authorize_parameters' in self._idp:
self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
if 'extraAuthorizeParameters' in self._idp:
if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:
val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))
self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val
def validate(self):
''' validate this idp instance '''
IdentityProviderOauthBase.validate(self)
if not isinstance(self.provider['claims'], dict):
raise errors.AnsibleFilterError("|failed claims for provider {0} "
"must be a dictionary".format(self.__class__.__name__))
        for var, var_type in (('extraScopes', list), ('extraAuthorizeParameters', dict)):
            if var in self.provider and not isinstance(self.provider[var], var_type):
                # var_type is already a class, so use __name__ directly to report
                # the expected type (e.g. "list" or "dict") in the error message
                raise errors.AnsibleFilterError("|failed {1} for provider "
                                                "{0} must be a {2}".format(self.__class__.__name__,
                                                                           var,
                                                                           var_type.__name__))
required_claims = ['id']
optional_claims = ['email', 'name', 'preferredUsername']
all_claims = required_claims + optional_claims
        for claim in required_claims:
            if claim not in self.provider['claims']:
raise errors.AnsibleFilterError("|failed {0} claim missing "
"for provider {1}".format(claim, self.__class__.__name__))
for claim in all_claims:
if claim in self.provider['claims'] and not isinstance(self.provider['claims'][claim], list):
raise errors.AnsibleFilterError("|failed {0} claims for "
"provider {1} must be a list".format(claim, self.__class__.__name__))
unknown_claims = set(self.provider['claims'].keys()) - set(all_claims)
if len(unknown_claims) > 0:
raise errors.AnsibleFilterError("|failed provider {0} has unknown "
"claims: {1}".format(self.__class__.__name__, ', '.join(unknown_claims)))
if not isinstance(self.provider['urls'], dict):
raise errors.AnsibleFilterError("|failed urls for provider {0} "
"must be a dictionary".format(self.__class__.__name__))
required_urls = ['authorize', 'token']
optional_urls = ['userInfo']
all_urls = required_urls + optional_urls
for url in required_urls:
if url not in self.provider['urls']:
raise errors.AnsibleFilterError("|failed {0} url missing for "
"provider {1}".format(url, self.__class__.__name__))
unknown_urls = set(self.provider['urls'].keys()) - set(all_urls)
if len(unknown_urls) > 0:
raise errors.AnsibleFilterError("|failed provider {0} has unknown "
"urls: {1}".format(self.__class__.__name__, ', '.join(unknown_urls)))
class GoogleIdentityProvider(IdentityProviderOauthBase):
""" GoogleIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
IdentityProviderOauthBase.__init__(self, api_version, idp)
self._optional += [['hostedDomain', 'hosted_domain']]
class GitHubIdentityProvider(IdentityProviderOauthBase):
""" GitHubIdentityProvider
Attributes:
Args:
api_version(str): OpenShift config version
idp (dict): idp config dict
Raises:
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
IdentityProviderOauthBase.__init__(self, api_version, idp)
self._optional += [['organizations']]
class FilterModule(object):
''' Custom ansible filters for use by the openshift_master role'''
@staticmethod
def translate_idps(idps, api_version, openshift_version, deployment_type):
''' Translates a list of dictionaries into a valid identityProviders config '''
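        # Sketch of a template-side invocation; the variable names used here are
        # illustrative assumptions rather than variables defined by this module:
        #     {{ identity_providers | translate_idps('v1', openshift_version, deployment_type) }}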
idp_list = []
if not isinstance(idps, list):
raise errors.AnsibleFilterError("|failed expects to filter on a list of identity providers")
for idp in idps:
if not isinstance(idp, dict):
raise errors.AnsibleFilterError("|failed identity providers must be a list of dictionaries")
cur_module = sys.modules[__name__]
idp_class = getattr(cur_module, idp['kind'], None)
idp_inst = idp_class(api_version, idp) if idp_class is not None else IdentityProviderBase(api_version, idp)
idp_inst.set_provider_items()
idp_list.append(idp_inst)
IdentityProviderBase.validate_idp_list(idp_list, openshift_version, deployment_type)
return u(yaml.dump([idp.to_dict() for idp in idp_list],
allow_unicode=True,
default_flow_style=False,
width=float("inf"),
Dumper=AnsibleDumper))
@staticmethod
def validate_pcs_cluster(data, masters=None):
''' Validates output from "pcs status", ensuring that each master
provided is online.
Ex: data = ('...',
'PCSD Status:',
'master1.example.com: Online',
'master2.example.com: Online',
'master3.example.com: Online',
'...')
masters = ['master1.example.com',
'master2.example.com',
'master3.example.com']
returns True
'''
if not issubclass(type(data), string_types):
raise errors.AnsibleFilterError("|failed expects data is a string or unicode")
if not issubclass(type(masters), list):
raise errors.AnsibleFilterError("|failed expects masters is a list")
valid = True
for master in masters:
if "{0}: Online".format(master) not in data:
valid = False
return valid
@staticmethod
def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True):
''' Return certificates to synchronize based on facts. '''
if not issubclass(type(hostvars), dict):
raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
certs = ['admin.crt',
'admin.key',
'admin.kubeconfig',
'master.kubelet-client.crt',
'master.kubelet-client.key']
if bool(include_ca):
certs += ['ca.crt', 'ca.key', 'ca-bundle.crt']
if bool(include_keys):
certs += ['serviceaccounts.private.key',
'serviceaccounts.public.key']
if bool(hostvars['openshift']['common']['version_gte_3_1_or_1_1']):
certs += ['master.proxy-client.crt',
'master.proxy-client.key']
if not bool(hostvars['openshift']['common']['version_gte_3_2_or_1_2']):
certs += ['openshift-master.crt',
'openshift-master.key',
'openshift-master.kubeconfig']
if bool(hostvars['openshift']['common']['version_gte_3_3_or_1_3']):
certs += ['service-signer.crt',
'service-signer.key']
if not bool(hostvars['openshift']['common']['version_gte_3_5_or_1_5']):
certs += ['openshift-registry.crt',
'openshift-registry.key',
'openshift-registry.kubeconfig',
'openshift-router.crt',
'openshift-router.key',
'openshift-router.kubeconfig']
return certs
@staticmethod
def oo_htpasswd_users_from_file(file_contents):
''' return a dictionary of htpasswd users from htpasswd file contents '''
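        # For example, the contents "alice:hash1\nbob:hash2" (placeholder hashes)
        # would yield {'alice': 'hash1', 'bob': 'hash2'}.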
htpasswd_entries = {}
if not isinstance(file_contents, string_types):
raise errors.AnsibleFilterError("failed, expects to filter on a string")
for line in file_contents.splitlines():
user = None
passwd = None
if len(line) == 0:
continue
if ':' in line:
user, passwd = line.split(':', 1)
if user is None or len(user) == 0 or passwd is None or len(passwd) == 0:
error_msg = "failed, expects each line to be a colon separated string representing the user and passwd"
raise errors.AnsibleFilterError(error_msg)
htpasswd_entries[user] = passwd
return htpasswd_entries
def filters(self):
''' returns a mapping of filters to methods '''
return {"translate_idps": self.translate_idps,
"validate_pcs_cluster": self.validate_pcs_cluster,
"certificates_to_synchronize": self.certificates_to_synchronize,
"oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file}
|
|
"""
Calculate the type 2 Signal Detection Theory (SDT) measure meta-d' for the
S1 and S2 responses according to the method described in:
Maniscalco, B., & Lau, H. (2012). A signal detection theoretic approach
for estimating metacognitive sensitivity from confidence ratings.
Consciousness and Cognition, 21(1), 422-430.
doi:10.1016/j.concog.2011.09.021
and
Maniscalco, B., & Lau, H. (2014). Signal detection theory analysis of
type 1 and type 2 data: meta-d', response-specific meta-d', and the
unequal variance SDT model. In S. M. Fleming & C. D. Frith (Eds.),
The Cognitive Neuroscience of Metacognition (pp.25-66). Springer.
Only the equal variance approach and normally distributed inner decision
variables are currently supported. The function calculates the
response-specific meta-d' variables.
The performance of this code was compared to the Matlab code available
at http://www.columbia.edu/~bsm2105/type2sdt/
Results were equivalent. However, this Python code was about 15x faster.
Usage:
------
The class T2SDT implements the optimization of the type 2 SDT model.
As data, a confusion matrix (including confidence ratings) should be given.
After initialization, the fit() method of the class can be used to fit
the type 2 SDT model to the supplied data.
The confusion matrix (including confidence ratings) can be calculated
from data using the function confusion_matrix
"""
from __future__ import division
import numpy as np
from scipy.stats import norm
from scipy.optimize import minimize
import warnings
# ignore division by 0 and invalid numerical operations
np.seterr(invalid='ignore', divide='ignore')
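# A minimal usage sketch (illustrative only: the counts below are made up and
# describe a 2x2x3 confusion matrix with three confidence levels):
#
#     conf_matrix = np.array([[[20,  8,  4], [10,  6,  2]],
#                             [[ 9,  5,  3], [18, 10,  5]]])
#     model = T2SDT(conf_matrix, adjust=True)
#     model.fit()
#     print(model.d, model.meta_d_S1, model.meta_d_S2)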
class T2SDT(object):
""" Fit a type 2 SDT analysis to the provided confusion matrix.
Attributes:
conf_matrix (array of ints): the confusion matrix, taking into
account the confidence ratings (cf. documentation to
confusion_matrix function), might be adjusted to avoid
fields containing zero (depends on the adjust parameter)
max_conf (int>0): the maximal possible confidence rating
HR (float): the type 1 Hit Rate
FAR (float): the type 1 False Alarm Rate
d (float): the d' measure for the type 1 classification task
denotes the ability to discriminate between S1 (stim. absent)
            and S2 (stim. present) trials in standard deviation units
c (float): the response bias of the type 1 classification task
meta_d_S1 (float): the meta-d' measure for S1 responses
denotes the ability of the subject to discriminate between
correct and incorrect S1 responses in the same units as those of
the type 1 task, only available after calling the fit method
meta_d_S2 (float): the meta-d' measure for S2 responses
denotes the ability of the subject to discriminate between
correct and incorrect S2 responses in the same units as those of
the type 1 task, only available after calling the fit method
meta_c_S1 (float): the response bias of the type 2 task for S1
responses
meta_c_S1 is calculated such that meta_c_S1' = c', where
c' = c/d' and meta_c_S1' = meta_c_S1/meta_d_S1'
only available after calling the fit method
meta_c_S2 (float): the response bias of the type 2 task for S2
responses
meta_c_S2 is calculated such that meta_c_S2' = c', where
c' = c/d' and meta_c_S2' = meta_c_S2/meta_d_S2'
only available after calling the fit method
meta_c2_S1 (array of floats): the decision criteria for each
confidence level if the response had been "S1" (stim. absent)
meta_c2_S1[0] belongs to confidence==1
meta_c2_S1[1] belongs to confidence==2
etc.
meta_c2_S1[0]>=meta_c2_S1[1]>=meta_c2_S1[2] ... >= meta_c_S1
only available after calling the fit method
meta_c2_S2 (array of floats): the decision criteria for each
confidence level if the response had been "S2" (stim. present)
meta_c2_S2[0] belongs to confidence==1
meta_c2_S2[1] belongs to confidence==2
etc.
meta_c2_S2[0]<=meta_c2_S2[1]<=meta_c2_S2[2] ... <= meta_c_S2
only available after calling the fit method
logL_S1 (float): the log-likelihood of the model for S1 responses;
only available after calling the fit method
logL_S2 (float): the log-likelihood of the model for S2 responses;
only available after calling the fit method
success_S1 (bool): whether the fitting was successful for S1
responses; only available after calling the fit method
success_S2 (bool): whether the fitting was successful for S2
responses; only available after calling the fit method
fit_message_S1 (str): the message output by the optimization
algorithm for S1 responses; only available after calling the
fit method
fit_message_S2 (str): the message output by the optimization
algorithm for S2 responses; only available after calling the
fit method
"""
def __init__(self, conf_matrix, adjust=False):
"""
Args:
conf_matrix (array of ints, shape 2x2x(max_conf + 1)): the
confusion matrix including confidence ratings
- conf_matrix[0,:] contains all trials that were actually
negative and conf_matrix[1,:] all trials that were
actually positive
- conf_matrix[:,0] contains all trials that were classified
as being negative and conf_matrix[:,1] all trials that
were classified as being positive
Accordingly:
- TN are in conf_matrix[0,0]
- TP are in conf_matrix[1,1]
- FN are in conf_matrix[1,0]
- FP are in conf_matrix[0,1]
The last axis is determined by the confidence rating:
- in conf_matrix[...,0], rating was 0
- in conf_matrix[...,1], rating was 1
- in conf_matrix[...,2], rating was 2
adjust (bool): if True, 1./(2*(max_conf + 1)) is added to all
entries of the confusion matrix to avoid zero entries
"""
conf_matrix = np.asarray(conf_matrix)
if not conf_matrix.ndim==3:
raise ValueError('conf_matrix must be 3d')
if not conf_matrix.shape[:2] == (2,2):
raise ValueError('the shape of the conf_matrix must be'
' 2x2xn, where n is the number of possible confidence'
' ratings.')
self.conf_matrix = conf_matrix
self.max_conf = self.conf_matrix.shape[-1] - 1
if self.max_conf < 1:
raise ValueError('conf_matrix must contain at least two confidence levels.')
if adjust:
self.conf_matrix = self.conf_matrix + 1./(2*(self.max_conf + 1))
if np.any(self.conf_matrix == 0):
warnings.warn('Some entries of the confusion matrix are 0.'
' This might cause problems with fitting the SDT model.',
UserWarning)
TP = self.conf_matrix.sum(-1)[1,1] # true positives
FN = self.conf_matrix.sum(-1)[1,0] # false negatives
TN = self.conf_matrix.sum(-1)[0,0] # true negatives
FP = self.conf_matrix.sum(-1)[0,1] # false positives
self.HR = TP/float(TP + FN) # the hit rate
self.FAR = FP/float(FP + TN) # the false alarm rate
z_HR = norm.ppf(self.HR)
z_FAR = norm.ppf(self.FAR)
self.d = z_HR - z_FAR # the type I sensitivity
self.c = -0.5*(z_HR + z_FAR) # the type I decision criterion
def fit(self):
"""
Fit the type 2 SDT model by maximizing the log-likelihood of the
observed confidence ratings under the model.
This generates the attributes meta_d_S1, meta_d_S2, meta_c_S1,
meta_c_S2, meta_c2_S1, meta_c2_S2, logL_S1, logL_S2, success_S1,
success_S2, fit_message_S1, and fit_message_S2.
"""
###############################
# fit meta_d for S1 responses #
###############################
result_S1 = minimize(self._get_log_likelihood,
x0 = [0] + self.max_conf*[-0.1],
args = ('S1',),
method = 'L-BFGS-B',
jac = True,
bounds = ([(None,None)] + self.max_conf*[(None, 0)]),
options=dict(disp=False))
self.meta_d_S1, self.meta_c_S1, self.meta_c2_S1 = (
self._get_parameters(result_S1.x))
self.logL_S1 = -result_S1.fun
self.success_S1 = result_S1.success
self.fit_message_S1 = result_S1.message
###############################
# fit meta_d for S2 responses #
###############################
result_S2 = minimize(self._get_log_likelihood,
x0 = [0] + self.max_conf*[0.1],
method = 'L-BFGS-B',
args = ('S2',),
jac = True,
bounds = ([(None,None)] + self.max_conf*[(0, None)]),
options=dict(disp=False))
self.meta_d_S2, self.meta_c_S2, self.meta_c2_S2 = (
self._get_parameters(result_S2.x))
self.logL_S2 = -result_S2.fun
self.success_S2 = result_S2.success
self.fit_message_S2 = result_S2.message
def _get_log_likelihood(self, x, which='S1', return_der=True):
"""Internal method, do not use directly!
Calculates the (negative) log-likelihood of the fitted model.
The negative log-likelihood is returned to maximize the
log-likelihood by minimizing the output of this function.
The docstring to the method _get_parameters explains how x is
translated to the parameters of the type 2 model
"""
if which not in ('S1', 'S2'):
raise ValueError('which must be S1 or S2')
meta_d, meta_c, meta_c2 = self._get_parameters(x)
# initialize an empty matrix of probabilities for each outcome
# (i.e., for each combination of stimulus, response and confidence)
cumprobs = np.empty([2, self.max_conf + 2])
probs = np.empty([2,self.max_conf + 1], float)
##########################################
# calculate the elementary probabilities #
##########################################
# calculate the response-specific cumulative probabilities for all
# ratings
if which == 'S1':
cumprobs[0] = np.r_[norm.cdf(np.r_[meta_c, meta_c2],
-0.5*meta_d),0]
cumprobs[1] = np.r_[norm.cdf(np.r_[meta_c, meta_c2],
0.5*meta_d), 0]
else:
cumprobs[0] = np.r_[norm.sf(np.r_[meta_c, meta_c2],
-0.5*meta_d), 0]
cumprobs[1] = np.r_[norm.sf(np.r_[meta_c, meta_c2],
0.5*meta_d), 0]
# calculate the response-specific probabilities for all ratings
probs = (cumprobs[...,:-1] - cumprobs[...,1:])/cumprobs[...,0,
np.newaxis]
# calculate the log likelihood
if which == 'S1':
total_logp = np.sum(np.log(probs)*self.conf_matrix[:,0], None)
else:
total_logp = np.sum(np.log(probs)*self.conf_matrix[:,1], None)
if return_der:
# calculate the derivative
total_logp_der = np.zeros(len(x), float)
# calculate the derivative of cumprobs
cumprobs_der = np.zeros([2,self.max_conf + 2])
if which == 'S1':
cumprobs_der[0] = np.r_[norm.pdf(np.r_[
meta_c, meta_c2], -0.5*meta_d), 0]
cumprobs_der[1] = np.r_[norm.pdf(np.r_[
meta_c, meta_c2], 0.5*meta_d), 0]
else:
cumprobs_der[0] = np.r_[-norm.pdf(np.r_[
meta_c, meta_c2], -0.5*meta_d), 0]
cumprobs_der[1] = np.r_[-norm.pdf(np.r_[
meta_c, meta_c2], 0.5*meta_d), 0]
#################################################
# calculate derivatives for the meta-d' element #
#################################################
cumprobs_der_meta_d = cumprobs_der.copy()
cumprobs_der_meta_d[0] *= self.c/self.d + 0.5
cumprobs_der_meta_d[1] *= self.c/self.d - 0.5
# calculate the derivative of probs according to the quotient
# rule
probs_der_meta_d = (
(cumprobs_der_meta_d[...,:-1] -
cumprobs_der_meta_d[...,1:])*cumprobs[
...,0,np.newaxis] -
(cumprobs[...,:-1] - cumprobs[...,1:])*
cumprobs_der_meta_d[...,0,np.newaxis])/(
cumprobs[...,0,np.newaxis]**2)
if which == 'S1':
total_logp_der[0] = np.sum(
self.conf_matrix[:,0]/probs*probs_der_meta_d, None)
else:
total_logp_der[0] = np.sum(
self.conf_matrix[:,1]/probs*probs_der_meta_d, None)
############################################
# calculate derivative for the c2 elements #
############################################
cumprobs_der /= cumprobs[...,0, np.newaxis]
cumprobs_der_diff = cumprobs_der[...,:-1] - cumprobs_der[...,1:]
# calculate the derivative of the log of the probs and the
# product with the confidence ratings
if which == 'S1':
log_cumprobs_der = (
cumprobs_der[...,1:]*self.conf_matrix[:,0]/probs)
log_cumprobs_diff_der = cumprobs_der_diff[
...,1:]*self.conf_matrix[...,0,1:]/probs[...,1:]
else:
log_cumprobs_der = (
cumprobs_der[...,1:]*self.conf_matrix[:,1]/probs)
log_cumprobs_diff_der = cumprobs_der_diff[
...,1:]*self.conf_matrix[...,1,1:]/probs[...,1:]
total_logp_der[1:] = (
np.cumsum(
log_cumprobs_diff_der.sum(0)[...,::-1], axis=-1)[
...,::-1] - log_cumprobs_der[
...,:self.max_conf].sum(0))
return -total_logp, -total_logp_der
else:
return -total_logp
def _get_parameters(self, x):
"""Internal method, do not use directly!
From the optimization input x, get meta_d, meta_c_S1, and meta_c_S2
The passed parameter list x consists of
- meta_d
- the offsets x2 such that:
meta_c2 = meta_c + np.cumsum(x2)
The length of x2 must be max_conf.
- If x2 is strictly negative, this results in the meta_c2
parameters for S1.
- If x2 is strictly positive, this results in the meta_c2
parameters for S2.
Meta_c is chosen such that meta_c/meta_d' = c/d'
Notes:
Let d' and c of the primary condition be d' = 1 and c = 2
max_conf = 2 (i.e., confidence is rated on a scale 0-1-2)
If a parameter list x = [0.7, -0.1, -0.05] is passed,
this leads to the following arguments:
meta_c = c/d' * meta_d = 2/1 * 0.7 = 1.4
meta_c2_S1 = 1.4 + cumsum([-0.1, -0.05]) = [1.3, 1.25]
If a parameter list x = [0.7, 0.1, 0.05] is passed,
this leads to the following arguments:
meta_c = c/d' * meta_d = 2/1 * 0.7 = 1.4
meta_c2_S2 = 1.4 + cumsum([0.1, 0.05]) = [1.5, 1.55]
"""
if not len(x) == self.max_conf + 1:
raise TypeError('length of x does not fit the expected length')
meta_d = x[0]
meta_c = self.c/self.d*meta_d
meta_c2 = meta_c + np.cumsum(x[1:])
return meta_d, meta_c, meta_c2
def confusion_matrix(true_label, pred_label, rating, max_conf=None):
"""
Calculate a 2x2x(max_conf + 1) confusion matrix.
Args:
true_label (array of ints): the actual stimulus condition
should be 0 for an absent stimulus and 1 for a present
stimulus
pred_label (array of ints): the predicted stimulus condition
should be 0 if the stimulus was classified as being absent
and 1 if the stimulus was classified as being present
rating (array of ints, rating >= 0): confidence rating on an
ordered integer scale (0, 1, ..., max_conf) where 0 means
low confidence and max_conf means maximal confidence
max_conf (int>=0, optional): the maximal possible confidence
value, if not given the maximal value of the given
rating is chosen. E.g., if max_conf=2, 3 possible confidence
levels are associated to the classification task
0 - unsure, 1 - neutral, 2 - sure
Returns:
conf_matrix (array of ints, shape 2x2x(max_conf + 1)): the confusion
matrix
- conf_matrix[0,:] contains all trials that were actually
negative
- conf_matrix[:,0] contains all trials that were classified as
being negative
Accordingly:
- TN are in conf_matrix[0,0]
- TP are in conf_matrix[1,1]
- FN are in conf_matrix[1,0]
- FP are in conf_matrix[0,1]
The last axis is determined by the confidence rating:
- in conf_matrix[...,0], rating was 0
- in conf_matrix[...,1], rating was 1
- in conf_matrix[...,2], rating was 2
Note:
An "ordinary" confusion matrix (i.e., without taking confidence
into account) is obtained as conf_matrix.sum(axis=-1)
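Example (a minimal sketch with hypothetical data):
    true_label = [0, 0, 1, 1]
    pred_label = [0, 1, 1, 1]
    rating = [2, 0, 1, 2]
    conf_matrix = confusion_matrix(true_label, pred_label, rating,
                                   max_conf=2)
    # conf_matrix has shape (2, 2, 3); e.g. conf_matrix[1, 1, 2] == 1,
    # because exactly one hit was reported with confidence 2.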
"""
###################
# variable checks #
###################
true_label = np.asarray(true_label)
pred_label = np.asarray(pred_label)
if not np.allclose(true_label, true_label.astype(int)):
raise TypeError('all labels must be integers')
if not np.allclose(pred_label, pred_label.astype(int)):
raise TypeError('all labels must be integers')
true_label = true_label.astype(int)
pred_label = pred_label.astype(int)
if not np.all([true_label>=0, true_label<=1]):
raise ValueError('all labels must be 0 or 1')
if not np.all([pred_label>=0, pred_label<=1]):
raise ValueError('all labels must be 0 or 1')
#
rating = np.asarray(rating)
if not np.allclose(rating, rating.astype(int)):
raise TypeError('all ratings must be integers')
rating = rating.astype(int)
if not np.all(rating >= 0):
raise ValueError('all ratings must be equal to or larger than 0')
if max_conf is None:
max_conf = rating.max()
else:
if not type(max_conf) == int:
raise TypeError('max_conf must be an integer')
if max_conf < 0:
raise ValueError('max_conf must be >= 0')
if not np.all(rating <= max_conf):
raise ValueError('all ratings must be smaller than or equal'
' to max_conf')
##################################
# calculate the confusion matrix #
##################################
conf_matrix = np.zeros([2,2,max_conf + 1], int)
for true_now, pred_now, rating_now in zip(true_label, pred_label,
rating):
conf_matrix[true_now, pred_now, rating_now] += 1
return conf_matrix
if __name__ == "__main__":
####################################
# Simulate a binary detection task #
####################################
d = 0.6 # d_prime of type 1 task
c = 0 # c of type 1 task
N1 = 400 # number of absent stimuli
N2 = 800 # number of present stimuli
err = 1 # standard deviation of noise
c_S1 = [0.34, 1.1] # criteria for confidence in case response S1
c_S2 = [1.6, 1.9] # criteria for confidence in case response S2
true_label = np.r_[[0]*N1, [1]*N2]
x = np.r_[norm.rvs(-0.5*d, size=N1), norm.rvs(0.5*d, size=N2)]
pred_label = np.where(x<c, 0, 1)
# x2 is a noisy version of x and used to calculate confidence criteria
x2 = x + norm.rvs(scale=err, size=len(x))
# create the confidence ratings
confidence = np.zeros(x.shape, int)
for k,c_now in enumerate(c_S1):
confidence[pred_label == 0] = np.where(
x2[pred_label==0] < (c - c_now), k + 1,
confidence[pred_label==0])
for k, c_now in enumerate(c_S2):
confidence[pred_label == 1] = np.where(
x2[pred_label==1] >= (c + c_now), k + 1,
confidence[pred_label == 1])
#################################
# Now, fit the type 2 SDT model #
#################################
# calculate the confusion matrix
conf_matrix = confusion_matrix(true_label, pred_label, confidence,
max_conf=2)
model = T2SDT(conf_matrix, adjust=False)
model.fit()
print('Results of the simulated type 2 SDT')
print('------------------------------------')
print('d\': %.2f, c: %.2f' % (model.d, model.c))
print('------------------------------------')
print('S1 model fitting success: %s' % model.success_S1)
print('S1 model fitting message:\n %s' % model.fit_message_S1)
print('meta-d_S1\': %.2f, meta-c_S1: %.2f, logL_S1: %.2f' % (
model.meta_d_S1, model.meta_c_S1, model.logL_S1))
print('------------------------------------')
print('S2 model fitting success: %s' % model.success_S2)
print('S2 model fitting message:\n %s' % model.fit_message_S2)
print('meta-d_S2\': %.2f, meta-c_S2: %.2f, logL_S2: %.2f' % (
model.meta_d_S2, model.meta_c_S2, model.logL_S2))
|
|
# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This module provides an interface to the Elastic Compute Cloud (EC2)
CloudWatch service from AWS.
"""
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.ec2.cloudwatch.metric import Metric
from boto.ec2.cloudwatch.alarm import MetricAlarm, MetricAlarms, AlarmHistoryItem
from boto.ec2.cloudwatch.datapoint import Datapoint
from boto.regioninfo import RegionInfo
import boto
RegionData = {
'us-east-1': 'monitoring.us-east-1.amazonaws.com',
'us-gov-west-1': 'monitoring.us-gov-west-1.amazonaws.com',
'us-west-1': 'monitoring.us-west-1.amazonaws.com',
'us-west-2': 'monitoring.us-west-2.amazonaws.com',
'sa-east-1': 'monitoring.sa-east-1.amazonaws.com',
'eu-west-1': 'monitoring.eu-west-1.amazonaws.com',
'ap-northeast-1': 'monitoring.ap-northeast-1.amazonaws.com',
'ap-southeast-1': 'monitoring.ap-southeast-1.amazonaws.com',
'ap-southeast-2': 'monitoring.ap-southeast-2.amazonaws.com',
}
def regions():
"""
Get all available regions for the CloudWatch service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
regions = []
for region_name in RegionData:
region = RegionInfo(name=region_name,
endpoint=RegionData[region_name],
connection_cls=CloudWatchConnection)
regions.append(region)
return regions
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.ec2.cloudwatch.CloudWatchConnection`.
:param str region_name: The name of the region to connect to.
:rtype: :class:`boto.ec2.CloudWatchConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
class CloudWatchConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'cloudwatch_version', '2010-08-01')
DefaultRegionName = boto.config.get('Boto', 'cloudwatch_region_name',
'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto',
'cloudwatch_region_endpoint',
'monitoring.us-east-1.amazonaws.com')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, aws_sudo_id=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
security_token=None, validate_certs=True):
"""
Init method to create a new connection to EC2 Monitoring Service.
B{Note:} The host argument is overridden by the host specified in the
boto configuration file.
"""
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
# Ugly hack to get around both a bug in Python and a
# misconfigured SSL cert for the eu-west-1 endpoint
if self.region.name == 'eu-west-1':
validate_certs = False
AWSQueryConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token,
validate_certs=validate_certs)
self.aws_sudo_id = aws_sudo_id
def make_request(self, action, params=None, path='/', verb='GET'):
if self.aws_sudo_id:
if params is None:
params = {}
params['AWSSudoId'] = self.aws_sudo_id
return AWSQueryConnection.make_request(self, action, params, path, verb)
def _required_auth_capability(self):
return ['ec2']
def build_dimension_param(self, dimension, params):
prefix = 'Dimensions.member'
i = 0
for dim_name in dimension:
dim_value = dimension[dim_name]
if dim_value:
if isinstance(dim_value, basestring):
dim_value = [dim_value]
for value in dim_value:
params['%s.%d.Name' % (prefix, i+1)] = dim_name
params['%s.%d.Value' % (prefix, i+1)] = value
i += 1
else:
params['%s.%d.Name' % (prefix, i+1)] = dim_name
i += 1
def build_list_params(self, params, items, label):
if isinstance(items, basestring):
items = [items]
for index, item in enumerate(items):
i = index + 1
if isinstance(item, dict):
for k, v in item.iteritems():
params[label % (i, 'Name')] = k
if v is not None:
params[label % (i, 'Value')] = v
else:
params[label % i] = item
def build_put_params(self, params, name, value=None, timestamp=None,
unit=None, dimensions=None, statistics=None):
args = (name, value, unit, dimensions, statistics, timestamp)
length = max(map(lambda a: len(a) if isinstance(a, list) else 1, args))
def aslist(a):
if isinstance(a, list):
if len(a) != length:
raise Exception('Must specify equal number of elements; expected %d.' % length)
return a
return [a] * length
for index, (n, v, u, d, s, t) in enumerate(zip(*map(aslist, args))):
metric_data = {'MetricName': n}
if timestamp:
metric_data['Timestamp'] = t.isoformat()
if unit:
metric_data['Unit'] = u
if dimensions:
self.build_dimension_param(d, metric_data)
if statistics:
metric_data['StatisticValues.Maximum'] = s['maximum']
metric_data['StatisticValues.Minimum'] = s['minimum']
metric_data['StatisticValues.SampleCount'] = s['samplecount']
metric_data['StatisticValues.Sum'] = s['sum']
if value != None:
msg = 'You supplied a value and statistics for a ' + \
'metric. Posting statistics and not value.'
boto.log.warn(msg)
elif value != None:
metric_data['Value'] = v
else:
raise Exception('Must specify a value or statistics to put.')
for key, val in metric_data.iteritems():
params['MetricData.member.%d.%s' % (index + 1, key)] = val
def get_metric_statistics(self, period, start_time, end_time, metric_name,
namespace, statistics, dimensions=None,
unit=None):
"""
Get time-series data for one or more statistics of a given metric.
:type period: integer
:param period: The granularity, in seconds, of the returned datapoints.
Period must be at least 60 seconds and must be a multiple
of 60. The default value is 60.
:type start_time: datetime
:param start_time: The time stamp to use for determining the
first datapoint to return. The value specified is
inclusive; results include datapoints with the time stamp
specified.
:type end_time: datetime
:param end_time: The time stamp to use for determining the
last datapoint to return. The value specified is
exclusive; results will include datapoints up to the time
stamp specified.
:type metric_name: string
:param metric_name: The metric name.
:type namespace: string
:param namespace: The metric's namespace.
:type statistics: list
:param statistics: A list of statistics names. Valid values:
Average | Sum | SampleCount | Maximum | Minimum
:type dimensions: dict
:param dimensions: A dictionary of dimension key/values where
the key is the dimension name and the value
is either a scalar value or an iterator
of values to be associated with that
dimension.
:type unit: string
:param unit: The unit for the metric. Valid values are:
Seconds | Microseconds | Milliseconds | Bytes | Kilobytes |
Megabytes | Gigabytes | Terabytes | Bits | Kilobits |
Megabits | Gigabits | Terabits | Percent | Count |
Bytes/Second | Kilobytes/Second | Megabytes/Second |
Gigabytes/Second | Terabytes/Second | Bits/Second |
Kilobits/Second | Megabits/Second | Gigabits/Second |
Terabits/Second | Count/Second | None
:rtype: list
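Example (a minimal sketch; the region, metric, and instance id are
hypothetical and valid AWS credentials are assumed)::
    import datetime
    from boto.ec2.cloudwatch import connect_to_region
    cw = connect_to_region('us-east-1')
    end = datetime.datetime.utcnow()
    start = end - datetime.timedelta(hours=1)
    stats = cw.get_metric_statistics(
        300, start, end, 'CPUUtilization', 'AWS/EC2', ['Average'],
        dimensions={'InstanceId': 'i-12345678'})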
"""
params = {'Period': period,
'MetricName': metric_name,
'Namespace': namespace,
'StartTime': start_time.isoformat(),
'EndTime': end_time.isoformat()}
self.build_list_params(params, statistics, 'Statistics.member.%d')
if dimensions:
self.build_dimension_param(dimensions, params)
if unit:
params['Unit'] = unit
return self.get_list('GetMetricStatistics', params,
[('member', Datapoint)])
def list_metrics(self, next_token=None, dimensions=None,
metric_name=None, namespace=None):
"""
Returns a list of the valid metrics for which there is recorded
data available.
:type next_token: str
:param next_token: A maximum of 500 metrics will be returned
at one time. If more results are available, the ResultSet
returned will contain a non-Null next_token attribute.
Passing that token as a parameter to list_metrics will
retrieve the next page of metrics.
:type dimensions: dict
:param dimensions: A dictionary containing name/value
pairs that will be used to filter the results. The key in
the dictionary is the name of a Dimension. The value in
the dictionary is either a scalar value of that Dimension
name that you want to filter on, a list of values to
filter on or None if you want all metrics with that
Dimension name.
:type metric_name: str
:param metric_name: The name of the Metric to filter against. If None,
all Metric names will be returned.
:type namespace: str
:param namespace: A Metric namespace to filter against (e.g. AWS/EC2).
If None, Metrics from all namespaces will be returned.
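Example (a minimal sketch; assumes an existing CloudWatchConnection
``cw`` and recorded EC2 metrics)::
    metrics = cw.list_metrics(namespace='AWS/EC2',
                              metric_name='CPUUtilization')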
"""
params = {}
if next_token:
params['NextToken'] = next_token
if dimensions:
self.build_dimension_param(dimensions, params)
if metric_name:
params['MetricName'] = metric_name
if namespace:
params['Namespace'] = namespace
return self.get_list('ListMetrics', params, [('member', Metric)])
def put_metric_data(self, namespace, name, value=None, timestamp=None,
unit=None, dimensions=None, statistics=None):
"""
Publishes metric data points to Amazon CloudWatch. Amazon Cloudwatch
associates the data points with the specified metric. If the specified
metric does not exist, Amazon CloudWatch creates the metric. If a list
is specified for some, but not all, of the arguments, the remaining
arguments are repeated a corresponding number of times.
:type namespace: str
:param namespace: The namespace of the metric.
:type name: str or list
:param name: The name of the metric.
:type value: float or list
:param value: The value for the metric.
:type timestamp: datetime or list
:param timestamp: The time stamp used for the metric. If not specified,
the default value is set to the time the metric data was received.
:type unit: string or list
:param unit: The unit of the metric. Valid Values: Seconds |
Microseconds | Milliseconds | Bytes | Kilobytes |
Megabytes | Gigabytes | Terabytes | Bits | Kilobits |
Megabits | Gigabits | Terabits | Percent | Count |
Bytes/Second | Kilobytes/Second | Megabytes/Second |
Gigabytes/Second | Terabytes/Second | Bits/Second |
Kilobits/Second | Megabits/Second | Gigabits/Second |
Terabits/Second | Count/Second | None
:type dimensions: dict
:param dimensions: Add extra name value pairs to associate
with the metric, i.e.:
{'name1': value1, 'name2': (value2, value3)}
:type statistics: dict or list
:param statistics: Use a statistic set instead of a value, for example::
{'maximum': 30, 'minimum': 1, 'samplecount': 100, 'sum': 10000}
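Example (a minimal sketch; the namespace, metric name, and dimension
are hypothetical and ``cw`` is an existing connection)::
    cw.put_metric_data('MyApp', 'RequestLatency', value=0.42,
                       unit='Seconds', dimensions={'Host': 'web-1'})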
"""
params = {'Namespace': namespace}
self.build_put_params(params, name, value=value, timestamp=timestamp,
unit=unit, dimensions=dimensions, statistics=statistics)
return self.get_status('PutMetricData', params, verb="POST")
def describe_alarms(self, action_prefix=None, alarm_name_prefix=None,
alarm_names=None, max_records=None, state_value=None,
next_token=None):
"""
Retrieves alarms with the specified names. If no name is specified, all
alarms for the user are returned. Alarms can be retrieved by using only
a prefix for the alarm name, the alarm state, or a prefix for any
action.
:type action_prefix: string
:param action_prefix: The action name prefix.
:type alarm_name_prefix: string
:param alarm_name_prefix: The alarm name prefix. AlarmNames cannot
be specified if this parameter is specified.
:type alarm_names: list
:param alarm_names: A list of alarm names to retrieve information for.
:type max_records: int
:param max_records: The maximum number of alarm descriptions
to retrieve.
:type state_value: string
:param state_value: The state value to be used in matching alarms.
:type next_token: string
:param next_token: The token returned by a previous call to
indicate that there is more data.
:rtype: list
"""
params = {}
if action_prefix:
params['ActionPrefix'] = action_prefix
if alarm_name_prefix:
params['AlarmNamePrefix'] = alarm_name_prefix
elif alarm_names:
self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
if state_value:
params['StateValue'] = state_value
result = self.get_list('DescribeAlarms', params,
[('MetricAlarms', MetricAlarms)])
ret = result[0]
ret.next_token = result.next_token
return ret
def describe_alarm_history(self, alarm_name=None,
start_date=None, end_date=None,
max_records=None, history_item_type=None,
next_token=None):
"""
Retrieves history for the specified alarm. Filter alarms by date range
or item type. If an alarm name is not specified, Amazon CloudWatch
returns histories for all of the owner's alarms.
Amazon CloudWatch retains the history of deleted alarms for a period of
six weeks. If an alarm has been deleted, its history can still be
queried.
:type alarm_name: string
:param alarm_name: The name of the alarm.
:type start_date: datetime
:param start_date: The starting date to retrieve alarm history.
:type end_date: datetime
:param end_date: The ending date to retrieve alarm history.
:type history_item_type: string
:param history_item_type: The type of alarm histories to retrieve
(ConfigurationUpdate | StateUpdate | Action)
:type max_records: int
:param max_records: The maximum number of alarm descriptions
to retrieve.
:type next_token: string
:param next_token: The token returned by a previous call to indicate
that there is more data.
:rtype: list
"""
params = {}
if alarm_name:
params['AlarmName'] = alarm_name
if start_date:
params['StartDate'] = start_date.isoformat()
if end_date:
params['EndDate'] = end_date.isoformat()
if history_item_type:
params['HistoryItemType'] = history_item_type
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self.get_list('DescribeAlarmHistory', params,
[('member', AlarmHistoryItem)])
def describe_alarms_for_metric(self, metric_name, namespace, period=None,
statistic=None, dimensions=None, unit=None):
"""
Retrieves all alarms for a single metric. Specify a statistic, period,
or unit to filter the set of alarms further.
:type metric_name: string
:param metric_name: The name of the metric
:type namespace: string
:param namespace: The namespace of the metric.
:type period: int
:param period: The period in seconds over which the statistic
is applied.
:type statistic: string
:param statistic: The statistic for the metric.
:type dimensions: dict
:param dimensions: A dictionary containing name/value
pairs that will be used to filter the results. The key in
the dictionary is the name of a Dimension. The value in
the dictionary is either a scalar value of that Dimension
name that you want to filter on, a list of values to
filter on or None if you want all metrics with that
Dimension name.
:type unit: string
:param unit: The unit for the metric.
:rtype: list
"""
params = {'MetricName': metric_name,
'Namespace': namespace}
if period:
params['Period'] = period
if statistic:
params['Statistic'] = statistic
if dimensions:
self.build_dimension_param(dimensions, params)
if unit:
params['Unit'] = unit
return self.get_list('DescribeAlarmsForMetric', params,
[('member', MetricAlarm)])
def put_metric_alarm(self, alarm):
"""
Creates or updates an alarm and associates it with the specified Amazon
CloudWatch metric. Optionally, this operation can associate one or more
Amazon Simple Notification Service resources with the alarm.
When this operation creates an alarm, the alarm state is immediately
set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is
set appropriately. Any actions associated with the StateValue are then
executed.
When updating an existing alarm, its StateValue is left unchanged.
:type alarm: boto.ec2.cloudwatch.alarm.MetricAlarm
:param alarm: MetricAlarm object.
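Example (a minimal sketch; the alarm name, threshold, and dimensions
are hypothetical and ``cw`` is an existing connection)::
    from boto.ec2.cloudwatch.alarm import MetricAlarm
    alarm = MetricAlarm(name='cpu-high', metric='CPUUtilization',
                        namespace='AWS/EC2', statistic='Average',
                        comparison='>', threshold=80.0, period=300,
                        evaluation_periods=2,
                        dimensions={'InstanceId': 'i-12345678'})
    cw.put_metric_alarm(alarm)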
"""
params = {
'AlarmName': alarm.name,
'MetricName': alarm.metric,
'Namespace': alarm.namespace,
'Statistic': alarm.statistic,
'ComparisonOperator': alarm.comparison,
'Threshold': alarm.threshold,
'EvaluationPeriods': alarm.evaluation_periods,
'Period': alarm.period,
}
if alarm.actions_enabled is not None:
params['ActionsEnabled'] = alarm.actions_enabled
if alarm.alarm_actions:
self.build_list_params(params, alarm.alarm_actions,
'AlarmActions.member.%s')
if alarm.description:
params['AlarmDescription'] = alarm.description
if alarm.dimensions:
self.build_dimension_param(alarm.dimensions, params)
if alarm.insufficient_data_actions:
self.build_list_params(params, alarm.insufficient_data_actions,
'InsufficientDataActions.member.%s')
if alarm.ok_actions:
self.build_list_params(params, alarm.ok_actions,
'OKActions.member.%s')
if alarm.unit:
params['Unit'] = alarm.unit
alarm.connection = self
return self.get_status('PutMetricAlarm', params)
create_alarm = put_metric_alarm
update_alarm = put_metric_alarm
def delete_alarms(self, alarms):
"""
Deletes all specified alarms. In the event of an error, no
alarms are deleted.
:type alarms: list
:param alarms: List of alarm names.
"""
params = {}
self.build_list_params(params, alarms, 'AlarmNames.member.%s')
return self.get_status('DeleteAlarms', params)
def set_alarm_state(self, alarm_name, state_reason, state_value,
state_reason_data=None):
"""
Temporarily sets the state of an alarm. When the updated StateValue
differs from the previous value, the action configured for the
appropriate state is invoked. This is not a permanent change. The next
periodic alarm check (in about a minute) will set the alarm to its
actual state.
:type alarm_name: string
:param alarm_name: Descriptive name for alarm.
:type state_reason: string
:param state_reason: Human readable reason.
:type state_value: string
:param state_value: OK | ALARM | INSUFFICIENT_DATA
:type state_reason_data: string
:param state_reason_data: Reason string (will be jsonified).
"""
params = {'AlarmName': alarm_name,
'StateReason': state_reason,
'StateValue': state_value}
if state_reason_data:
params['StateReasonData'] = json.dumps(state_reason_data)
return self.get_status('SetAlarmState', params)
def enable_alarm_actions(self, alarm_names):
"""
Enables actions for the specified alarms.
:type alarms: list
:param alarms: List of alarm names.
"""
params = {}
self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
return self.get_status('EnableAlarmActions', params)
def disable_alarm_actions(self, alarm_names):
"""
Disables actions for the specified alarms.
:type alarms: list
:param alarms: List of alarm names.
"""
params = {}
self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
return self.get_status('DisableAlarmActions', params)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import glyphsLib
def main(args=None):
if not args:
args = sys.argv[1:]
python_executable = os.path.basename(sys.executable)
parser = argparse.ArgumentParser(prog=f"{python_executable} -m glyphsLib")
subparsers = parser.add_subparsers()
parser_glyphs2ufo = subparsers.add_parser("glyphs2ufo", help=glyphs2ufo.__doc__)
parser_glyphs2ufo.set_defaults(func=glyphs2ufo)
parser_glyphs2ufo.add_argument(
"--version", action="version", version="glyphsLib %s" % glyphsLib.__version__
)
parser_glyphs2ufo.add_argument(
"glyphs_file", metavar="GLYPHS_FILE", help="Glyphs file to convert."
)
parser_glyphs2ufo.add_argument(
"-m",
"--output-dir",
default=None,
help="Output directory of masters. (default: directory of Glyphs file)",
)
parser_glyphs2ufo.add_argument(
"-d",
"--designspace-path",
default=None,
help="Output path of designspace file. (default: directory of Glyphs file)",
)
parser_glyphs2ufo.add_argument(
"-n",
"--instance-dir",
default=None,
help=(
"Output directory of instances. (default: output_dir/instance_ufos"
"). This sets the file path for instances inside the designspace "
"file."
),
)
parser_glyphs2ufo.add_argument(
"--ufo-module",
metavar="UFO_MODULE",
choices=("ufoLib2", "defcon"),
default="ufoLib2",
help=(
"Select the default library for writing UFOs. Choose between: %(choices)s "
"(default: %(default)s)"
),
)
group = parser_glyphs2ufo.add_argument_group(
"Roundtripping between Glyphs and UFOs"
)
group.add_argument(
"--minimal",
action="store_true",
help=("Create minimal UFO files with only data needed for building fonts."),
)
group.add_argument(
"--no-preserve-glyphsapp-metadata",
action="store_false",
help=(
"Skip preserving Glyphs metadata in master UFOs and designspace "
"file, which would be used to minimize differences when "
"roundtripping between Glyphs and UFOs."
),
)
group.add_argument(
"--propagate-anchors",
default=None,
action="store_true",
help=(
"Copy anchors from underlying components to actual "
"glyph. Glyphs would do this implicitly, only use if you need "
"full control over all anchors."
),
)
group.add_argument(
"--generate-GDEF",
action="store_true",
help=(
"write a `table GDEF {...}` statement in the UFO features "
"containing `GlyphClassDef` and `LigatureCaretByPos` statements"
),
)
group.add_argument(
"-N",
"--normalize-ufos",
action="store_true",
help=(
"Normalize UFOs with ufonormalizer, which avoids "
"differences due to spacing, reordering of keys, etc."
),
)
group.add_argument(
"--create-background-layers",
action="store_true",
help=(
"Create background layers for all glyphs unless present, "
"this can help in a workflow with multiple tools that "
"may create background layers automatically."
),
)
group.add_argument(
"--no-store-editor-state",
action="store_true",
help=(
"Skip storing editor state in the UFO, like which glyphs are open "
"in which tab (DisplayStrings)."
),
)
group.add_argument(
"--write-public-skip-export-glyphs",
action="store_true",
help=(
"Store the glyph export flag in the `public.skipExportGlyphs` list "
"instead of the glyph-level 'com.schriftgestaltung.Glyphs.Export' lib "
"key."
),
)
group = parser_glyphs2ufo.add_argument_group("Glyph data")
group.add_argument(
"--glyph-data",
action="append",
metavar="GLYPHDATA",
help=(
"Custom GlyphData XML file with glyph info (production name, "
"script, category, subCategory, etc.). Can be used more than once."
),
)
parser_ufo2glyphs = subparsers.add_parser("ufo2glyphs", help=ufo2glyphs.__doc__)
parser_ufo2glyphs.set_defaults(func=ufo2glyphs)
parser_ufo2glyphs.add_argument(
"--version", action="version", version="glyphsLib %s" % glyphsLib.__version__
)
parser_ufo2glyphs.add_argument(
"designspace_file_or_UFOs",
nargs="+",
metavar="DESIGNSPACE_FILE_OR_UFOS",
help=(
"A single designspace file *or* one or more UFOs to convert to "
"a Glyphs file."
),
)
parser_ufo2glyphs.add_argument(
"--output-path", help="The path to write the Glyphs file to."
)
parser_ufo2glyphs.add_argument(
"--ufo-module",
metavar="UFO_MODULE",
choices=("ufoLib2", "defcon"),
default="ufoLib2",
help=(
"Select the default library for reading UFOs. Choose between: %(choices)s "
"(default: %(default)s)"
),
)
group = parser_ufo2glyphs.add_argument_group(
"Roundtripping between UFOs and Glyphs"
)
group.add_argument(
"--no-preserve-glyphsapp-metadata",
action="store_false",
help=(
"Skip preserving Glyphs metadata in master UFOs and designspace "
"file, which would be used to minimize differences when "
"roundtripping between Glyphs and UFOs."
),
)
group.add_argument(
"--enable-last-change",
action="store_false",
help="Store modification timestamp in glyphs. Unnecessary when using Git.",
)
group.add_argument(
"--enable-automatic-alignment",
action="store_false",
help="Enable automatic alignment of components in glyphs.",
)
options = parser.parse_args(args)
if "func" in vars(options):
return options.func(options)
else:
parser.print_help()
def glyphs2ufo(options):
"""Converts a Glyphs.app source file into UFO masters and a designspace file."""
if options.output_dir is None:
options.output_dir = os.path.dirname(options.glyphs_file) or "."
if options.designspace_path is None:
options.designspace_path = os.path.join(
options.output_dir,
os.path.basename(os.path.splitext(options.glyphs_file)[0]) + ".designspace",
)
# If options.instance_dir is None, instance UFO paths in the designspace
# file will either use the value in customParameter's UFO_FILENAME_CUSTOM_PARAM or
# be made relative to "instance_ufos/".
glyphsLib.build_masters(
options.glyphs_file,
options.output_dir,
options.instance_dir,
designspace_path=options.designspace_path,
minimize_glyphs_diffs=options.no_preserve_glyphsapp_metadata,
propagate_anchors=options.propagate_anchors,
normalize_ufos=options.normalize_ufos,
create_background_layers=options.create_background_layers,
generate_GDEF=options.generate_GDEF,
store_editor_state=not options.no_store_editor_state,
write_skipexportglyphs=options.write_public_skip_export_glyphs,
ufo_module=__import__(options.ufo_module),
minimal=options.minimal,
glyph_data=options.glyph_data or None,
)
def _glyphs2ufo_entry_point():
"""Provides entry point for a script to keep argparsing in main()."""
args = sys.argv[1:]
args.insert(0, "glyphs2ufo")
return main(args)
def ufo2glyphs(options):
"""Convert one designspace file or one or more UFOs to a Glyphs.app source file."""
import fontTools.designspaceLib
from glyphsLib.util import open_ufo
ufo_module = __import__(options.ufo_module)
sources = options.designspace_file_or_UFOs
designspace_file = None
if (
len(sources) == 1
and sources[0].endswith(".designspace")
and os.path.isfile(sources[0])
):
designspace_file = sources[0]
designspace = fontTools.designspaceLib.DesignSpaceDocument()
designspace.read(designspace_file)
object_to_read = designspace
elif all(source.endswith(".ufo") and os.path.isdir(source) for source in sources):
ufos = [open_ufo(source, ufo_module.Font) for source in sources]
ufos.sort(
key=lambda ufo: [ # Order the masters by weight and width
ufo.info.openTypeOS2WeightClass or 400,
ufo.info.openTypeOS2WidthClass or 5,
]
)
object_to_read = ufos
else:
print(
"Please specify just one designspace file *or* one or more "
"UFOs. They must end in '.designspace' or '.ufo', respectively.",
file=sys.stderr,
)
return 1
font = glyphsLib.to_glyphs(
object_to_read,
ufo_module=ufo_module,
minimize_ufo_diffs=options.no_preserve_glyphsapp_metadata,
)
# Make the Glyphs file more suitable for roundtrip:
font.customParameters["Disable Last Change"] = options.enable_last_change
font.disablesAutomaticAlignment = options.enable_automatic_alignment
if options.output_path:
font.save(options.output_path)
else:
if designspace_file:
filename_to_write = os.path.splitext(designspace_file)[0] + ".glyphs"
else:
filename_to_write = os.path.join(
os.path.dirname(sources[0]),
font.familyName.replace(" ", "") + ".glyphs",
)
font.save(filename_to_write)
def _ufo2glyphs_entry_point():
"""Provides entry point for a script to keep argparsing in main()."""
args = sys.argv[1:]
args.insert(0, "ufo2glyphs")
return main(args)
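# A minimal usage sketch (the file names are hypothetical). Running this
# module directly mirrors the console entry points, e.g.:
#   python -m glyphsLib glyphs2ufo MyFont.glyphs -m master_ufos
#   python -m glyphsLib ufo2glyphs MyFont-Regular.ufo MyFont-Bold.ufo
if __name__ == "__main__":
    sys.exit(main())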
|
|
# -*- coding: utf-8 -*-
"""Playlist model."""
from balistos.models.user import User
from datetime import datetime
from datetime import timedelta
from pyramid_basemodel import Base
from pyramid_basemodel import BaseMixin
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Unicode
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
class Playlist(Base, BaseMixin):
"""A class representing a Playlist."""
__tablename__ = 'playlists'
uri = Column(
String,
unique=True,
nullable=False
)
title = Column(
Unicode(200),
nullable=False
)
description = Column(
Unicode(200),
nullable=True,
)
duration_limit = Column(
Integer,
nullable=False,
default=600
)
locked = Column(
Boolean,
nullable=False,
default=False
)
public = Column(
Boolean,
nullable=False,
default=True
)
last_active = Column(
DateTime,
nullable=False,
default=datetime.now
)
@classmethod
def get(self, uri):
"""Get a Playlist by uri."""
result = Playlist.query.filter_by(uri=uri)
if result.count() < 1:
return None
return result.one()
@classmethod
def search_title(self, search_string):
"""Get Playlist by searching title."""
search_string = '%' + search_string + '%'
result = Playlist.query.filter(
Playlist.title.ilike(search_string),
)
if result.count() < 1:
return []
return result.all()
@classmethod
def get_active_playlists(self):
"""Get active playlists"""
result = Playlist.query.filter(
Playlist.last_active > datetime.now() - timedelta(0, 10)
)
if result.count() < 1:
return []
return result.all()
@classmethod
def get_all(class_, order_by='title', filter_by=None):
"""Return all Playlists.
filter_by: dict -> {'name': 'foo'}
By default, order by Playlist.title.
"""
Playlist = class_
q = Playlist.query
q = q.order_by(getattr(Playlist, order_by))
if filter_by:
q = q.filter_by(**filter_by)
return q
@classmethod
def get_popular(class_, order_by='title', filter_by=None):
"""Return all Playlists.
filter_by: dict -> {'name': 'foo'}
By default, order by Playlist.title.
"""
Playlist = class_
q = Playlist.query
#TODO popular mechanism
q = q.order_by(getattr(Playlist, order_by)).limit(10)
if filter_by:
q = q.filter_by(**filter_by)
return q
class PlaylistUser(Base, BaseMixin):
__tablename__ = 'playlist_user'
# permission on playlist
# 0 - no permission
# 1 - add, vote, chat on playlist
# 2 - admin rights on playlist
permission = Column(
Integer,
nullable=False
)
last_active = Column(
DateTime,
nullable=False,
default=datetime.now
)
playlist_id = Column(Integer, ForeignKey('playlists.id'))
playlist = relationship(
Playlist,
single_parent=True,
backref=backref(
'playlist_users',
cascade='all, delete-orphan',
single_parent=False,
uselist=True,
)
)
user_id = Column(Integer, ForeignKey('users.id'))
user = relationship(
User,
single_parent=True,
backref=backref(
'playlist_users',
cascade='all, delete-orphan',
single_parent=False,
uselist=True,
)
)
@classmethod
def get_by_playlist(self, playlist):
"""Get PlaylistUser objects by Playlist."""
result = PlaylistUser.query.filter(
PlaylistUser.playlist == playlist,
)
if result.count() < 1:
return []
return result.all()
@classmethod
def get_by_playlist_and_user(self, playlist, user):
"""Get PlaylistClipUser by Playlist and User."""
result = PlaylistUser.query.filter(
PlaylistUser.playlist == playlist,
PlaylistUser.user == user
)
if result.count() < 1:
return None
return result.one()
@classmethod
def get_active_users_for_playlist(self, playlist):
"""Get a Playlist by uri."""
result = PlaylistUser.query.filter(
PlaylistUser.playlist == playlist,
PlaylistUser.last_active > datetime.now() - timedelta(0, 10)
)
if result.count() < 1:
return []
return result.all()
@classmethod
def get_by_user_latest(self, user, limit=1):
"""Get PlaylistClipUser by Playlist and User."""
result = PlaylistUser.query.filter(
PlaylistUser.user == user
).order_by(PlaylistUser.last_active.desc()).limit(limit)
if result.count() < 1:
return []
return result.all()
class ChatMessage(Base, BaseMixin):
__tablename__ = 'chat_messages'
message = Column(
Unicode(200),
nullable=True,
)
playlist_id = Column(Integer, ForeignKey('playlists.id'))
playlist = relationship(
Playlist,
single_parent=True,
backref=backref(
'chats',
cascade='all, delete-orphan',
single_parent=False,
uselist=True,
)
)
posted = Column(
DateTime,
nullable=False,
)
user_id = Column(Integer, ForeignKey('users.id'))
user = relationship(
User,
single_parent=True,
backref=backref(
'chats',
cascade='all, delete-orphan',
single_parent=False,
uselist=True,
)
)
@classmethod
def get_by_playlist_and_user(self, playlist, user):
"""Get ChatMessage by PlaylistClip and User."""
result = ChatMessage.query.filter(
ChatMessage.playlist == playlist,
ChatMessage.user == user
).order_by('posted')
if result.count() < 1:
return None
return result.all()
@classmethod
def get_by_playlist(self, playlist):
"""Get latest Chatmessages of playlist."""
result = ChatMessage.query.filter(
ChatMessage.playlist == playlist,
).order_by(
ChatMessage.posted.desc()).limit(20)
if result.count() < 1:
return []
return reversed(result.all())
|
|
# Copyright (c) 2014 Quanta Research Cambridge, Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import math
import re
import functools
import json
import os
import sys
import traceback
import AST
import globalv
import util
verbose = False
tempFilename = 'generatedDesignInterfaceFile.json'
lookupTable = {}
class InterfaceMixin:
def getSubinterface(self, name):
subinterfaceName = name
if not globalv.globalvars.has_key(subinterfaceName):
return None
subinterface = globalv.globalvars[subinterfaceName]
#print 'subinterface', subinterface, subinterface
return subinterface
def parentClass(self, default):
rv = default if (len(self.typeClassInstances)==0) else (self.typeClassInstances[0])
return rv
def dtInfo(arg):
rc = {}
if hasattr(arg, 'name'):
rc['name'] = arg.name
if lookupTable.get(arg.name):
rc['name'] = lookupTable[arg.name]
if hasattr(arg, 'type') and arg.type != 'Type':
rc['type'] = arg.type
if lookupTable.get(arg.type):
rc['type'] = lookupTable[arg.type]
if hasattr(arg, 'params'):
if arg.params is not None and arg.params != []:
rc['params'] = [dtInfo(p) for p in arg.params]
if hasattr(arg, 'elements'):
if arg.type == 'Enum':
rc['elements'] = arg.elements
else:
rc['elements'] = [piInfo(p) for p in arg.elements]
return rc
def piInfo(pitem):
rc = {}
rc['pname'] = pitem.name
rc['ptype'] = dtInfo(pitem.type)
if hasattr(pitem, 'oldtype'):
rc['oldtype'] = dtInfo(pitem.oldtype)
return rc
def declInfo(mitem):
rc = {}
rc['dname'] = mitem.name
rc['dparams'] = []
for pitem in mitem.params:
rc['dparams'].append(piInfo(pitem))
return rc
def classInfo(item):
rc = {
'Package': os.path.splitext(os.path.basename(item.package))[0],
'cname': item.name,
'cdecls': [],
}
for mitem in item.decls:
rc['cdecls'].append(declInfo(mitem))
return rc
def serialize_json(interfaces, globalimports, bsvdefines):
global verbose
itemlist = []
for item in interfaces:
itemlist.append(classInfo(item))
jfile = open(tempFilename, 'w')
toplevel = {}
toplevel['interfaces'] = itemlist
gdlist = []
for item in globalv.globaldecls:
if item.type == 'TypeDef':
newitem = {'dtype': item.type}
newitem['tname'] = item.name
newitem['tdtype'] = dtInfo(item.tdtype)
if item.params:
newitem['tparams'] = item.params
if verbose:
print 'TYPEDEF globaldecl:', item, newitem
gdlist.append(newitem)
elif verbose:
print 'Unprocessed globaldecl:', item
toplevel['globaldecls'] = gdlist
toplevel['globalimports'] = globalimports
toplevel['bsvdefines'] = bsvdefines
if True:
try:
json.dump(toplevel, jfile, sort_keys = True, indent = 4)
jfile.close()
j2file = open(tempFilename).read()
toplevelnew = json.loads(j2file)
except:
print 'Unable to encode json file', tempFilename
#print 'WWWW', toplevel
sys.exit(-1)
return toplevel
class Method:
def __init__(self, name, return_type, params):
self.type = 'Method'
self.name = name
self.return_type = return_type
self.params = params
def __repr__(self):
sparams = [p.__repr__() for p in self.params]
return '<method: %s %s %s>' % (self.name, self.return_type, sparams)
def instantiate(self, paramBindings):
#print 'instantiate method', self.name, self.params
return Method(self.name,
self.return_type.instantiate(paramBindings),
[ p.instantiate(paramBindings) for p in self.params])
class Function:
def __init__(self, name, return_type, params):
self.type = 'Function'
self.name = name
self.return_type = return_type
self.params = params
def __repr__(self):
if not self.params:
return '<function: %s %s NONE>' % (self.name, self.return_type)
sparams = map(str, self.params)
return '<function: %s %s %s>' % (self.name, self.return_type, sparams)
class Variable:
def __init__(self, name, t, value):
self.type = 'Variable'
self.name = name
self.type = t
self.value = value
if t and t.type == 'Type' and t.name == 'Integer' and value and value.type == 'Type':
lookupTable[name] = value.name
def __repr__(self):
return '<variable: %s : %s>' % (self.name, self.type)
class Interface(InterfaceMixin):
def __init__(self, name, params, decls, subinterfacename, packagename):
self.type = 'Interface'
self.name = name
self.params = params
self.decls = decls
self.subinterfacename = subinterfacename
self.typeClassInstances = []
self.package = packagename
def interfaceType(self):
return Type(self.name,self.params)
def __repr__(self):
return '{interface: %s (%s) : %s}' % (self.name, self.params, self.typeClassInstances)
def instantiate(self, paramBindings):
newInterface = Interface(self.name, [],
[d.instantiate(paramBindings) for d in self.decls],
self.subinterfacename,
self.package)
newInterface.typeClassInstances = self.typeClassInstances
return newInterface
class Typeclass:
def __init__(self, name):
self.name = name
self.type = 'TypeClass'
def __repr__(self):
return '{typeclass %s}' % (self.name)
class TypeclassInstance:
def __init__(self, name, params, provisos, decl):
self.name = name
self.params = params
self.provisos = provisos
self.decl = decl
self.type = 'TypeclassInstance'
def __repr__(self):
return '{typeclassinstance %s %s}' % (self.name, self.params)
class Module:
def __init__(self, moduleContext, name, params, interface, provisos, decls):
self.type = 'Module'
self.name = name
self.moduleContext = moduleContext
self.interface = interface
self.params = params
self.provisos = provisos
self.decls = decls
def __repr__(self):
return '{module: %s %s}' % (self.name, self.decls)
class EnumElement:
def __init__(self, name, qualifiers, value):
self.name = name
self.qualifiers = qualifiers
self.value = value
def __repr__(self):
return '{enumelt: %s}' % (self.name)
class Enum:
def __init__(self, elements):
self.type = 'Enum'
self.elements = elements
def __repr__(self):
return '{enum: %s}' % (self.elements)
def instantiate(self, paramBindings):
return self
class StructMember:
def __init__(self, t, name):
self.type = t
self.name = name
def __repr__(self):
return '{field: %s %s}' % (self.type, self.name)
def instantiate(self, paramBindings):
return StructMember(self.type.instantiate(paramBindings), self.name)
class Struct:
def __init__(self, elements):
self.type = 'Struct'
self.elements = elements
def __repr__(self):
return '{struct: %s}' % (self.elements)
def instantiate(self, paramBindings):
return Struct([e.instantiate(paramBindings) for e in self.elements])
class TypeDef:
def __init__(self, tdtype, name, params):
self.name = name
self.params = params
self.type = 'TypeDef'
self.tdtype = tdtype
if tdtype and tdtype.type != 'Type':
tdtype.name = name
self.type = 'TypeDef'
def __repr__(self):
return '{typedef: %s %s}' % (self.tdtype, self.name)
class Param:
def __init__(self, name, t):
self.name = name
self.type = t
def __repr__(self):
return '{param %s: %s}' % (self.name, self.type)
def instantiate(self, paramBindings):
return Param(self.name,
self.type.instantiate(paramBindings))
class Type:
def __init__(self, name, params):
self.type = 'Type'
self.name = name
if params:
self.params = params
else:
self.params = []
def __repr__(self):
sparams = map(str, self.params)
return '{type: %s %s}' % (self.name, sparams)
def instantiate(self, paramBindings):
#print 'Type.instantiate', self.name, paramBindings
if paramBindings.has_key(self.name):
return paramBindings[self.name]
else:
return Type(self.name, [p.instantiate(paramBindings) for p in self.params])
|
|
"""
BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21).
Adapted from wsgiref.simple_server: http://svn.eby-sarna.com/wsgiref/
This is a simple server for use in testing or debugging Django apps. It hasn't
been reviewed for security issues. Don't use it for production use.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import mimetypes
import os
import re
import stat
import sys
import urllib
from google.appengine._internal.django.core.management.color import color_style
from google.appengine._internal.django.utils.http import http_date
from google.appengine._internal.django.utils._os import safe_join
__version__ = "0.1"
__all__ = ['WSGIServer','WSGIRequestHandler']
server_version = "WSGIServer/" + __version__
sys_version = "Python/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class WSGIServerException(Exception):
pass
class FileWrapper(object):
"""Wrapper to convert file-like objects to iterables"""
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike,'close'):
self.close = filelike.close
def __getitem__(self,key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
def __iter__(self):
return self
def next(self):
data = self.filelike.read(self.blksize)
if data:
return data
raise StopIteration
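# A minimal usage sketch for FileWrapper (illustrative only): wrap an open file
# so the server can iterate over it in fixed-size blocks. 'path' is a
# hypothetical readable binary file supplied by the caller.
def _example_file_wrapper(path):
    wrapper = FileWrapper(open(path, 'rb'), blksize=4096)
    return [block for block in wrapper]  # iterates until read() returns ''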
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
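# A small sketch of what _formatparam produces (example values only):
def _example_formatparam():
    assert _formatparam('attachment') == 'attachment'
    assert _formatparam('filename', 'bud.gif') == 'filename="bud.gif"'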
class Headers(object):
"""Manage a collection of HTTP response headers"""
def __init__(self,headers):
if not isinstance(headers, list):
raise TypeError("Headers must be a list of name/value tuples")
self._headers = headers
def __len__(self):
"""Return the total number of headers, including duplicates."""
return len(self._headers)
def __setitem__(self, name, val):
"""Set the value of a header."""
del self[name]
self._headers.append((name, val))
def __delitem__(self,name):
"""Delete all occurrences of a header, if present.
Does *not* raise an exception if the header is missing.
"""
name = name.lower()
self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]
def __getitem__(self,name):
"""Get the first header value for 'name'
Return None if the header is missing instead of raising an exception.
Note that if the header appeared multiple times, the first exactly which
occurrance gets returned is undefined. Use getall() to get all
the values matching a header field name.
"""
return self.get(name)
def has_key(self, name):
"""Return true if the message contains the header."""
return self.get(name) is not None
__contains__ = has_key
def get_all(self, name):
"""Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original header
list or were added to this instance, and may contain duplicates. Any
fields deleted and re-inserted are always appended to the header list.
If no fields exist with the given name, returns an empty list.
"""
name = name.lower()
return [kv[1] for kv in self._headers if kv[0].lower()==name]
def get(self,name,default=None):
"""Get the first header value for 'name', or return 'default'"""
name = name.lower()
for k,v in self._headers:
if k.lower()==name:
return v
return default
def keys(self):
"""Return a list of all the header field names.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [k for k, v in self._headers]
def values(self):
"""Return a list of all header values.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [v for k, v in self._headers]
def items(self):
"""Get all the header fields and values.
These will be sorted in the order they were in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return self._headers[:]
def __repr__(self):
return "Headers(%s)" % `self._headers`
def __str__(self):
"""str() returns the formatted headers, complete with end line,
suitable for direct HTTP transmission."""
return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['',''])
def setdefault(self,name,value):
"""Return first matching header value for 'name', or 'value'
If there is no header named 'name', add a new header with name 'name'
and value 'value'."""
result = self.get(name)
if result is None:
self._headers.append((name,value))
return value
else:
return result
def add_header(self, _name, _value, **_params):
"""Extended header setting.
_name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added.
Example:
h.add_header('content-disposition', 'attachment', filename='bud.gif')
Note that unlike the corresponding 'email.Message' method, this does
*not* handle '(charset, language, value)' tuples: all values must be
strings or None.
"""
parts = []
if _value is not None:
parts.append(_value)
for k, v in _params.items():
if v is None:
parts.append(k.replace('_', '-'))
else:
parts.append(_formatparam(k.replace('_', '-'), v))
self._headers.append((_name, "; ".join(parts)))
def guess_scheme(environ):
"""Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
"""
if environ.get("HTTPS") in ('yes','on','1'):
return 'https'
else:
return 'http'
_hop_headers = {
'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
'upgrade':1
}
def is_hop_by_hop(header_name):
"""Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
return header_name.lower() in _hop_headers
class ServerHandler(object):
"""Manage the invocation of a WSGI application"""
# Configuration parameters; can override per-subclass or per-instance
wsgi_version = (1,0)
wsgi_multithread = True
wsgi_multiprocess = True
wsgi_run_once = False
origin_server = True # We are transmitting direct to client
http_version = "1.0" # Version that should be used for response
server_software = software_version
# os_environ is used to supply configuration from the OS environment:
# by default it's a copy of 'os.environ' as of import time, but you can
# override this in e.g. your __init__ method.
os_environ = dict(os.environ.items())
# Collaborator classes
wsgi_file_wrapper = FileWrapper # set to None to disable
headers_class = Headers # must be a Headers-like class
# Error handling (also per-subclass or per-instance)
traceback_limit = None # Print entire traceback to self.get_stderr()
error_status = "500 INTERNAL SERVER ERROR"
error_headers = [('Content-Type','text/plain')]
# State variables (don't mess with these)
status = result = None
headers_sent = False
headers = None
bytes_sent = 0
def __init__(self, stdin, stdout, stderr, environ, multithread=True,
multiprocess=False):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.base_env = environ
self.wsgi_multithread = multithread
self.wsgi_multiprocess = multiprocess
def run(self, application):
"""Invoke the application"""
# Note to self: don't move the close()! Asynchronous servers shouldn't
# call close() from finish_response(), so if you close() anywhere but
# the double-error branch here, you'll break asynchronous servers by
# prematurely closing. Async servers must return from 'run()' without
# closing if there might still be output to iterate over.
try:
self.setup_environ()
self.result = application(self.environ, self.start_response)
self.finish_response()
except:
try:
self.handle_error()
except:
# If we get an error handling an error, just give up already!
self.close()
raise # ...and let the actual server figure it out.
def setup_environ(self):
"""Set up the environment for one request"""
env = self.environ = self.os_environ.copy()
self.add_cgi_vars()
env['wsgi.input'] = self.get_stdin()
env['wsgi.errors'] = self.get_stderr()
env['wsgi.version'] = self.wsgi_version
env['wsgi.run_once'] = self.wsgi_run_once
env['wsgi.url_scheme'] = self.get_scheme()
env['wsgi.multithread'] = self.wsgi_multithread
env['wsgi.multiprocess'] = self.wsgi_multiprocess
if self.wsgi_file_wrapper is not None:
env['wsgi.file_wrapper'] = self.wsgi_file_wrapper
if self.origin_server and self.server_software:
env.setdefault('SERVER_SOFTWARE',self.server_software)
def finish_response(self):
"""
Send any iterable data, then close self and the iterable
Subclasses intended for use in asynchronous servers will want to
redefine this method, such that it sets up callbacks in the event loop
to iterate over the data, and to call 'self.close()' once the response
is finished.
"""
if not self.result_is_file() or not self.sendfile():
for data in self.result:
self.write(data)
self.finish_content()
self.close()
def get_scheme(self):
"""Return the URL scheme being used"""
return guess_scheme(self.environ)
def set_content_length(self):
"""Compute Content-Length or switch to chunked encoding if possible"""
try:
blocks = len(self.result)
except (TypeError, AttributeError, NotImplementedError):
pass
else:
if blocks==1:
self.headers['Content-Length'] = str(self.bytes_sent)
return
# XXX Try for chunked encoding if origin server and client is 1.1
def cleanup_headers(self):
"""Make any necessary header changes or defaults
Subclasses can extend this to add other defaults.
"""
if 'Content-Length' not in self.headers:
self.set_content_length()
def start_response(self, status, headers,exc_info=None):
"""'start_response()' callable as specified by PEP 333"""
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None # avoid dangling circular ref
elif self.headers is not None:
raise AssertionError("Headers already set!")
assert isinstance(status, str),"Status must be a string"
assert len(status)>=4,"Status must be at least 4 characters"
assert int(status[:3]),"Status message must begin w/3-digit code"
assert status[3]==" ", "Status message must have a space after code"
if __debug__:
for name,val in headers:
assert isinstance(name, str),"Header names must be strings"
assert isinstance(val, str),"Header values must be strings"
assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
self.status = status
self.headers = self.headers_class(headers)
return self.write
def send_preamble(self):
"""Transmit version/status/date/server, via self._write()"""
if self.origin_server:
if self.client_is_modern():
self._write('HTTP/%s %s\r\n' % (self.http_version,self.status))
if 'Date' not in self.headers:
self._write(
'Date: %s\r\n' % http_date()
)
if self.server_software and 'Server' not in self.headers:
self._write('Server: %s\r\n' % self.server_software)
else:
self._write('Status: %s\r\n' % self.status)
def write(self, data):
"""'write()' callable as specified by PEP 333"""
assert isinstance(data, str), "write() argument must be string"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
# If data is too large, socket will choke, so write chunks no larger
# than 32MB at a time.
length = len(data)
if length > 33554432:
offset = 0
while offset < length:
chunk_size = min(33554432, length - offset)
self._write(data[offset:offset+chunk_size])
self._flush()
offset += chunk_size
else:
self._write(data)
self._flush()
def sendfile(self):
"""Platform-specific file transmission
Override this method in subclasses to support platform-specific
file transmission. It is only called if the application's
return iterable ('self.result') is an instance of
'self.wsgi_file_wrapper'.
This method should return a true value if it was able to actually
transmit the wrapped file-like object using a platform-specific
approach. It should return a false value if normal iteration
should be used instead. An exception can be raised to indicate
that transmission was attempted, but failed.
NOTE: this method should call 'self.send_headers()' if
'self.headers_sent' is false and it is going to attempt direct
transmission of the file.
"""
return False # No platform-specific transmission by default
def finish_content(self):
"""Ensure headers and content have both been sent"""
if not self.headers_sent:
self.headers['Content-Length'] = "0"
self.send_headers()
else:
pass # XXX check if content-length was too short?
def close(self):
try:
self.request_handler.log_request(self.status.split(' ',1)[0], self.bytes_sent)
finally:
try:
if hasattr(self.result,'close'):
self.result.close()
finally:
self.result = self.headers = self.status = self.environ = None
self.bytes_sent = 0; self.headers_sent = False
def send_headers(self):
"""Transmit headers to the client, via self._write()"""
self.cleanup_headers()
self.headers_sent = True
if not self.origin_server or self.client_is_modern():
self.send_preamble()
self._write(str(self.headers))
def result_is_file(self):
"""True if 'self.result' is an instance of 'self.wsgi_file_wrapper'"""
wrapper = self.wsgi_file_wrapper
return wrapper is not None and isinstance(self.result,wrapper)
def client_is_modern(self):
"""True if client can accept status and headers"""
return self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9'
def log_exception(self,exc_info):
"""Log the 'exc_info' tuple in the server log
Subclasses may override to retarget the output or change its format.
"""
try:
from traceback import print_exception
stderr = self.get_stderr()
print_exception(
exc_info[0], exc_info[1], exc_info[2],
self.traceback_limit, stderr
)
stderr.flush()
finally:
exc_info = None
def handle_error(self):
"""Log current error, and send error output to client if possible"""
self.log_exception(sys.exc_info())
if not self.headers_sent:
self.result = self.error_output(self.environ, self.start_response)
self.finish_response()
# XXX else: attempt advanced recovery techniques for HTML or text?
def error_output(self, environ, start_response):
import traceback
start_response(self.error_status, self.error_headers[:], sys.exc_info())
return ['\n'.join(traceback.format_exception(*sys.exc_info()))]
# I/O hooks; subclasses may override these to retarget the streams
def _write(self,data):
self.stdout.write(data)
self._write = self.stdout.write
def _flush(self):
self.stdout.flush()
self._flush = self.stdout.flush
def get_stdin(self):
return self.stdin
def get_stderr(self):
return self.stderr
def add_cgi_vars(self):
self.environ.update(self.base_env)
class WSGIServer(HTTPServer):
"""BaseHTTPServer that implements the Python WSGI protocol"""
application = None
def server_bind(self):
"""Override server_bind to store the server name."""
try:
HTTPServer.server_bind(self)
except Exception, e:
raise WSGIServerException(e)
self.setup_environ()
def setup_environ(self):
# Set up base environment
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
return self.application
def set_app(self,application):
self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
server_version = "WSGIServer/" + __version__
def __init__(self, *args, **kwargs):
from google.appengine._internal.django.conf import settings
self.admin_media_prefix = settings.ADMIN_MEDIA_PREFIX
# We set self.path to avoid crashes in log_message() on unsupported
# requests (like "OPTIONS").
self.path = ''
self.style = color_style()
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def get_environ(self):
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.unquote(path)
env['QUERY_STRING'] = query
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
for h in self.headers.headers:
k,v = h.split(':',1)
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
continue # skip content length, type,etc.
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v # comma-separate multiple headers
else:
env['HTTP_'+k] = v
return env
def get_stderr(self):
return sys.stderr
def handle(self):
"""Handle a single HTTP request"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(self.rfile, self.wfile, self.get_stderr(), self.get_environ())
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
def log_message(self, format, *args):
# Don't bother logging requests for admin images or the favicon.
if self.path.startswith(self.admin_media_prefix) or self.path == '/favicon.ico':
return
msg = "[%s] %s\n" % (self.log_date_time_string(), format % args)
# Utilize terminal colors, if available
if args[1][0] == '2':
# Put 2XX first, since it should be the common case
msg = self.style.HTTP_SUCCESS(msg)
elif args[1][0] == '1':
msg = self.style.HTTP_INFO(msg)
elif args[1] == '304':
msg = self.style.HTTP_NOT_MODIFIED(msg)
elif args[1][0] == '3':
msg = self.style.HTTP_REDIRECT(msg)
elif args[1] == '404':
msg = self.style.HTTP_NOT_FOUND(msg)
elif args[1][0] == '4':
msg = self.style.HTTP_BAD_REQUEST(msg)
else:
# Any 5XX, or any other response
msg = self.style.HTTP_SERVER_ERROR(msg)
sys.stderr.write(msg)
class AdminMediaHandler(object):
"""
WSGI middleware that intercepts calls to the admin media directory, as
defined by the ADMIN_MEDIA_PREFIX setting, and serves those images.
Use this ONLY LOCALLY, for development! This hasn't been tested for
security and is not super efficient.
"""
def __init__(self, application, media_dir=None):
from google.appengine._internal.django.conf import settings
self.application = application
if not media_dir:
import django
self.media_dir = os.path.join(django.__path__[0], 'contrib', 'admin', 'media')
else:
self.media_dir = media_dir
self.media_url = settings.ADMIN_MEDIA_PREFIX
def file_path(self, url):
"""
Returns the path to the media file on disk for the given URL.
The passed URL is assumed to begin with ADMIN_MEDIA_PREFIX. If the
resultant file path is outside the media directory, then a ValueError
is raised.
"""
# Remove ADMIN_MEDIA_PREFIX.
relative_url = url[len(self.media_url):]
relative_path = urllib.url2pathname(relative_url)
return safe_join(self.media_dir, relative_path)
def __call__(self, environ, start_response):
import os.path
# Ignore requests that aren't under ADMIN_MEDIA_PREFIX. Also ignore
# all requests if ADMIN_MEDIA_PREFIX isn't a relative URL.
if self.media_url.startswith('http://') or self.media_url.startswith('https://') or not environ['PATH_INFO'].startswith(self.media_url):
return self.application(environ, start_response)
# Find the admin file and serve it up, if it exists and is readable.
try:
file_path = self.file_path(environ['PATH_INFO'])
except ValueError: # Resulting file path was not valid.
status = '404 NOT FOUND'
headers = {'Content-type': 'text/plain'}
output = ['Page not found: %s' % environ['PATH_INFO']]
start_response(status, headers.items())
return output
if not os.path.exists(file_path):
status = '404 NOT FOUND'
headers = {'Content-type': 'text/plain'}
output = ['Page not found: %s' % environ['PATH_INFO']]
else:
try:
fp = open(file_path, 'rb')
except IOError:
status = '401 UNAUTHORIZED'
headers = {'Content-type': 'text/plain'}
output = ['Permission denied: %s' % environ['PATH_INFO']]
else:
# This is a very simple implementation of conditional GET with
# the Last-Modified header. It makes media files a bit speedier
# because the files are only read off disk for the first
# request (assuming the browser/client supports conditional
# GET).
mtime = http_date(os.stat(file_path)[stat.ST_MTIME])
headers = {'Last-Modified': mtime}
if environ.get('HTTP_IF_MODIFIED_SINCE', None) == mtime:
status = '304 NOT MODIFIED'
output = []
else:
status = '200 OK'
mime_type = mimetypes.guess_type(file_path)[0]
if mime_type:
headers['Content-Type'] = mime_type
output = [fp.read()]
fp.close()
start_response(status, headers.items())
return output
def run(addr, port, wsgi_handler):
server_address = (addr, port)
httpd = WSGIServer(server_address, WSGIRequestHandler)
httpd.set_app(wsgi_handler)
httpd.serve_forever()
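# A minimal sketch of serving a trivial WSGI application with run() above.
# The address, port, and application body are illustrative; note that
# WSGIRequestHandler still expects Django settings to be configured.
def _example_serve():
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello, world!\n']
    run('127.0.0.1', 8000, hello_app)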
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.utils.util import wait_until
from ducktape.services.background_thread import BackgroundThreadService
from kafkatest.services.kafka.directory import kafka_dir
from kafkatest.services.kafka.version import TRUNK, LATEST_0_8_2
from kafkatest.services.monitor.jmx import JmxMixin
from kafkatest.services.security.security_config import SecurityConfig
import itertools
import os
import subprocess
def is_int(msg):
"""Default method used to check whether text pulled from console consumer is a message.
return int or None
"""
try:
return int(msg)
except ValueError:
return None
"""
0.8.2.1 ConsoleConsumer options
The console consumer is a tool that reads data from Kafka and outputs it to standard output.
Option Description
------ -----------
--blacklist <blacklist> Blacklist of topics to exclude from
consumption.
--consumer.config <config file> Consumer config properties file.
--csv-reporter-enabled If set, the CSV metrics reporter will
be enabled
--delete-consumer-offsets If specified, the consumer path in
zookeeper is deleted when starting up
--formatter <class> The name of a class to use for
formatting kafka messages for
display. (default: kafka.tools.
DefaultMessageFormatter)
--from-beginning If the consumer does not already have
an established offset to consume
from, start with the earliest
message present in the log rather
than the latest message.
--max-messages <Integer: num_messages> The maximum number of messages to
consume before exiting. If not set,
consumption is continual.
--metrics-dir <metrics dictory> If csv-reporter-enable is set, and
this parameter isset, the csv
metrics will be outputed here
--property <prop>
--skip-message-on-error If there is an error when processing a
message, skip it instead of halt.
--topic <topic> The topic id to consume on.
--whitelist <whitelist> Whitelist of topics to include for
consumption.
--zookeeper <urls> REQUIRED: The connection string for
the zookeeper connection in the form
host:port. Multiple URLS can be
given to allow fail-over.
"""
class ConsoleConsumer(JmxMixin, BackgroundThreadService):
# Root directory for persistent output
PERSISTENT_ROOT = "/mnt/console_consumer"
STDOUT_CAPTURE = os.path.join(PERSISTENT_ROOT, "console_consumer.stdout")
STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "console_consumer.stderr")
LOG_DIR = os.path.join(PERSISTENT_ROOT, "logs")
LOG_FILE = os.path.join(LOG_DIR, "console_consumer.log")
LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties")
CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "console_consumer.properties")
logs = {
"consumer_stdout": {
"path": STDOUT_CAPTURE,
"collect_default": False},
"consumer_stderr": {
"path": STDERR_CAPTURE,
"collect_default": False},
"consumer_log": {
"path": LOG_FILE,
"collect_default": True}
}
def __init__(self, context, num_nodes, kafka, topic, new_consumer=False, message_validator=None,
from_beginning=True, consumer_timeout_ms=None, version=TRUNK, client_id="console-consumer", jmx_object_names=None, jmx_attributes=None):
"""
Args:
context: standard context
num_nodes: number of nodes to use (this should be 1)
kafka: kafka service
topic: consume from this topic
new_consumer: use new Kafka consumer if True
message_validator: function which returns message or None
from_beginning: consume from beginning if True, else from the end
consumer_timeout_ms: corresponds to consumer.timeout.ms. consumer process ends if time between
successively consumed messages exceeds this timeout. Setting this and
waiting for the consumer to stop is a pretty good way to consume all messages
in a topic.
"""
JmxMixin.__init__(self, num_nodes, jmx_object_names, jmx_attributes or [])
BackgroundThreadService.__init__(self, context, num_nodes)
self.kafka = kafka
self.new_consumer = new_consumer
self.args = {
'topic': topic,
}
self.consumer_timeout_ms = consumer_timeout_ms
for node in self.nodes:
node.version = version
self.from_beginning = from_beginning
self.message_validator = message_validator
self.messages_consumed = {idx: [] for idx in range(1, num_nodes + 1)}
self.client_id = client_id
def prop_file(self, node):
"""Return a string which can be used to create a configuration file appropriate for the given node."""
# Process client configuration
prop_file = self.render('console_consumer.properties')
if hasattr(node, "version") and node.version <= LATEST_0_8_2:
# in 0.8.2.X and earlier, console consumer does not have --timeout-ms option
# instead, we have to pass it through the config file
prop_file += "\nconsumer.timeout.ms=%s\n" % str(self.consumer_timeout_ms)
# Add security properties to the config. If security protocol is not specified,
# use the default in the template properties.
self.security_config = self.kafka.security_config.client_config(prop_file)
prop_file += str(self.security_config)
return prop_file
def start_cmd(self, node):
"""Return the start command appropriate for the given node."""
args = self.args.copy()
args['zk_connect'] = self.kafka.zk.connect_setting()
args['stdout'] = ConsoleConsumer.STDOUT_CAPTURE
args['stderr'] = ConsoleConsumer.STDERR_CAPTURE
args['log_dir'] = ConsoleConsumer.LOG_DIR
args['log4j_config'] = ConsoleConsumer.LOG4J_CONFIG
args['config_file'] = ConsoleConsumer.CONFIG_FILE
args['stdout'] = ConsoleConsumer.STDOUT_CAPTURE
args['jmx_port'] = self.jmx_port
args['kafka_dir'] = kafka_dir(node)
args['broker_list'] = self.kafka.bootstrap_servers()
args['kafka_opts'] = self.security_config.kafka_opts
cmd = "export JMX_PORT=%(jmx_port)s; " \
"export LOG_DIR=%(log_dir)s; " \
"export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j_config)s\"; " \
"export KAFKA_OPTS=%(kafka_opts)s; " \
"/opt/%(kafka_dir)s/bin/kafka-console-consumer.sh " \
"--topic %(topic)s --consumer.config %(config_file)s" % args
if self.new_consumer:
cmd += " --new-consumer --bootstrap-server %(broker_list)s" % args
else:
cmd += " --zookeeper %(zk_connect)s" % args
if self.from_beginning:
cmd += " --from-beginning"
if self.consumer_timeout_ms is not None:
# version 0.8.X and below do not support --timeout-ms option
# This will be added in the properties file instead
if node.version > LATEST_0_8_2:
cmd += " --timeout-ms %s" % self.consumer_timeout_ms
cmd += " 2>> %(stderr)s | tee -a %(stdout)s &" % args
return cmd
def pids(self, node):
try:
cmd = "ps ax | grep -i console_consumer | grep java | grep -v grep | awk '{print $1}'"
pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]
return pid_arr
except (subprocess.CalledProcessError, ValueError):
return []
def alive(self, node):
return len(self.pids(node)) > 0
def _worker(self, idx, node):
node.account.ssh("mkdir -p %s" % ConsoleConsumer.PERSISTENT_ROOT, allow_fail=False)
# Create and upload config file
self.logger.info("console_consumer.properties:")
prop_file = self.prop_file(node)
self.logger.info(prop_file)
node.account.create_file(ConsoleConsumer.CONFIG_FILE, prop_file)
self.security_config.setup_node(node)
# Create and upload log properties
log_config = self.render('tools_log4j.properties', log_file=ConsoleConsumer.LOG_FILE)
node.account.create_file(ConsoleConsumer.LOG4J_CONFIG, log_config)
# Run and capture output
cmd = self.start_cmd(node)
self.logger.debug("Console consumer %d command: %s", idx, cmd)
consumer_output = node.account.ssh_capture(cmd, allow_fail=False)
first_line = next(consumer_output, None)
if first_line is not None:
self.start_jmx_tool(idx, node)
for line in itertools.chain([first_line], consumer_output):
msg = line.strip()
if self.message_validator is not None:
msg = self.message_validator(msg)
if msg is not None:
self.messages_consumed[idx].append(msg)
self.read_jmx_output(idx, node)
def start_node(self, node):
BackgroundThreadService.start_node(self, node)
def stop_node(self, node):
node.account.kill_process("console_consumer", allow_fail=True)
wait_until(lambda: not self.alive(node), timeout_sec=10, backoff_sec=.2,
err_msg="Timed out waiting for consumer to stop.")
def clean_node(self, node):
if self.alive(node):
self.logger.warn("%s %s was still alive at cleanup time. Killing forcefully..." %
(self.__class__.__name__, node.account))
JmxMixin.clean_node(self, node)
node.account.kill_process("java", clean_shutdown=False, allow_fail=True)
node.account.ssh("rm -rf %s" % ConsoleConsumer.PERSISTENT_ROOT, allow_fail=False)
self.security_config.clean_node(node)
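# A hedged sketch of how this service is typically driven from a ducktape test.
# 'test_context' and 'kafka' are assumed to be supplied by the enclosing test;
# the topic name and timeouts are illustrative only.
def _example_console_consumer(test_context, kafka):
    consumer = ConsoleConsumer(test_context, num_nodes=1, kafka=kafka,
                               topic="test-topic", consumer_timeout_ms=10000,
                               message_validator=is_int)
    consumer.start()
    wait_until(lambda: len(consumer.messages_consumed[1]) > 0,
               timeout_sec=60, err_msg="No messages consumed within 60 seconds")
    return consumer.messages_consumed[1]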
|
|
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class Export(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'int',
'created': 'datetime',
'account_id': 'int',
'user_id': 'int',
'entity': 'str',
'filter': 'object'
}
attribute_map = {
'id': 'id',
'created': 'created',
'account_id': 'accountId',
'user_id': 'userId',
'entity': 'entity',
'filter': 'filter'
}
def __init__(self, id=None, created=None, account_id=None, user_id=None, entity=None, filter=None, local_vars_configuration=None): # noqa: E501
"""Export - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._created = None
self._account_id = None
self._user_id = None
self._entity = None
self._filter = None
self.discriminator = None
self.id = id
self.created = created
self.account_id = account_id
self.user_id = user_id
self.entity = entity
self.filter = filter
@property
def id(self):
"""Gets the id of this Export. # noqa: E501
Unique ID for this entity. # noqa: E501
:return: The id of this Export. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Export.
Unique ID for this entity. # noqa: E501
:param id: The id of this Export. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def created(self):
"""Gets the created of this Export. # noqa: E501
The exact moment this entity was created. # noqa: E501
:return: The created of this Export. # noqa: E501
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this Export.
The exact moment this entity was created. # noqa: E501
:param created: The created of this Export. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and created is None: # noqa: E501
raise ValueError("Invalid value for `created`, must not be `None`") # noqa: E501
self._created = created
@property
def account_id(self):
"""Gets the account_id of this Export. # noqa: E501
The ID of the account that owns this entity. # noqa: E501
:return: The account_id of this Export. # noqa: E501
:rtype: int
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""Sets the account_id of this Export.
The ID of the account that owns this entity. # noqa: E501
:param account_id: The account_id of this Export. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and account_id is None: # noqa: E501
raise ValueError("Invalid value for `account_id`, must not be `None`") # noqa: E501
self._account_id = account_id
@property
def user_id(self):
"""Gets the user_id of this Export. # noqa: E501
The ID of the account that owns this entity. # noqa: E501
:return: The user_id of this Export. # noqa: E501
:rtype: int
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this Export.
The ID of the account that owns this entity. # noqa: E501
:param user_id: The user_id of this Export. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and user_id is None: # noqa: E501
raise ValueError("Invalid value for `user_id`, must not be `None`") # noqa: E501
self._user_id = user_id
@property
def entity(self):
"""Gets the entity of this Export. # noqa: E501
The name of the entity that was exported. # noqa: E501
:return: The entity of this Export. # noqa: E501
:rtype: str
"""
return self._entity
@entity.setter
def entity(self, entity):
"""Sets the entity of this Export.
The name of the entity that was exported. # noqa: E501
:param entity: The entity of this Export. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and entity is None: # noqa: E501
raise ValueError("Invalid value for `entity`, must not be `None`") # noqa: E501
allowed_values = ["Coupon", "Effect", "CustomerSession"] # noqa: E501
if self.local_vars_configuration.client_side_validation and entity not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `entity` ({0}), must be one of {1}" # noqa: E501
.format(entity, allowed_values)
)
self._entity = entity
@property
def filter(self):
"""Gets the filter of this Export. # noqa: E501
Map of keys and values that were used to filter the exported rows # noqa: E501
:return: The filter of this Export. # noqa: E501
:rtype: object
"""
return self._filter
@filter.setter
def filter(self, filter):
"""Sets the filter of this Export.
Map of keys and values that were used to filter the exported rows # noqa: E501
:param filter: The filter of this Export. # noqa: E501
:type: object
"""
if self.local_vars_configuration.client_side_validation and filter is None: # noqa: E501
raise ValueError("Invalid value for `filter`, must not be `None`") # noqa: E501
self._filter = filter
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Export):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Export):
return True
return self.to_dict() != other.to_dict()
|
|
"""
The :mod:`findig.tools.counter` module defines the :class:`Counter` tool,
which can be used as a hit counter for your application. Counters can
count hits to a particular resource, or globally within the application.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter as PyCounter, namedtuple
from datetime import datetime, timedelta
from itertools import chain, combinations
from functools import partial, reduce
from threading import Lock
import heapq
import pickle
from findig.context import ctx
class Counter:
"""
A :class:`Counter` keeps track of hits (requests) made on an
application and its resources.
:param app: The findig application whose requests the counter will track.
:type app: :class:`findig.App`, or a subclass like
:class:`findig.json.App`.
:param duration: If given, the counter will only track hits that
occurred less than this duration before the current time.
Otherwise, all hits are tracked.
:type duration: :class:`datetime.timedelta` or int representing seconds.
:param storage: A subclass of :class:`AbstractLog` that should be used
to store hits. By default, the counter will use a thread-safe,
in-memory storage class.
"""
any = [] # just needed an unhashable object here
def __init__(self, app=None, duration=-1, storage=None):
self.logs = {}
self.callbacks = {
'before': {None: []},
'after': {None: []},
}
self.duration = duration
self.partitioners = {}
self.log_cls = _HitLog if storage is None else storage
if app is not None:
self.attach_to(app)
def attach_to(self, app):
"""
Attach the counter to a findig application.
.. note:: This is called automatically for any app that is passed
to the counter's constructor.
By attaching the counter to a findig application, the counter is
enabled to count hits to the application and fire callbacks.
:param app: The findig application whose requests the counter will
track.
:type app: :class:`findig.App`, or a subclass like
:class:`findig.json.App`.
"""
app.context(self)
def partition(self, name, fgroup=None):
"""
Create a partition that is tracked by the counter.
A partition can be thought of as a set of mutually exclusive
groups that hits fall into, such that each hit can only belong to
one group in any single partition. For example, if we
partition a counter by the IP address of the requesting clients,
each possible client address can be thought of as one group, since
it's only possible for any given hit to come from just one of those
addresses.
For every partition, a *grouping function* must be supplied to help
the counter determine which group a hit belongs to. The
grouping function takes a request as its parameter, and returns
a hashable result that identifies the group. For example, if we
partition by IP address, our grouping function can either return
the IP address's string representation or 32-bit (for IPv4)
integer value.
By setting up partitions, we can query a counter for the number of
hits belonging to a particular group in any of our partitions. For
example, if we wanted to count the number of GET requests, we could
partition the counter on the request method (here our groups would
be GET, PUT, POST, etc) and query the counter for the number of
hits in the GET group in our request method partition::
counter = Counter(app)
# Create a partition named 'method', which partitions our
# hits by the request method (in uppercase).
counter.partition('method', lambda request: request.method.upper())
# Now we can query the counter for hits belonging to the 'GET'
# group in our 'method' partition
hits = counter.hits()
number_of_gets = hits.count(method='GET')
:param name: The name for our partition.
:param fgroup: The grouping function for the partition. It must
be a callable that takes a request and returns a hashable
value that identifies the group that the request falls into.
This method can be used as a decorator factory::
@counter.partition('ip')
def getip(request):
return request.remote_addr
A counter may define more than one partition.
"""
def add_partitioner(keyfunc):
self.partitioners[name] = keyfunc
return keyfunc
if fgroup is not None:
return add_partitioner(fgroup)
else:
return add_partitioner
def _register_cb(self, when, n, callback, args):
allowed_args = ['until', 'after', 'resource']
allowed_args.extend(self.partitioners)
for a in args:
if a not in allowed_args:
raise TypeError("Unknown argument: {}".format(a))
key = args.pop('resource').name if 'resource' in args else None
self.callbacks[when].setdefault(key, [])
self.callbacks[when][key].append((callback, n, args))
def every(self, n, callback=None, **args):
"""
Call a callback every *n* hits.
:param resource: If given, the callback will be called on every
*n* hits to the resource.
:param after: If given, the callback won't be called until *after*
this number of hits; it will be called on the (after+1)th hit
and every nth hit thereafter.
:param until: If given, the callback won't be called after this
number of hits; it will be called up to and including this
number of hits.
If partitions have been set up (see :meth:`partition`), additional
keyword arguments can be given as ``{partition_name}={group}``. In
this case, the hits are filtered down to those that match the
partition before issuing callbacks. For example, we can run some
code on every 100th GET request after the first 1000 like this::
counter.partition('method', lambda r: r.method.upper())
@counter.every(100, after=1000, method='GET')
def on_one_hundred_gets(method):
pass
Furthermore, if we wanted to issue a callback on every 100th
request of any specific method, we can do this::
@counter.every(100, method=counter.any)
def on_one_hundred(method):
pass
The above code is different from simply ``every(100, callback)``
in that ``every(100, callback)`` will call the callback on every
100th request received, while the example will call the callback
on every 100th request of a particular method (every 100th GET,
every 100th PUT, every 100th POST, etc.).
Whenever partition specs are used to register callbacks,
then the callback must take a named argument matching the
partition name, which will contain the partition group for the
request that triggered the callback.
"""
def decorator(callback):
self._register_cb('before', n, callback, args)
return callback
if callback is not None:
return decorator(callback)
else:
return decorator
def after_every(self, n, callback=None, **args):
"""
Call a callback after every *n* hits.
This method works exactly like :meth:`every` except that
callbacks registered with :meth:`every` are called before the
request is handled (and therefore can throw errors that interrupt
the request) while callbacks registered with this function are
run after a request has been handled.
"""
def decorator(callback):
self._register_cb('after', n, callback, args)
return callback
if callback is not None:
return decorator(callback)
else:
return decorator
def at(self, n, callback=None, **args):
"""
Call a callback on the *nth* hit.
:param resource: If given, the callback will be called on every
*n* hits to the resource.
Like :meth:`every`, this function can be called with partition
specifications.
This function is equivalent to ``every(1, after=n-1, until=n)``
"""
return self.every(1, callback=callback, after=n-1, until=n, **args)
def after(self, n, callback=None, **args):
"""
Call a callback after the *nth* hit.
This method works exactly like :meth:`at` except that
callbacks registered with :meth:`at` are called before the
request is handled (and therefore can throw errors that interrupt
the request) while callbacks registered with this function are
run after a request has been handled.
"""
return self.after_every(1, callback=callback,
after=n-1, until=n,
**args)
def hits(self, resource=None):
"""
Get the hits that have been recorded by the counter.
The result can be used to query the number of
total hits to the application or resource, as well as the number
of hits belonging to specific partition groups::
# Get the total number of hits
counter.hits().count()
# Get the number of hits belonging to a partition group
counter.hits().count(method='GET')
The result is also an iterable of (:class:`datetime.datetime`,
*partition_mapping*) objects.
:param resource: If given, only hits for this resource will be
retrieved.
"""
if resource is None:
return reduce(
lambda x, y: x + y,
self.logs.values(),
self.log_cls(self.duration, None)
)
else:
self.logs.setdefault(
resource.name, self.log_cls(self.duration, resource))
return self.logs[resource.name]
def __call__(self):
# Calling the counter registers a 'hit'.
request = ctx.request
resource = ctx.resource
self.logs.setdefault(
resource.name, self.log_cls(self.duration, resource))
hit_log = self.logs[resource.name]
partitions = {
name: func(request)
for name, func in self.partitioners.items()
}
hit_log.track(partitions)
fire_callbacks = partial(self._fire_cb_funcs, hit_log, resource,
partitions)
fire_callbacks('before')
yield
fire_callbacks('after')
def _fire_cb_funcs(self, hit_log, resource, partitions, group):
callbacks = self.callbacks[group]
callbacks.setdefault(resource.name, [])
callbacks = chain(callbacks[resource.name], callbacks[None])
for cb_func, n, args in callbacks:
# {'ip': counter.any, 'method': 'PUT'}
partby = {a: args[a] for a in args if a in self.partitioners}
# {'ip': '255.215.213.32', 'method': 'GET'}
request_vals = {k: partitions[k] for k in partby}
count = hit_log.count(**request_vals)
if partby:
# Actually verify that the callback restrictions apply to
# this request
unmatched = [p for p, v in partby.items()
if not (v == self.any or v == partitions[p])]
if unmatched:
continue
if 'until' in args and args['until'] < count:
continue
if 'after' in args and count <= args['after']:
continue
if (count - args.get('after', 0) - 1) % n == 0:
cb_func(**request_vals)
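# A short, hedged sketch tying the Counter API together: attach a counter to a
# findig app ('app' is assumed to be a findig.App instance), partition hits by
# request method, and register a callback. All names below are illustrative.
def _example_counter_setup(app):
    counter = Counter(app, duration=3600)  # only track the last hour of hits
    counter.partition('method', lambda request: request.method.upper())

    @counter.after_every(100, method='GET')
    def log_every_hundred_gets(method):
        print("another 100 {} requests handled".format(method))

    return counter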
class AbstractLog(metaclass=ABCMeta):
"""
Abstract base for a storage class for hit records.
This module provides a thread-safe, in-memory concrete implementation
that is used by default.
"""
@abstractmethod
def __init__(self, duration, resource):
"""
Initialize the abstract log
All implementations must support this signature for their
constructor.
:param duration: The length of time for which the log should
store records. Or if -1 is given, the log should store all
records indefinitely.
:type duration: :class:`datetime.timedelta` or int representing
seconds.
:param resource: The resource for which the log will store records.
"""
@abstractmethod
def __iter__(self):
"""
Iter the stored hits.
Each item iterated must be a 2-tuple in the form
(:class:`datetime.datetime`, partitions).
"""
@abstractmethod
def track(self, partitions):
"""
Store a hit record
:param partitions: A mapping from partition names to the group
that the hit matches for the partition. See
:meth:`Counter.partition`.
"""
@abstractmethod
def count(self, **partition_spec):
"""
Return the number of hits stored.
If no keyword arguments are given, then the total number of hits
stored should be returned. Otherwise, keyword arguments must be
in the form ``{partition_name}={group}``. See
:meth:`Counter.partition`.
"""
def __add__(self, other):
if isinstance(other, AbstractLog):
return _CompositeLog(self, other)
else:
return NotImplemented
class _CompositeLog(AbstractLog):
# This isn't really a storage class so much as it's a convenience
# class for stitching logs together
def __init__(self, first, second, *others):
self._logs = [first, second]
self._logs.extend(others)
def __iter__(self):
yield from chain.from_iterable(self._logs)
def track(self, partitions):
raise NotImplementedError("Composite log is read only.")
def count(self, **partitions):
return sum(map(lambda l: l.count(**partitions), self._logs))
class _HitLog(AbstractLog):
# This is a storage class that keep track of the hits that have
# occurred over a given duration.
# This particular implementation keeps track of hits in-memory.
def __init__(self, duration, _):
self._hits = []
self._delta = duration \
if isinstance(duration, timedelta) \
else timedelta(seconds=duration)
self._thread_lock = Lock()
self._counter = PyCounter()
def _prune(self):
if self._delta.total_seconds() < 0:
# negative seconds means keep everything.
return
now = datetime.now()
with self._thread_lock:
while self._hits and (now - self._hits[0][0]) > self._delta:
time, pickled_counter_keys = heapq.heappop(self._hits)
self._counter.subtract(pickle.loads(pickled_counter_keys))
def _generate_counter_keys(self, partitions):
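# Every non-empty subset of this hit's partition assignments becomes a
# counter key (sorted into a canonical order), so count() can later be
# queried by any combination of partition groups.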
sub_keys = chain.from_iterable(
combinations(partitions, r) for r in range(1, len(partitions)+1)
)
for key_list in sub_keys:
counter_key = tuple(
sorted(map(lambda k: (k, partitions[k]), key_list))
)
yield counter_key
def track(self, partitions):
now = datetime.now()
with self._thread_lock:
counter_keys = tuple(self._generate_counter_keys(partitions))
heapq.heappush(self._hits, (now, pickle.dumps(counter_keys)))
self._counter.update(counter_keys)
def count(self, **partitions):
self._prune()
if not partitions:
return len(self._hits)
else:
counter_key = tuple(sorted(partitions.items()))
return self._counter[counter_key]
def __add__(self, other):
if isinstance(other, _HitLog):
if self._delta != other._delta:
return NotImplemented
else:
new_log = _HitLog(self._delta, None)
new_log._hits.extend(self._hits)
new_log._hits.extend(other._hits)
heapq.heapify(new_log._hits)
new_log._counter.update(self._counter)
new_log._counter.update(other._counter)
return new_log
else:
return NotImplemented
def __iter__(self):
ascending = heapq.nsmallest(self.count(), self._hits)
for time, partitions in ascending:
yield Hit(time, partitions)
def __len__(self):
return self.count()
def __repr__(self):
return "HitLog({})".format(self.count())
Hit = namedtuple("Hit", "time parts")
|
|
import collections
import functools
import math
from dimagi.utils import parsing as dateparse
from datetime import datetime, timedelta
from casexml.apps.stock import const
from casexml.apps.stock.models import StockTransaction
from dimagi.utils.dates import force_to_datetime
DEFAULT_CONSUMPTION_FUNCTION = lambda case_id, product_id: None
class ConsumptionConfiguration(object):
DEFAULT_MIN_PERIODS = 2
DEFAULT_MIN_WINDOW = 10
DEFAULT_MAX_WINDOW = 60
def __init__(self, min_periods=None, min_window=None, max_window=None,
default_monthly_consumption_function=None):
def _default_if_none(value, default):
return value if value is not None else default
# the minimum number of consumption periods to include in a calculation
# periods are intervals between stock reports
self.min_periods = _default_if_none(min_periods, self.DEFAULT_MIN_PERIODS)
# the minimum total time of consumption data to include (in days)
# consumption should resort to static defaults if less than this
# amount of data is available
self.min_window = _default_if_none(min_window, self.DEFAULT_MIN_WINDOW)
# the maximum time to look backwards for consumption data (in days)
# data before this period will not be included in the calculation
self.max_window = _default_if_none(max_window, self.DEFAULT_MAX_WINDOW)
self.default_monthly_consumption_function = _default_if_none(default_monthly_consumption_function,
DEFAULT_CONSUMPTION_FUNCTION)
@classmethod
def test_config(cls):
return cls(0, 0, 60)
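# A hedged example of a custom configuration: require at least 3 reporting
# periods spanning 14+ days, look back at most 90 days, and fall back to a
# flat default of 10 units per month. All numbers here are illustrative.
def _example_configuration():
    return ConsumptionConfiguration(
        min_periods=3,
        min_window=14,
        max_window=90,
        default_monthly_consumption_function=lambda case_id, product_id: 10,
    )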
def from_ts(dt):
# damn this is ugly
if isinstance(dt, datetime):
return dt.replace(tzinfo=None)
if len(dt) > 20 and dt.endswith('Z'):
# deal with invalid timestamps (where are these coming from?)
dt = dt[:-1]
return dateparse.string_to_datetime(dt).replace(tzinfo=None)
to_ts = dateparse.json_format_datetime
def span_days(start, end):
span = end - start
return span.days + span.seconds / 86400.
def compute_consumption(case_id,
product_id,
window_end,
section_id=const.SECTION_TYPE_STOCK,
configuration=None):
"""
Computes the consumption for a product at a supply point.
Can optionally pass a section_id, but by default the 'stock'
value is used for computation.
Returns None if there is insufficient history.
"""
configuration = configuration or ConsumptionConfiguration()
window_start = window_end - timedelta(days=configuration.max_window)
transactions = get_transactions(
case_id,
product_id,
section_id,
window_start,
window_end
)
return compute_consumption_from_transactions(
transactions, window_start, configuration
)
def compute_consumption_or_default(case_id,
product_id,
window_end,
section_id=const.SECTION_TYPE_STOCK,
configuration=None):
"""
Used when it's not important to know if the consumption
value is real or just a default value
"""
configuration = configuration or ConsumptionConfiguration()
consumption = compute_consumption(
case_id,
product_id,
window_end,
section_id,
configuration
)
if consumption:
return consumption
else:
return compute_default_monthly_consumption(
case_id,
product_id,
configuration,
)
def compute_default_monthly_consumption(case_id, product_id, configuration):
return configuration.default_monthly_consumption_function(
case_id,
product_id,
)
def get_transactions(case_id, product_id, section_id, window_start, window_end):
"""
Given a case/product pair, get transactions in a format ready for consumption calc
"""
# todo: get rid of this middle layer once the consumption calc has
# been updated to deal with the regular transaction objects
SimpleTransaction = collections.namedtuple('SimpleTransaction', ['action', 'value', 'received_on'])
def _to_consumption_tx(txn):
if txn.type in (const.TRANSACTION_TYPE_STOCKONHAND, const.TRANSACTION_TYPE_STOCKOUT):
value = txn.stock_on_hand
else:
assert txn.type in (const.TRANSACTION_TYPE_RECEIPTS, const.TRANSACTION_TYPE_CONSUMPTION)
value = math.fabs(txn.quantity)
return SimpleTransaction(
action=txn.type,
value=value,
received_on=txn.report.date,
)
# todo: beginning of window date filtering
db_transactions = StockTransaction.objects.filter(
case_id=case_id, product_id=product_id,
report__date__gt=window_start,
report__date__lte=force_to_datetime(window_end),
section_id=section_id,
).order_by('report__date', 'pk')
first = True
for db_tx in db_transactions:
# for the very first transaction, include the previous one if there as well
# to capture the data on the edge of the window
if first:
previous = db_tx.get_previous_transaction()
if previous:
yield _to_consumption_tx(previous)
first = False
yield _to_consumption_tx(db_tx)
def compute_consumption_from_transactions(transactions, window_start, configuration=None):
configuration = configuration or ConsumptionConfiguration()
class ConsumptionPeriod(object):
def __init__(self, tx):
self.start = from_ts(tx.received_on)
self.end = None
self.consumption = 0
def add(self, tx):
self.consumption += tx.value
def close_out(self, tx):
self.end = from_ts(tx.received_on)
@property
def length(self):
return span_days(self.start, self.end)
@property
def normalized_length(self):
return span_days(max(self.start, window_start), max(self.end, window_start))
@property
def normalized_consumption(self):
return float(self.consumption) * self.normalized_length / self.length
def split_periods(transactions):
period = None
for tx in transactions:
base_action_type = tx.action
is_stockout = (
base_action_type == 'stockout' or
(base_action_type == 'stockonhand' and tx.value == 0) or
(base_action_type == 'stockedoutfor' and tx.value > 0)
)
is_checkpoint = (base_action_type == 'stockonhand' and not is_stockout)
if is_checkpoint:
if period:
period.close_out(tx)
yield period
period = ConsumptionPeriod(tx)
elif is_stockout:
if period:
# throw out current period
period = None
elif base_action_type == 'consumption':
# TODO in the future it's possible we'll want to break this out by action_type, in order to track
# different kinds of consumption: normal vs losses, etc.
if period:
period.add(tx)
periods = list(split_periods(transactions))
# exclude periods that occur entirely before the averaging window
periods = [period for period in periods if period.normalized_length]
total_consumption = sum(period.normalized_consumption for period in periods)
total_length = sum(period.normalized_length for period in periods)
# check minimum statistical significance thresholds
if len(periods) < configuration.min_periods or total_length < configuration.min_window:
return None
return total_consumption / float(total_length) if total_length else None
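# Illustrative sketch (commented out, not executed). It assumes transaction
# objects exposing .action/.value/.received_on like the SimpleTransaction tuples
# built in get_transactions(), that the action strings match those checked in
# split_periods() ('stockonhand', 'consumption', ...), and that the default
# ConsumptionConfiguration thresholds (min_periods / min_window) are satisfied.
# Tx and ts_day_* below are placeholders:
#
#   txs = [
#       Tx(action='stockonhand', value=100, received_on=ts_day_0),   # opens a period
#       Tx(action='consumption', value=30,  received_on=ts_day_5),   # adds 30 units
#       Tx(action='stockonhand', value=70,  received_on=ts_day_10),  # closes the period
#   ]
#   compute_consumption_from_transactions(txs, window_start=from_ts(ts_day_0))
#   # -> 30 units over a 10-day period == 3.0 units per day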
|
|
import os
import sys
import textwrap
import pytest
from tests.lib import (
assert_all_changes, pyversion, _create_test_package,
_change_test_package_version,
)
from tests.lib.local_repos import local_checkout
def test_no_upgrade_unless_requested(script):
"""
No upgrade if not specifically requested.
"""
script.pip('install', 'INITools==0.1', expect_error=True)
result = script.pip('install', 'INITools', expect_error=True)
assert not result.files_created, (
'pip install INITools upgraded when it should not have'
)
def test_upgrade_to_specific_version(script):
"""
It does upgrade to specific version requested.
"""
script.pip('install', 'INITools==0.1', expect_error=True)
result = script.pip('install', 'INITools==0.2', expect_error=True)
assert result.files_created, (
'pip install with specific version did not upgrade'
)
assert (
script.site_packages / 'INITools-0.1-py%s.egg-info' %
pyversion in result.files_deleted
)
assert (
script.site_packages / 'INITools-0.2-py%s.egg-info' %
pyversion in result.files_created
)
def test_upgrade_if_requested(script):
"""
And it does upgrade if requested.
"""
script.pip('install', 'INITools==0.1', expect_error=True)
result = script.pip('install', '--upgrade', 'INITools', expect_error=True)
assert result.files_created, 'pip install --upgrade did not upgrade'
assert (
script.site_packages / 'INITools-0.1-py%s.egg-info' %
pyversion not in result.files_created
)
def test_upgrade_with_newest_already_installed(script, data):
"""
If the newest version of a package is already installed, the package should
not be reinstalled and the user should be informed.
"""
script.pip('install', '-f', data.find_links, '--no-index', 'simple')
result = script.pip(
'install', '--upgrade', '-f', data.find_links, '--no-index', 'simple'
)
assert not result.files_created, 'simple upgraded when it should not have'
assert 'already up-to-date' in result.stdout, result.stdout
def test_upgrade_force_reinstall_newest(script):
"""
Force reinstallation of a package even if it is already at its newest
version if --force-reinstall is supplied.
"""
result = script.pip('install', 'INITools')
assert script.site_packages / 'initools' in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip(
'install', '--upgrade', '--force-reinstall', 'INITools'
)
assert result2.files_updated, 'upgrade to INITools 0.3 failed'
result3 = script.pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [script.venv / 'build', 'cache'])
def test_uninstall_before_upgrade(script):
"""
Automatic uninstall-before-upgrade.
"""
result = script.pip('install', 'INITools==0.2', expect_error=True)
assert script.site_packages / 'initools' in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip('install', 'INITools==0.3', expect_error=True)
assert result2.files_created, 'upgrade to INITools 0.3 failed'
result3 = script.pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [script.venv / 'build', 'cache'])
def test_uninstall_before_upgrade_from_url(script):
"""
Automatic uninstall-before-upgrade from URL.
"""
result = script.pip('install', 'INITools==0.2', expect_error=True)
assert script.site_packages / 'initools' in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip(
'install',
'http://pypi.python.org/packages/source/I/INITools/INITools-'
'0.3.tar.gz',
expect_error=True,
)
assert result2.files_created, 'upgrade to INITools 0.3 failed'
result3 = script.pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [script.venv / 'build', 'cache'])
def test_upgrade_to_same_version_from_url(script):
"""
When installing from a URL the same version that is already installed, no
need to uninstall and reinstall if --upgrade is not specified.
"""
result = script.pip('install', 'INITools==0.3', expect_error=True)
assert script.site_packages / 'initools' in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip(
'install',
'http://pypi.python.org/packages/source/I/INITools/INITools-'
'0.3.tar.gz',
expect_error=True,
)
assert not result2.files_updated, 'INITools 0.3 reinstalled same version'
result3 = script.pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [script.venv / 'build', 'cache'])
def test_upgrade_from_reqs_file(script):
"""
Upgrade from a requirements file.
"""
script.scratch_path.join("test-req.txt").write(textwrap.dedent("""\
PyLogo<0.4
# and something else to test out:
INITools==0.3
"""))
install_result = script.pip(
'install', '-r', script.scratch_path / 'test-req.txt'
)
script.scratch_path.join("test-req.txt").write(textwrap.dedent("""\
PyLogo
# and something else to test out:
INITools
"""))
script.pip(
'install', '--upgrade', '-r', script.scratch_path / 'test-req.txt'
)
uninstall_result = script.pip(
'uninstall', '-r', script.scratch_path / 'test-req.txt', '-y'
)
assert_all_changes(
install_result,
uninstall_result,
[script.venv / 'build', 'cache', script.scratch / 'test-req.txt'],
)
def test_uninstall_rollback(script, data):
"""
Test uninstall-rollback (using test package with a setup.py
crafted to fail on install).
"""
result = script.pip(
'install', '-f', data.find_links, '--no-index', 'broken==0.1'
)
assert script.site_packages / 'broken.py' in result.files_created, list(
result.files_created.keys()
)
result2 = script.pip(
'install', '-f', data.find_links, '--no-index', 'broken==0.2broken',
expect_error=True,
)
assert result2.returncode == 1, str(result2)
assert script.run(
'python', '-c', "import broken; print(broken.VERSION)"
).stdout == '0.1\n'
assert_all_changes(
result.files_after,
result2,
[script.venv / 'build'],
)
# Issue #530 - temporarily disable flaky test
@pytest.mark.skipif
def test_editable_git_upgrade(script):
"""
    Test installing an editable git package from a repository, upgrading the
    repository, installing again, and checking that the newer version is used
"""
version_pkg_path = _create_test_package(script)
script.pip(
'install', '-e',
'%s#egg=version_pkg' % ('git+file://' + version_pkg_path),
)
version = script.run('version_pkg')
assert '0.1' in version.stdout
_change_test_package_version(script, version_pkg_path)
script.pip(
'install', '-e',
'%s#egg=version_pkg' % ('git+file://' + version_pkg_path),
)
version2 = script.run('version_pkg')
assert 'some different version' in version2.stdout, (
"Output: %s" % (version2.stdout)
)
def test_should_not_install_always_from_cache(script):
"""
If there is an old cached package, pip should download the newer version
Related to issue #175
"""
script.pip('install', 'INITools==0.2', expect_error=True)
script.pip('uninstall', '-y', 'INITools')
result = script.pip('install', 'INITools==0.1', expect_error=True)
assert (
script.site_packages / 'INITools-0.2-py%s.egg-info' %
pyversion not in result.files_created
)
assert (
script.site_packages / 'INITools-0.1-py%s.egg-info' %
pyversion in result.files_created
)
def test_install_with_ignoreinstalled_requested(script):
"""
Test old conflicting package is completely ignored
"""
script.pip('install', 'INITools==0.1', expect_error=True)
result = script.pip('install', '-I', 'INITools==0.3', expect_error=True)
assert result.files_created, 'pip install -I did not install'
# both the old and new metadata should be present.
assert os.path.exists(
script.site_packages_path / 'INITools-0.1-py%s.egg-info' % pyversion
)
assert os.path.exists(
script.site_packages_path / 'INITools-0.3-py%s.egg-info' % pyversion
)
def test_upgrade_vcs_req_with_no_dists_found(script, tmpdir):
"""It can upgrade a VCS requirement that has no distributions otherwise."""
req = "%s#egg=pip-test-package" % local_checkout(
"git+http://github.com/pypa/pip-test-package.git",
tmpdir.join("cache"),
)
script.pip("install", req)
result = script.pip("install", "-U", req)
assert not result.returncode
def test_upgrade_vcs_req_with_dist_found(script):
"""It can upgrade a VCS requirement that has distributions on the index."""
# TODO(pnasrat) Using local_checkout fails on windows - oddness with the
# test path urls/git.
req = (
"%s#egg=pretend" %
(
"git+git://github.com/alex/pretend@e7f26ad7dbcb4a02a4995aade4"
"743aad47656b27"
)
)
script.pip("install", req)
result = script.pip("install", "-U", req)
assert "pypi.python.org" not in result.stdout, result.stdout
class TestUpgradeSetuptools(object):
"""
Tests for upgrading to setuptools (using pip from src tree)
The tests use a *fixed* set of packages from our test packages dir
note: virtualenv-1.9.1 contains distribute-0.6.34
note: virtualenv-1.10 contains setuptools-0.9.7
"""
def prep_ve(self, script, version, pip_src, distribute=False):
self.script = script
self.script.pip_install_local('virtualenv==%s' % version)
args = ['virtualenv', self.script.scratch_path / 'VE']
if distribute:
args.insert(1, '--distribute')
if version == "1.9.1" and not distribute:
# setuptools 0.6 didn't support PYTHONDONTWRITEBYTECODE
del self.script.environ["PYTHONDONTWRITEBYTECODE"]
self.script.run(*args)
if sys.platform == 'win32':
bindir = "Scripts"
else:
bindir = "bin"
self.ve_bin = self.script.scratch_path / 'VE' / bindir
self.script.run(self.ve_bin / 'pip', 'uninstall', '-y', 'pip')
self.script.run(
self.ve_bin / 'python', 'setup.py', 'install',
cwd=pip_src,
expect_stderr=True,
)
@pytest.mark.skipif("sys.version_info >= (3,0)")
def test_py2_from_setuptools_6_to_setuptools_7(
self, script, data, virtualenv):
self.prep_ve(script, '1.9.1', virtualenv.pip_source_dir)
result = self.script.run(
self.ve_bin / 'pip', 'install', '--no-use-wheel', '--no-index',
'--find-links=%s' % data.find_links, '-U', 'setuptools'
)
assert (
"Found existing installation: setuptools 0.6c11" in result.stdout
)
result = self.script.run(self.ve_bin / 'pip', 'list')
"setuptools (0.9.8)" in result.stdout
def test_py2_py3_from_distribute_6_to_setuptools_7(
self, script, data, virtualenv):
self.prep_ve(
script, '1.9.1', virtualenv.pip_source_dir, distribute=True
)
result = self.script.run(
self.ve_bin / 'pip', 'install', '--no-index',
'--find-links=%s' % data.find_links, '-U', 'setuptools'
)
assert (
"Found existing installation: distribute 0.6.34" in result.stdout
)
result = self.script.run(self.ve_bin / 'pip', 'list')
"setuptools (0.9.8)" in result.stdout
"distribute (0.7.3)" in result.stdout
def test_from_setuptools_7_to_setuptools_7(self, script, data, virtualenv):
self.prep_ve(script, '1.10', virtualenv.pip_source_dir)
result = self.script.run(
self.ve_bin / 'pip', 'install', '--no-index',
'--find-links=%s' % data.find_links, '-U', 'setuptools'
)
assert "Found existing installation: setuptools 0.9.7" in result.stdout
result = self.script.run(self.ve_bin / 'pip', 'list')
"setuptools (0.9.8)" in result.stdout
def test_from_setuptools_7_to_setuptools_7_using_wheel(
self, script, data, virtualenv):
self.prep_ve(script, '1.10', virtualenv.pip_source_dir)
result = self.script.run(
self.ve_bin / 'pip', 'install', '--use-wheel', '--no-index',
'--find-links=%s' % data.find_links, '-U', 'setuptools'
)
assert "Found existing installation: setuptools 0.9.7" in result.stdout
# only wheels use dist-info
assert 'setuptools-0.9.8.dist-info' in str(result.files_created)
result = self.script.run(self.ve_bin / 'pip', 'list')
"setuptools (0.9.8)" in result.stdout
# disabling intermittent travis failure:
# https://github.com/pypa/pip/issues/1379
@pytest.mark.skipif("hasattr(sys, 'pypy_version_info')")
def test_from_setuptools_7_to_setuptools_7_with_distribute_7_installed(
self, script, data, virtualenv):
self.prep_ve(
script, '1.9.1', virtualenv.pip_source_dir, distribute=True
)
result = self.script.run(
self.ve_bin / 'pip', 'install', '--no-index',
'--find-links=%s' % data.find_links, '-U', 'setuptools'
)
result = self.script.run(
self.ve_bin / 'pip', 'install', '--no-index',
'--find-links=%s' % data.find_links, 'setuptools==0.9.6'
)
result = self.script.run(self.ve_bin / 'pip', 'list')
"setuptools (0.9.6)" in result.stdout
"distribute (0.7.3)" in result.stdout
result = self.script.run(
self.ve_bin / 'pip', 'install', '--no-index',
'--find-links=%s' % data.find_links, '-U', 'setuptools'
)
assert "Found existing installation: setuptools 0.9.6" in result.stdout
result = self.script.run(self.ve_bin / 'pip', 'list')
"setuptools (0.9.8)" in result.stdout
"distribute (0.7.3)" in result.stdout
|
|
import base64
import datetime
import logging
import os
import pickle
import re
import hiyapyco
import sarge
from simple_salesforce import Salesforce
import yaml
import requests
from distutils.version import LooseVersion # pylint: disable=import-error,no-name-in-module
from github3 import login
from Crypto import Random
from Crypto.Cipher import AES
from cumulusci.core.exceptions import ConfigError
from cumulusci.core.exceptions import NotInProject
from cumulusci.core.exceptions import KeychainConnectedAppNotFound
from cumulusci.core.exceptions import ProjectConfigNotFound
from cumulusci.core.exceptions import ScratchOrgException
from cumulusci.core.exceptions import SOQLQueryException
from cumulusci.core.exceptions import KeychainNotFound
from cumulusci.oauth.salesforce import SalesforceOAuth2
__location__ = os.path.dirname(os.path.realpath(__file__))
class BaseConfig(object):
""" Base class for all configuration objects """
defaults = {}
search_path = ['config']
def __init__(self, config=None):
if config is None:
self.config = {}
else:
self.config = config
self._init_logger()
self._load_config()
def _init_logger(self):
""" Initializes self.logger """
self.logger = logging.getLogger(__name__)
def _load_config(self):
""" Performs the logic to initialize self.config """
pass
def __getattr__(self, name):
tree = name.split('__')
if name.startswith('_'):
raise AttributeError('Attribute {} not found'.format(name))
value = None
value_found = False
for attr in self.search_path:
config = getattr(self, attr)
if len(tree) > 1:
# Walk through the config dictionary using __ as a delimiter
for key in tree[:-1]:
config = config.get(key)
if config is None:
break
if config is None:
continue
if tree[-1] in config:
value = config[tree[-1]]
value_found = True
break
if value_found:
return value
else:
return self.defaults.get(name)
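# Illustrative lookups (hypothetical values), showing how the '__' delimiter in
# __getattr__ walks nested dictionaries on the search_path:
#
#   cfg = BaseConfig({'project': {'git': {'prefix_beta': 'beta/'}}})
#   cfg.project__git__prefix_beta   # -> 'beta/'
#   cfg.project__git__missing       # -> None (falls through to self.defaults)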
class TaskConfig(BaseConfig):
""" A task with its configuration merged """
pass
class FlowConfig(BaseConfig):
""" A flow with its configuration merged """
pass
class BaseTaskFlowConfig(BaseConfig):
""" Base class for all configs that contain tasks and flows """
def list_tasks(self):
""" Returns a list of task info dictionaries with keys 'name' and 'description' """
tasks = []
for task in self.tasks.keys():
task_info = self.tasks[task]
if not task_info:
task_info = {}
tasks.append({
'name': task,
'description': task_info.get('description'),
})
return tasks
def get_task(self, name):
""" Returns a TaskConfig """
config = getattr(self, 'tasks__{}'.format(name))
return TaskConfig(config)
def list_flows(self):
""" Returns a list of flow info dictionaries with keys 'name' and 'description' """
flows = []
return flows
def get_flow(self, name):
""" Returns a FlowConfig """
config = getattr(self, 'flows__{}'.format(name))
return FlowConfig(config)
class BaseProjectConfig(BaseTaskFlowConfig):
""" Base class for a project's configuration which extends the global config """
search_path = ['config']
def __init__(self, global_config_obj):
self.global_config_obj = global_config_obj
self.keychain = None
super(BaseProjectConfig, self).__init__()
@property
def config_global_local(self):
return self.global_config_obj.config_global_local
@property
def config_global(self):
return self.global_config_obj.config_global
@property
def repo_root(self):
root = None
pwd = os.getcwd().split(os.sep)
while pwd:
if os.path.isdir(os.path.join(os.sep, os.path.join(*pwd),'.git')):
break
else:
pwd.pop()
if pwd:
return os.path.join(os.sep, os.path.join(*pwd))
@property
def repo_name(self):
if not self.repo_root:
return
in_remote_origin = False
with open(os.path.join(self.repo_root, '.git', 'config'), 'r') as f:
for line in f:
line = line.strip()
if line == '[remote "origin"]':
in_remote_origin = True
continue
if in_remote_origin and line.find('url =') != -1:
line_parts = line.split('/')
repo_name = line_parts[-1]
if repo_name.endswith('.git'):
repo_name = repo_name[:-4]
return repo_name
@property
def repo_url(self):
if not self.repo_root:
return
git_config_file = os.path.join(self.repo_root, '.git', 'config')
with open(git_config_file, 'r') as f:
in_remote_origin = False
for line in f:
line = line.strip()
if line == '[remote "origin"]':
in_remote_origin = True
continue
if in_remote_origin and 'url = ' in line:
return line[7:]
@property
def repo_owner(self):
if not self.repo_root:
return
in_remote_origin = False
with open(os.path.join(self.repo_root, '.git', 'config'), 'r') as f:
for line in f:
line = line.strip()
if line == '[remote "origin"]':
in_remote_origin = True
continue
if in_remote_origin and line.find('url =') != -1:
line_parts = line.split('/')
return line_parts[-2].split(':')[-1]
@property
def repo_branch(self):
if not self.repo_root:
return
with open(os.path.join(self.repo_root, '.git', 'HEAD'), 'r') as f:
branch_ref = f.read().strip()
if branch_ref.startswith('ref: '):
return '/'.join(branch_ref[5:].split('/')[2:])
@property
def repo_commit(self):
if not self.repo_root:
return
branch = self.repo_branch
if not branch:
return
join_args = [self.repo_root, '.git', 'refs', 'heads']
join_args.extend(branch.split('/'))
commit_file = os.path.join(*join_args)
commit_sha = None
if os.path.isfile(commit_file):
with open(commit_file, 'r') as f:
commit_sha = f.read().strip()
else:
packed_refs_path = os.path.join(
self.repo_root,
'.git',
'packed-refs'
)
with open(packed_refs_path, 'r') as f:
for line in f:
parts = line.split(' ')
if len(parts) == 1:
                        # Skip lines showing the commit sha of a tag on the preceding line
continue
if parts[1].replace('refs/remotes/origin/', '').strip() == branch:
commit_sha = parts[0]
break
return commit_sha
def get_latest_version(self, beta=None):
""" Query Github Releases to find the latest production or beta release """
github_config = self.keychain.get_service('github')
gh = login(github_config.username, github_config.password)
repo = gh.repository(self.repo_owner, self.repo_name)
latest_version = None
for release in repo.iter_releases():
if beta:
if 'Beta' not in release.tag_name:
continue
else:
if 'Beta' in release.tag_name:
continue
version = self.get_version_for_tag(release.tag_name)
if version is None:
continue
version = LooseVersion(version)
if not latest_version or version > latest_version:
latest_version = version
return latest_version
@property
def config_project_path(self):
if not self.repo_root:
return
path = os.path.join(self.repo_root, self.config_filename)
if os.path.isfile(path):
return path
@property
def project_local_dir(self):
""" location of the user local directory for the project
e.g., ~/.cumulusci/NPSP-Extension-Test/ """
# depending on where we are in bootstrapping the YamlGlobalConfig
# the canonical projectname could be located in one of two places
if self.project__name:
name = self.project__name
else:
try:
name = self.config_project['project']['name']
except KeyError:
name = ''
path = os.path.join(
os.path.expanduser('~'),
self.global_config_obj.config_local_dir,
name,
)
if not os.path.isdir(path):
os.makedirs(path)
return path
def get_tag_for_version(self, version):
if '(Beta' in version:
tag_version = version.replace(' (','-').replace(')','').replace(' ','_')
tag_name = self.project__git__prefix_beta + tag_version
else:
tag_name = self.project__git__prefix_release + version
return tag_name
def get_version_for_tag(self, tag):
if not tag.startswith(self.project__git__prefix_beta) and not tag.startswith(self.project__git__prefix_release):
return None
if 'Beta' in tag:
version = tag[len(self.project__git__prefix_beta):]
version = version.replace('-',' (').replace('_',' ') + ')'
else:
version = tag[len(self.project__git__prefix_release):]
return version
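    # Illustrative round-trip (assuming the common cumulusci.yml prefixes
    # project__git__prefix_beta == 'beta/' and project__git__prefix_release == 'release/'):
    #   get_tag_for_version('1.2 (Beta 3)')    -> 'beta/1.2-Beta_3'
    #   get_version_for_tag('beta/1.2-Beta_3') -> '1.2 (Beta 3)'
    #   get_version_for_tag('release/1.2')     -> '1.2'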
def set_keychain(self, keychain):
self.keychain = keychain
def _check_keychain(self):
if not self.keychain:
raise KeychainNotFound('Could not find config.keychain. You must call config.set_keychain(keychain) before accessing orgs')
def list_orgs(self):
""" Returns a list of all org names for the project """
self._check_keychain()
return self.keychain.list_orgs()
def get_org(self, name):
""" Returns an OrgConfig for the given org_name """
self._check_keychain()
return self.keychain.get_org(name)
def set_org(self, name, org_config):
""" Creates or updates an org's oauth info """
self._check_keychain()
return self.keychain.set_org(name, org_config)
class BaseGlobalConfig(BaseTaskFlowConfig):
""" Base class for the global config which contains all configuration not specific to projects """
project_config_class = BaseProjectConfig
config_local_dir = '.cumulusci'
def list_projects(self):
""" Returns a list of project names """
raise NotImplementedError('Subclasses must provide an implementation')
def get_project_config(self):
""" Returns a ProjectConfig for the given project """
return self.project_config_class(self)
def create_project(self, project_name, config):
""" Creates a new project configuration and returns it """
raise NotImplementedError('Subclasses must provide an implementation')
class ConnectedAppOAuthConfig(BaseConfig):
""" Salesforce Connected App OAuth configuration """
pass
class OrgConfig(BaseConfig):
""" Salesforce org configuration (i.e. org credentials) """
def refresh_oauth_token(self, connected_app):
client_id = self.client_id
client_secret = self.client_secret
if not client_id:
client_id = connected_app.client_id
client_secret = connected_app.client_secret
sf_oauth = SalesforceOAuth2(
client_id,
client_secret,
connected_app.callback_url, # Callback url isn't really used for this call
auth_site=self.instance_url,
)
resp = sf_oauth.refresh_token(self.refresh_token).json()
if resp != self.config:
self.config.update(resp)
self._load_userinfo()
@property
def start_url(self):
start_url = '%s/secur/frontdoor.jsp?sid=%s' % (self.instance_url, self.access_token)
return start_url
@property
def user_id(self):
return self.id.split('/')[-1]
@property
def org_id(self):
return self.id.split('/')[-2]
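    # The `id` field is the OAuth identity URL, e.g.
    # 'https://login.salesforce.com/id/<org_id>/<user_id>', which is why the two
    # properties above split on '/' and take the last two path segments.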
@property
def username(self):
""" Username for the org connection. """
return self.userinfo__preferred_username
def load_userinfo(self):
self._load_userinfo()
def _load_userinfo(self):
headers = {"Authorization":"Bearer " + self.access_token}
response = requests.get(self.instance_url+"/services/oauth2/userinfo", headers=headers)
        userinfo = response.json()
        if userinfo != self.config.get('userinfo', {}):
            self.config.update({'userinfo': userinfo})
class ScratchOrgConfig(OrgConfig):
""" Salesforce DX Scratch org configuration """
@property
def scratch_info(self):
if hasattr(self, '_scratch_info'):
return self._scratch_info
# Create the org if it hasn't already been created
if not self.created:
self.create_org()
self.logger.info('Getting scratch org info from Salesforce DX')
# Call force:org:open and parse output to get instance_url and access_token
command = 'heroku force:org:open -d -u {}'.format(self.username)
p = sarge.Command(command, stdout=sarge.Capture(buffer_size=-1))
p.run()
org_info = None
stdout_list = []
for line in p.stdout:
if line.startswith('Access org'):
org_info = line.strip()
stdout_list.append(line.strip())
if p.returncode:
message = 'Return code: {}\nstdout: {}\nstderr: {}'.format(
p.returncode,
'\n'.join(stdout_list),
p.stderr,
)
self.logger.error(message)
raise ScratchOrgException(message)
if not org_info:
message = 'Did not find org info in command output:\n{}'.format(p.stdout)
self.logger.error(message)
raise ScratchOrgException(message)
# OrgID is the third word of the output
org_id = org_info.split(' ')[2]
# Username is the sixth word of the output
username = org_info.split(' ')[5]
info_parts = org_info.split('following URL: ')
if len(info_parts) == 1:
message = 'Did not find org info in command output:\n{}'.format(p.stdout)
self.logger.error(message)
raise ScratchOrgException(message)
instance_url, access_token = info_parts[1].split('/secur/frontdoor.jsp?sid=')
self._scratch_info = {
'instance_url': instance_url,
'access_token': access_token,
'org_id': org_id,
'username': username,
}
self._scratch_info_date = datetime.datetime.now()
return self._scratch_info
@property
def access_token(self):
return self.scratch_info['access_token']
@property
def instance_url(self):
return self.scratch_info['instance_url']
@property
def org_id(self):
org_id = self.config.get('org_id')
if not org_id:
org_id = self.scratch_info['org_id']
return org_id
@property
def user_id(self):
if not self.config.get('user_id'):
sf = Salesforce(
instance=self.instance_url.replace('https://', ''),
session_id=self.access_token,
version='38.0',
)
result = sf.query_all(
"SELECT Id FROM User WHERE UserName='{}'".format(
self.username
)
)
self.config['user_id'] = result['records'][0]['Id']
return self.config['user_id']
@property
def username(self):
username = self.config.get('username')
if not username:
username = self.scratch_info['username']
return username
def create_org(self):
""" Uses heroku force:org:create to create the org """
if not self.config_file:
# FIXME: raise exception
return
if not self.scratch_org_type:
self.config['scratch_org_type'] = 'workspace'
command = 'heroku force:org:create -t {} -f {}'.format(self.scratch_org_type, self.config_file)
self.logger.info('Creating scratch org with command {}'.format(command))
p = sarge.Command(command, stdout=sarge.Capture(buffer_size=-1))
p.run()
org_info = None
re_obj = re.compile('Successfully created workspace org: (.+), username: (.+)')
stdout = []
for line in p.stdout:
match = re_obj.search(line)
if match:
self.config['org_id'] = match.group(1)
self.config['username'] = match.group(2)
stdout.append(line)
self.logger.info(line)
if p.returncode:
message = 'Failed to create scratch org: \n{}'.format(''.join(stdout))
raise ScratchOrgException(message)
# Flag that this org has been created
self.config['created'] = True
def delete_org(self):
""" Uses heroku force:org:delete to create the org """
if not self.created:
self.logger.info('Skipping org deletion: the scratch org has not been created')
return
command = 'heroku force:org:delete --force -u {}'.format(self.username)
self.logger.info('Deleting scratch org with command {}'.format(command))
p = sarge.Command(command, stdout=sarge.Capture(buffer_size=-1))
p.run()
org_info = None
stdout = []
for line in p.stdout:
stdout.append(line)
if line.startswith('An error occurred deleting this org'):
self.logger.error(line)
else:
self.logger.info(line)
if p.returncode:
message = 'Failed to delete scratch org: \n{}'.format(''.join(stdout))
raise ScratchOrgException(message)
        # Flag that this org has been deleted
self.config['created'] = False
self.config['username'] = None
def refresh_oauth_token(self, connected_app):
""" Use heroku force:org:open to refresh token instead of built in OAuth handling """
if hasattr(self, '_scratch_info'):
# Cache the scratch_info for 1 hour to avoid unnecessary calls out to heroku CLI
delta = datetime.datetime.now() - self._scratch_info_date
if delta.total_seconds() > 3600:
del self._scratch_info
# This triggers a refresh
self.scratch_info
class ServiceConfig(BaseConfig):
pass
class YamlProjectConfig(BaseProjectConfig):
config_filename = 'cumulusci.yml'
@property
def config_project_local_path(self):
path = os.path.join(
self.project_local_dir,
self.config_filename,
)
if os.path.isfile(path):
return path
def _load_config(self):
""" Loads the configuration for the project """
# Initialize the dictionaries for the individual configs
self.config_project = {}
self.config_project_local = {}
# Verify that we're in a project
repo_root = self.repo_root
if not repo_root:
raise NotInProject('No repository found in current path. You must be inside a repository to initialize the project configuration')
# Verify that the project's root has a config file
if not self.config_project_path:
raise ProjectConfigNotFound(
'The file {} was not found in the repo root: {}'.format(
self.config_filename,
repo_root
)
)
# Start the merged yaml config from the global and global local configs
merge_yaml = [self.global_config_obj.config_global_path]
if self.global_config_obj.config_global_local_path:
merge_yaml.append(self.global_config_obj.config_global_local_path)
# Load the project's yaml config file
with open(self.config_project_path, 'r') as f_config:
project_config = yaml.load(f_config)
if project_config:
self.config_project.update(project_config)
merge_yaml.append(self.config_project_path)
# Load the local project yaml config file if it exists
if self.config_project_local_path:
with open(self.config_project_local_path, 'r') as f_local_config:
local_config = yaml.load(f_local_config)
if local_config:
self.config_project_local.update(local_config)
merge_yaml.append(self.config_project_local_path)
self.config = hiyapyco.load(*merge_yaml, method=hiyapyco.METHOD_MERGE)
class YamlGlobalConfig(BaseGlobalConfig):
config_filename = 'cumulusci.yml'
config_local_dir = '.cumulusci'
project_config_class = YamlProjectConfig
def __init__(self):
self.config_global_local = {}
self.config_global = {}
super(YamlGlobalConfig, self).__init__()
@property
def config_global_local_path(self):
directory = os.path.join(
os.path.expanduser('~'),
self.config_local_dir,
)
if not os.path.exists(directory):
os.makedirs(directory)
config_path = os.path.join(
directory,
self.config_filename,
)
if not os.path.isfile(config_path):
return None
return config_path
def _load_config(self):
""" Loads the local configuration """
# load the global config
self._load_global_config()
merge_yaml = [self.config_global_path]
# Load the local config
        if self.config_global_local_path:
            with open(self.config_global_local_path, 'r') as f_local_config:
                config = yaml.load(f_local_config)
            self.config_global_local = config
if config:
merge_yaml.append(self.config_global_local_path)
self.config = hiyapyco.load(*merge_yaml, method=hiyapyco.METHOD_MERGE)
@property
def config_global_path(self):
return os.path.join( __location__, '..', self.config_filename)
def _load_global_config(self):
""" Loads the configuration for the project """
# Load the global cumulusci.yml file
with open(self.config_global_path, 'r') as f_config:
config = yaml.load(f_config)
self.config_global = config
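# Typical bootstrap sketch (illustrative; the cumulusci CLI also wires up a
# keychain before orgs can be accessed, and this raises NotInProject /
# ProjectConfigNotFound outside an initialized repository):
#
#   global_config = YamlGlobalConfig()                     # merges global + ~/.cumulusci config
#   project_config = global_config.get_project_config()    # YamlProjectConfig for the current repo
#   print(project_config.project__name)                    # value merged from cumulusci.yml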
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropInputGrad(op, grad):
"""The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter
"""
return [None,
nn_ops.conv2d_backprop_filter(grad, array_ops.shape(op.inputs[1]),
op.inputs[2], op.get_attr("strides"),
op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format")),
nn_ops.conv2d(grad, op.inputs[1], op.get_attr("strides"),
op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format"))]
@ops.RegisterGradient("Conv2DBackpropFilter")
def _Conv2DBackpropFilterGrad(op, grad):
return [
nn_ops.conv2d_backprop_input(
array_ops.shape(op.inputs[0]), grad, op.inputs[2],
op.get_attr("strides"),
op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format")),
None,
nn_ops.conv2d(
op.inputs[0], grad,
op.get_attr("strides"),
op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format"))
]
@ops.RegisterGradient("Conv3D")
def _Conv3DGrad(op, grad):
data_format = op.get_attr("data_format")
return [nn_ops.conv3d_backprop_input_v2(array_ops.shape(op.inputs[0]),
op.inputs[1],
grad,
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format),
nn_ops.conv3d_backprop_filter_v2(op.inputs[0],
array_ops.shape(op.inputs[1]),
grad,
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)]
@ops.RegisterGradient("Conv3DBackpropInputV2")
def _Conv3DBackpropInputGrad(op, grad):
data_format = op.get_attr("data_format")
return [None,
nn_ops.conv3d_backprop_filter_v2(grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format),
nn_ops.conv3d(grad,
op.inputs[1],
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)]
@ops.RegisterGradient("Conv3DBackpropFilterV2")
def _Conv3DBackpropFilterGrad(op, grad):
data_format = op.get_attr("data_format")
return [nn_ops.conv3d_backprop_input_v2(array_ops.shape(op.inputs[0]),
grad,
op.inputs[2],
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format),
None,
nn_ops.conv3d(op.inputs[0],
grad,
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)]
@ops.RegisterGradient("AvgPool3D")
def _AvgPool3DGrad(op, grad):
return gen_nn_ops._avg_pool3d_grad(
array_ops.shape(op.inputs[0]),
grad,
ksize=op.get_attr("ksize"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("AvgPool3DGrad")
def _AvgPool3DGradGrad(op, grad):
return (array_ops.stop_gradient(op.inputs[0]), gen_nn_ops.avg_pool3d(
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPool3D")
def _MaxPool3DGrad(op, grad):
return gen_nn_ops._max_pool3d_grad(
op.inputs[0],
op.outputs[0],
grad,
ksize=op.get_attr("ksize"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("MaxPool3DGrad")
def _MaxPool3DGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]),
dtype=op.inputs[0].dtype), array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops._max_pool3d_grad_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPool3DGradGrad")
def _MaxPool3DGradGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]),
dtype=op.inputs[0].dtype), array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops._max_pool3d_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
"""The derivative of the softmax nonlinearity.
  We assume that probs is of shape [batch_size, dim].
  The Jacobian is dsoftmax / dx = diag(softmax) - softmax * softmax',
  i.e. a diagonal matrix minus a rank-one matrix, so it is easy to implement
  as follows:
    grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
Args:
op: the Softmax op.
grad_softmax: the tensor representing the gradient w.r.t. the
softmax output.
Returns:
gradient w.r.t the input to the softmax
"""
# TODO(ilyasu): assert that the tensor has two dimensions at
# graph-construction time? Alternatively: do different things
# depending on the dimensionality of the input tensors.
softmax = op.outputs[0]
grad_x = ((grad_softmax - array_ops.reshape(
math_ops.reduce_sum(grad_softmax * softmax, [1]), [-1, 1])) * softmax)
return grad_x
@ops.RegisterGradient("LogSoftmax")
def _LogSoftmaxGrad(op, grad):
"""The gradient for log_softmax.
    log_softmax = input - log(sum(exp(input)))
dlog_softmax/dinput = diag - softmax(input)
Args:
op: The log softmax op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
"""
softmax = math_ops.exp(op.outputs[0])
return grad - math_ops.reduce_sum(grad, 1, keep_dims=True) * softmax
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
  The first input of bias_op is the tensor t, and its gradient is
  just the gradient that bias_op received.
  The second input of bias_op is the bias vector, which has one fewer
  dimension than "received_grad". Its gradient is the received gradient
  summed over every dimension except the feature (bias) dimension.
Args:
op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
return (received_grad, gen_nn_ops.bias_add_grad(out_backprop=received_grad,
data_format=data_format))
@ops.RegisterGradient("BiasAddGrad")
def _BiasAddGradGrad(op, received_grad):
"""Gradient for the BiasAddGrad op.
Args:
op: BiasAddGrad op for which we are calculating gradients.
received_grad: The gradients passed to the BiasAddGrad op.
Returns:
A single gradient Tensor for the input to BiasAddGrad (which
is the gradient of the bias term in BiasAdd)
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
shape = array_ops.shape(op.inputs[0])
rank = array_ops.rank(op.inputs[0])
bias_shape = array_ops.shape(received_grad)
if data_format == b"NCHW":
expanded_shape = array_ops.concat([
array_ops.ones_like(shape[:-3]), bias_shape,
array_ops.ones_like(shape[-2:])
], 0)
tile_mults = array_ops.concat([shape[:-3], [1], shape[-2:]], 0)
else:
expanded_shape = array_ops.concat(
[array_ops.ones_like(shape[:-1]), bias_shape], 0)
tile_mults = array_ops.concat([shape[:-1], [1]], 0)
expanded_grad = array_ops.reshape(received_grad, expanded_shape)
return array_ops.tile(expanded_grad, tile_mults)
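# Shape walk-through for the NCHW branch above (illustrative): for an input of
# shape [N, C, H, W] and a bias gradient of shape [C], expanded_shape becomes
# [1, C, 1, 1] and tile_mults becomes [N, 1, H, W], so the bias gradient is
# broadcast back to the full [N, C, H, W] input shape.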
@ops.RegisterGradient("BiasAddV1")
def _BiasAddGradV1(unused_bias_op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
  The first input of unused_bias_op is the tensor t, and its gradient is
  just the gradient the unused_bias_op received.
  The second input of unused_bias_op is the bias vector, which has one fewer
  dimension than "received_grad". Its gradient is the received gradient
  summed over every dimension except the last (the bias dimension).
Args:
unused_bias_op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)
return (received_grad, math_ops.reduce_sum(received_grad,
reduction_dim_tensor))
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
return gen_nn_ops._relu_grad(grad, op.outputs[0])
@ops.RegisterGradient("EluGrad")
def _EluGradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops._elu_grad(grad, op.outputs[0]),
array_ops.where(
x < 0., gen_nn_ops._elu_grad(grad, op.outputs[0] + 1),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype)))
@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
return gen_nn_ops._relu6_grad(grad, op.inputs[0])
@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
return gen_nn_ops._elu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
return gen_nn_ops._softplus_grad(grad, op.inputs[0])
@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
return gen_nn_ops._softsign_grad(grad, op.inputs[0])
@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops._relu_grad(grad, x), array_ops.zeros(
shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
"""Multiply after broadcasting vec to match dimensions of mat.
Args:
vec: A 1-D tensor of dimension [D0]
mat: A 2-D tensor of dimension [D0, D1]
Returns:
A tensor of dimension [D0, D1], the result of vec * mat
"""
# Reshape vec to [D0, 1]
vec = array_ops.expand_dims(vec, -1)
return vec * mat
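# e.g. (illustrative shapes): a vec of shape [D0] is reshaped to [D0, 1] and then
# broadcast against a mat of shape [D0, D1]; row i of mat ends up scaled by vec[i].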
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_loss, grad_grad):
"""Gradient function for SoftmaxCrossEntropyWithLogits."""
# grad_loss is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# grad_grad is the backprop for softmax gradient.
# There is no gradient for the labels
#
# Second derivative is just softmax derivative w.r.t. logits.
softmax_grad = op.outputs[1]
grad = _BroadcastMul(grad_loss, softmax_grad)
if grad_grad.op.type not in ("ZerosLike", "Zeros"):
logits = op.inputs[0]
softmax = nn_ops.softmax(logits)
grad += ((grad_grad - array_ops.squeeze(
math_ops.matmul(grad_grad[:, None, :],
softmax[:, :, None]), axis=1)) * softmax)
return grad, None
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
"""Gradient function for SparseSoftmaxCrossEntropyWithLogits."""
# grad_0 is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# There is no gradient for the labels
#
# Currently there is no way to take the second derivative of this op
# due to the fused implementation's interaction with tf.gradients(),
# so we make sure we prevent silently incorrect results by raising
# an error if the second derivative is requested via prevent_gradient.
sparse_softmax_grad_without_gradient = array_ops.prevent_gradient(
op.outputs[1], message="Currently there is no way to take the second "
"derivative of sparse_softmax_cross_entropy_with_logits due to the fused "
"implementation's interaction with tf.gradients()")
return _BroadcastMul(grad_0, sparse_softmax_grad_without_gradient), None
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
return [nn_ops.conv2d_backprop_input(
array_ops.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr("strides"),
op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format")),
nn_ops.conv2d_backprop_filter(op.inputs[0],
array_ops.shape(op.inputs[1]), grad,
op.get_attr("strides"),
op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format"))]
@ops.RegisterGradient("DepthwiseConv2dNative")
def _DepthwiseConv2dNativeGrad(op, grad):
return [
nn_ops.depthwise_conv2d_native_backprop_input(
array_ops.shape(op.inputs[0]),
op.inputs[1],
grad,
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format")),
nn_ops.depthwise_conv2d_native_backprop_filter(
op.inputs[0],
array_ops.shape(op.inputs[1]),
grad,
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format"))
]
@ops.RegisterGradient("Dilation2D")
def _Dilation2DGrad(op, grad):
return [nn_ops.dilation2d_backprop_input(op.inputs[0], op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding")),
nn_ops.dilation2d_backprop_filter(op.inputs[0], op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding"))]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
depth_radius = op.get_attr("depth_radius")
bias = op.get_attr("bias")
alpha = op.get_attr("alpha")
beta = op.get_attr("beta")
return [gen_nn_ops._lrn_grad(grad, op.inputs[0], op.outputs[0], depth_radius,
bias, alpha, beta)]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
return gen_nn_ops._avg_pool_grad(
array_ops.shape(op.inputs[0]),
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("AvgPoolGrad")
def _AvgPoolGradGrad(op, grad):
return (array_ops.stop_gradient(op.inputs[0]), gen_nn_ops._avg_pool(
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
return gen_nn_ops._max_pool_grad(op.inputs[0],
op.outputs[0],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("MaxPoolGrad")
def _MaxPoolGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]),
dtype=op.inputs[0].dtype), array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops._max_pool_grad_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPoolGradGrad")
def _MaxPoolGradGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]),
dtype=op.inputs[0].dtype), array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops._max_pool_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("FractionalMaxPool")
def _FractionalMaxPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
"""Returns gradient for FractionalMaxPool.
Since FractionalMaxPool has three outputs, there are three gradients passed in
for each of the outputs. Only the first one is useful, the other two gradients
are empty.
Args:
op: The FractionalMaxPoolOp.
grad_0: Gradient with respect to op.outputs[0]
unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.
Returns:
Input backprop for FractionalMaxPool op.
"""
# pylint: disable=protected-access
return gen_nn_ops._fractional_max_pool_grad(op.inputs[0], op.outputs[0],
grad_0, op.outputs[1],
op.outputs[2],
op.get_attr("overlapping"))
@ops.RegisterGradient("FractionalAvgPool")
def _FractionalAvgPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
"""Returns gradient for FractionalAvgPool.
Since FractionalAvgPool has three outputs, there are three gradients passed in
for each of the outputs. Only the first one is useful, the other two gradients
are empty.
Args:
op: The FractionalAvgPoolOp.
grad_0: Gradient with respect to op.outputs[0]
unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.
Returns:
Input backprop for FractionalAvgPool op.
"""
# pylint: disable=protected-access
return gen_nn_ops._fractional_avg_pool_grad(op.inputs[0].get_shape(), grad_0,
op.outputs[1], op.outputs[2],
op.get_attr("overlapping"))
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
"""Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.
We do not backprop anything for the mean and var intentionally as they are
not being trained with backprop in the operation.
Args:
op: The BatchNormOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the BatchNormOp.
Returns:
dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
dm: Backprop for mean, which is
sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
dv: Backprop for variance, which is
sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
db: Backprop for beta, which is grad reduced in all except the
last dimension.
dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
"""
dx, dm, dv, db, dg = gen_nn_ops._batch_norm_with_global_normalization_grad(
op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
return dx, dm, dv, db, dg
@ops.RegisterGradient("FusedBatchNorm")
def _FusedBatchNormGrad(op, *grad):
"""Return the gradients for the 3 inputs of BatchNorm.
Args:
op: The BatchNormOp for which we need to compute gradients.
*grad: An argument list for tensors of gradients wrt the outputs
with grad[0] as grad_y.
Returns:
grad_x: gradient for x, which is scale * rsqrt(variance + epsilon) *
[grad_y - mean(grad_y) - (x - mean(x)) *
mean(grad_y * (x - mean(x))) / (variance + epsilon)]
grad_scale: gradient for scale, which is sum(grad_y * (x - mean(x)) *
rsqrt(variance + epsilon))
grad_offset: gradient for offset, which is sum(grad_y)
"""
return gen_nn_ops.fused_batch_norm_grad(
grad[0],
op.inputs[0],
op.inputs[1],
op.outputs[3],
op.outputs[4],
epsilon=op.get_attr("epsilon"),
data_format=op.get_attr("data_format"),
is_training=op.get_attr("is_training"))
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
"""Return the gradients for L2Loss.
Args:
op: The L2LossOp for which we need to generate gradients.
grad: Tensor containing a single number.
Returns:
The gradient, which is (x * grad).
"""
return op.inputs[0] * grad
@ops.RegisterGradient("TopK")
@ops.RegisterGradient("TopKV2")
def _TopKGrad(op, grad, _):
"""Return the gradients for TopK.
Args:
op: The TopKOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the TopKOp.
Returns:
    A list of two tensors, the first being the gradient w.r.t. the input of
    TopK, and the second being the gradient w.r.t. the indices (all zero).
"""
in_shape = array_ops.shape(op.inputs[0])
ind_shape = array_ops.shape(op.outputs[1])
ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
# Flatten indices to 2D.
ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack([-1, ind_lastdim]))
in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
outerdim = array_ops.shape(ind_2d)[0]
# Compute linear indices (flattened to 1D).
ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])
# Substitute grad to appropriate locations and fill the rest with zeros,
# finally reshaping it to the original input shape.
return [array_ops.reshape(
sparse_ops.sparse_to_dense(ind,
array_ops.reshape(
math_ops.reduce_prod(in_shape), [1]),
array_ops.reshape(grad, [-1]),
validate_indices=False),
in_shape), array_ops.zeros(
[], dtype=dtypes.int32)]
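# Worked sketch for _TopKGrad (illustrative values): with an input of shape
# [2, 3] and k=2, op.outputs[1] might be [[2, 0], [1, 2]]. The row offsets
# range(0, 6, 3) turn those per-row indices into flat indices [2, 0, 4, 5];
# sparse_to_dense scatters the incoming grad there, zero-fills the rest, and
# the final reshape restores the original [2, 3] input shape.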
|
|
#!/usr/bin/env python
from __future__ import print_function, unicode_literals
from pprint import pformat
from datetime import datetime, timedelta, date, time
from django.test import TestCase
from django.contrib.auth.models import User
from django.core.management import call_command
from dateutil import rrule
import swingtime
from swingtime import utils
from swingtime.models import *
from swingtime.forms import EventForm, MultipleOccurrenceForm
expected_table_1 = '''\
| 15:00 | | | | | |
| 15:15 | zelda | | | | |
| 15:30 | zelda | alpha | | | |
| 15:45 | | alpha | | | |
| 16:00 | bravo | alpha | foxtrot | | |
| 16:15 | bravo | alpha | foxtrot | charlie | |
| 16:30 | bravo | alpha | foxtrot | charlie | delta |
| 16:45 | | alpha | | charlie | delta |
| 17:00 | | alpha | | | delta |
| 17:15 | echo | alpha | | | |
| 17:30 | echo | alpha | | | |
| 17:45 | echo | | | | |
| 18:00 | | | | | |
'''
expected_table_2 = '''\
| 15:30 | zelda | alpha | | | |
| 15:45 | | alpha | | | |
| 16:00 | bravo | alpha | foxtrot | | |
| 16:15 | bravo | alpha | foxtrot | charlie | |
| 16:30 | bravo | alpha | foxtrot | charlie | delta |
| 16:45 | | alpha | | charlie | delta |
| 17:00 | | alpha | | | delta |
| 17:15 | echo | alpha | | | |
| 17:30 | echo | alpha | | | |
'''
expected_table_3 = '''\
| 16:00 | alpha | bravo | foxtrot | | |
| 16:15 | alpha | bravo | foxtrot | charlie | |
| 16:30 | alpha | bravo | foxtrot | charlie | delta |
| 16:45 | alpha | | | charlie | delta |
| 17:00 | alpha | | | | delta |
| 17:15 | alpha | echo | | | |
| 17:30 | alpha | echo | | | |
'''
expected_table_4 = '''\
| 18:00 | | | | |
| 18:15 | | | | |
| 18:30 | | | | |
| 18:45 | | | | |
| 19:00 | | | | |
| 19:15 | | | | |
| 19:30 | | | | |
'''
expected_table_5 = '''\
| 16:30 | alpha | bravo | foxtrot | charlie | delta |
'''
#===============================================================================
class TableTest(TestCase):
fixtures = ['swingtime_test.json']
#---------------------------------------------------------------------------
def setUp(self):
self._dt = dt = datetime(2008,12,11)
#---------------------------------------------------------------------------
def table_as_string(self, table):
timefmt = '| {:<5s} '
cellfmt = '| {:<8s} '
out = []
for tm, cells in table:
out.append(timefmt.format(tm.strftime('%H:%M')))
for cell in cells:
if cell:
out.append(cellfmt.format(cell.event.title))
else:
out.append(cellfmt.format(''))
out.append('|\n')
return ''.join(out)
#---------------------------------------------------------------------------
def _do_test(self, start, end, expect):
start = time(*start)
dtstart = datetime.combine(self._dt, start)
etd = datetime.combine(self._dt, time(*end)) - dtstart
table = utils.create_timeslot_table(self._dt, start_time=start, end_time_delta=etd)
actual = self.table_as_string(table)
out = 'Expecting:\n{0}\nActual:\n{1}'.format(expect, actual)
self.assertEqual(actual, expect, out)
#---------------------------------------------------------------------------
def test_slot_table_1(self):
self._do_test((15,0), (18,0), expected_table_1)
#---------------------------------------------------------------------------
def test_slot_table_2(self):
self._do_test((15,30), (17,30), expected_table_2)
#---------------------------------------------------------------------------
def test_slot_table_3(self):
self._do_test((16,0), (17,30), expected_table_3)
#---------------------------------------------------------------------------
def test_slot_table_4(self):
self._do_test((18,0), (19,30), expected_table_4)
#---------------------------------------------------------------------------
def test_slot_table_5(self):
self._do_test((16,30), (16,30), expected_table_5)
#===============================================================================
class NewEventFormTest(TestCase):
fixtures = ['swingtime_test']
#---------------------------------------------------------------------------
def test_new_event_simple(self):
data = dict(
title='QWERTY',
event_type='1',
day='2008-12-11',
start_time_delta='28800',
end_time_delta='29700',
year_month_ordinal_day='2',
month_ordinal_day='2',
year_month_ordinal='1',
month_option='each',
count=2,
repeats='count',
freq='2',
month_ordinal='1'
)
evt_form = EventForm(data)
occ_form = MultipleOccurrenceForm(data)
self.assertTrue(evt_form.is_valid(), evt_form.errors.as_text())
self.assertTrue(occ_form.is_valid(), occ_form.errors.as_text())
evt = occ_form.save(evt_form.save())
self.assertEqual(evt.occurrence_set.count(), 2)
self.assertEqual(
occ_form.cleaned_data['start_time'],
datetime(2008, 12, 11, 8),
'Bad start_time: {0}'.format(pformat(occ_form.cleaned_data))
)
#---------------------------------------------------------------------------
def test_freq(self):
et = EventType.objects.get(pk=1)
e = Event.objects.create(title='FIRE BAD!', description='***', event_type=et)
dtstart = datetime(2015,2,12)
data = dict(
day=dtstart.date(),
freq=rrule.MONTHLY,
month_option='on',
month_ordinal=1,
month_ordinal_day=5,
repeats='until',
start_time_delta='28800',
end_time_delta='29700',
until=datetime(2015, 6, 10)
)
mof = MultipleOccurrenceForm(data, initial={'dtstart': dtstart})
self.assertTrue(mof.is_valid(), mof.errors.as_text())
mof.save(e)
expected = [date(2015,m,d) for m,d in ((3, 6), (4, 3), (5, 1), (6, 5))]
actual = [o.start_time.date() for o in e.occurrence_set.all()]
self.assertEqual(expected, actual, '\nGOT:\n{}\n^\nEXP:\n{}'.format(pformat(actual), pformat(expected)))
#===============================================================================
class CreationTest(TestCase):
#---------------------------------------------------------------------------
def test_1(self):
et = EventType.objects.create(abbr='foo', label='Foo')
self.assertTrue(et.abbr == 'foo')
e = Event.objects.create(title='Hello, world', description='Happy New Year', event_type=et)
self.assertTrue(e.event_type == et)
self.assertEqual(e.get_absolute_url(), '/events/{}/'.format(e.id))
e.add_occurrences(datetime(2008,1,1), datetime(2008,1,1,1), freq=rrule.YEARLY, count=7)
occs = list(e.occurrence_set.all())
self.assertEqual(len(occs), 7)
self.assertEqual(str(occs[0]), 'Hello, world: 2008-01-01T00:00:00')
for i in range(7):
o = occs[i]
self.assertEqual(o.start_time.year, 2008 + i)
#---------------------------------------------------------------------------
def test_2(self):
et = EventType.objects.create(abbr='bar', label='Bar')
self.assertEqual(str(et), 'Bar')
e = create_event('Bicycle repairman', event_type=et)
self.assertEqual(str(e), 'Bicycle repairman')
self.assertEqual(e.occurrence_set.count(), 1)
self.assertEqual(e.daily_occurrences().count(), 1)
#---------------------------------------------------------------------------
def test_3(self):
e = create_event(
'Something completely different',
event_type=('abbr', 'Abbreviation'),
start_time=datetime(2008,12,1, 12),
note='Here it is',
freq=rrule.WEEKLY,
byweekday=(rrule.TU, rrule.TH),
until=datetime(2008,12,31)
)
self.assertIsInstance(e.event_type, EventType)
self.assertEqual(e.event_type.abbr, 'abbr')
self.assertEqual(str(e.notes.all()[0]), 'Here it is')
occs = list(e.occurrence_set.all())
for i, day in zip(range(len(occs)), [2, 4, 9, 11, 16, 18, 23, 25, 30]):
o = occs[i]
self.assertEqual(day, o.start_time.day)
#---------------------------------------------------------------------------
def test_4(self):
e = create_event('This parrot has ceased to be!', ('blue', 'Blue'), count=3)
occs = list(e.upcoming_occurrences())
self.assertEqual(len(occs), 2)
self.assertIsNotNone(e.next_occurrence())
self.assertEqual(occs[1].title, 'This parrot has ceased to be!')
def test_6(self):
et = EventType.objects.create(abbr='foo', label='Foo')
self.assertTrue(et.abbr == 'foo')
e = Event.objects.create(title='Yet another event', description="with tons of occurrences", event_type=et)
self.assertTrue(e.event_type == et)
self.assertEqual(e.get_absolute_url(), '/events/{}/'.format(e.id))
e.add_occurrences(datetime(2008,1,1), datetime(2008,1,1,1),
            freq=rrule.DAILY, until=datetime(2020,12,31))
occs = list(e.occurrence_set.all())
self.assertEqual(len(occs), 4749)
#===============================================================================
class MiscTest(TestCase):
#---------------------------------------------------------------------------
def test_version(self):
V = swingtime.VERSION
self.assertEqual(swingtime.get_version(), '.'.join([str(i) for i in V]))
#---------------------------------------------------------------------------
def test_month_boundaries(self):
dt = datetime(2012,2,15)
start, end = utils.month_boundaries(dt)
self.assertEqual(start, datetime(2012,2,1))
self.assertEqual(end, datetime(2012,2,29))
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.analytics.admin_v1alpha.types import analytics_admin
from google.analytics.admin_v1alpha.types import resources
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-analytics-admin",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AnalyticsAdminServiceTransport(abc.ABC):
"""Abstract transport class for AnalyticsAdminService."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/analytics.edit",
"https://www.googleapis.com/auth/analytics.manage.users",
"https://www.googleapis.com/auth/analytics.manage.users.readonly",
"https://www.googleapis.com/auth/analytics.readonly",
)
DEFAULT_HOST: str = "analyticsadmin.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.get_account: gapic_v1.method.wrap_method(
self.get_account, default_timeout=None, client_info=client_info,
),
self.list_accounts: gapic_v1.method.wrap_method(
self.list_accounts, default_timeout=None, client_info=client_info,
),
self.delete_account: gapic_v1.method.wrap_method(
self.delete_account, default_timeout=None, client_info=client_info,
),
self.update_account: gapic_v1.method.wrap_method(
self.update_account, default_timeout=None, client_info=client_info,
),
self.provision_account_ticket: gapic_v1.method.wrap_method(
self.provision_account_ticket,
default_timeout=None,
client_info=client_info,
),
self.list_account_summaries: gapic_v1.method.wrap_method(
self.list_account_summaries,
default_timeout=None,
client_info=client_info,
),
self.get_property: gapic_v1.method.wrap_method(
self.get_property, default_timeout=None, client_info=client_info,
),
self.list_properties: gapic_v1.method.wrap_method(
self.list_properties, default_timeout=None, client_info=client_info,
),
self.create_property: gapic_v1.method.wrap_method(
self.create_property, default_timeout=None, client_info=client_info,
),
self.delete_property: gapic_v1.method.wrap_method(
self.delete_property, default_timeout=None, client_info=client_info,
),
self.update_property: gapic_v1.method.wrap_method(
self.update_property, default_timeout=None, client_info=client_info,
),
self.get_user_link: gapic_v1.method.wrap_method(
self.get_user_link, default_timeout=None, client_info=client_info,
),
self.batch_get_user_links: gapic_v1.method.wrap_method(
self.batch_get_user_links,
default_timeout=None,
client_info=client_info,
),
self.list_user_links: gapic_v1.method.wrap_method(
self.list_user_links, default_timeout=None, client_info=client_info,
),
self.audit_user_links: gapic_v1.method.wrap_method(
self.audit_user_links, default_timeout=None, client_info=client_info,
),
self.create_user_link: gapic_v1.method.wrap_method(
self.create_user_link, default_timeout=None, client_info=client_info,
),
self.batch_create_user_links: gapic_v1.method.wrap_method(
self.batch_create_user_links,
default_timeout=None,
client_info=client_info,
),
self.update_user_link: gapic_v1.method.wrap_method(
self.update_user_link, default_timeout=None, client_info=client_info,
),
self.batch_update_user_links: gapic_v1.method.wrap_method(
self.batch_update_user_links,
default_timeout=None,
client_info=client_info,
),
self.delete_user_link: gapic_v1.method.wrap_method(
self.delete_user_link, default_timeout=None, client_info=client_info,
),
self.batch_delete_user_links: gapic_v1.method.wrap_method(
self.batch_delete_user_links,
default_timeout=None,
client_info=client_info,
),
self.create_firebase_link: gapic_v1.method.wrap_method(
self.create_firebase_link,
default_timeout=None,
client_info=client_info,
),
self.delete_firebase_link: gapic_v1.method.wrap_method(
self.delete_firebase_link,
default_timeout=None,
client_info=client_info,
),
self.list_firebase_links: gapic_v1.method.wrap_method(
self.list_firebase_links, default_timeout=None, client_info=client_info,
),
self.get_global_site_tag: gapic_v1.method.wrap_method(
self.get_global_site_tag, default_timeout=None, client_info=client_info,
),
self.create_google_ads_link: gapic_v1.method.wrap_method(
self.create_google_ads_link,
default_timeout=None,
client_info=client_info,
),
self.update_google_ads_link: gapic_v1.method.wrap_method(
self.update_google_ads_link,
default_timeout=None,
client_info=client_info,
),
self.delete_google_ads_link: gapic_v1.method.wrap_method(
self.delete_google_ads_link,
default_timeout=None,
client_info=client_info,
),
self.list_google_ads_links: gapic_v1.method.wrap_method(
self.list_google_ads_links,
default_timeout=None,
client_info=client_info,
),
self.get_data_sharing_settings: gapic_v1.method.wrap_method(
self.get_data_sharing_settings,
default_timeout=None,
client_info=client_info,
),
self.get_measurement_protocol_secret: gapic_v1.method.wrap_method(
self.get_measurement_protocol_secret,
default_timeout=None,
client_info=client_info,
),
self.list_measurement_protocol_secrets: gapic_v1.method.wrap_method(
self.list_measurement_protocol_secrets,
default_timeout=None,
client_info=client_info,
),
self.create_measurement_protocol_secret: gapic_v1.method.wrap_method(
self.create_measurement_protocol_secret,
default_timeout=None,
client_info=client_info,
),
self.delete_measurement_protocol_secret: gapic_v1.method.wrap_method(
self.delete_measurement_protocol_secret,
default_timeout=None,
client_info=client_info,
),
self.update_measurement_protocol_secret: gapic_v1.method.wrap_method(
self.update_measurement_protocol_secret,
default_timeout=None,
client_info=client_info,
),
self.acknowledge_user_data_collection: gapic_v1.method.wrap_method(
self.acknowledge_user_data_collection,
default_timeout=None,
client_info=client_info,
),
self.search_change_history_events: gapic_v1.method.wrap_method(
self.search_change_history_events,
default_timeout=None,
client_info=client_info,
),
self.get_google_signals_settings: gapic_v1.method.wrap_method(
self.get_google_signals_settings,
default_timeout=None,
client_info=client_info,
),
self.update_google_signals_settings: gapic_v1.method.wrap_method(
self.update_google_signals_settings,
default_timeout=None,
client_info=client_info,
),
self.create_conversion_event: gapic_v1.method.wrap_method(
self.create_conversion_event,
default_timeout=None,
client_info=client_info,
),
self.get_conversion_event: gapic_v1.method.wrap_method(
self.get_conversion_event,
default_timeout=None,
client_info=client_info,
),
self.delete_conversion_event: gapic_v1.method.wrap_method(
self.delete_conversion_event,
default_timeout=None,
client_info=client_info,
),
self.list_conversion_events: gapic_v1.method.wrap_method(
self.list_conversion_events,
default_timeout=None,
client_info=client_info,
),
self.get_display_video360_advertiser_link: gapic_v1.method.wrap_method(
self.get_display_video360_advertiser_link,
default_timeout=None,
client_info=client_info,
),
self.list_display_video360_advertiser_links: gapic_v1.method.wrap_method(
self.list_display_video360_advertiser_links,
default_timeout=None,
client_info=client_info,
),
self.create_display_video360_advertiser_link: gapic_v1.method.wrap_method(
self.create_display_video360_advertiser_link,
default_timeout=None,
client_info=client_info,
),
self.delete_display_video360_advertiser_link: gapic_v1.method.wrap_method(
self.delete_display_video360_advertiser_link,
default_timeout=None,
client_info=client_info,
),
self.update_display_video360_advertiser_link: gapic_v1.method.wrap_method(
self.update_display_video360_advertiser_link,
default_timeout=None,
client_info=client_info,
),
self.get_display_video360_advertiser_link_proposal: gapic_v1.method.wrap_method(
self.get_display_video360_advertiser_link_proposal,
default_timeout=None,
client_info=client_info,
),
self.list_display_video360_advertiser_link_proposals: gapic_v1.method.wrap_method(
self.list_display_video360_advertiser_link_proposals,
default_timeout=None,
client_info=client_info,
),
self.create_display_video360_advertiser_link_proposal: gapic_v1.method.wrap_method(
self.create_display_video360_advertiser_link_proposal,
default_timeout=None,
client_info=client_info,
),
self.delete_display_video360_advertiser_link_proposal: gapic_v1.method.wrap_method(
self.delete_display_video360_advertiser_link_proposal,
default_timeout=None,
client_info=client_info,
),
self.approve_display_video360_advertiser_link_proposal: gapic_v1.method.wrap_method(
self.approve_display_video360_advertiser_link_proposal,
default_timeout=None,
client_info=client_info,
),
self.cancel_display_video360_advertiser_link_proposal: gapic_v1.method.wrap_method(
self.cancel_display_video360_advertiser_link_proposal,
default_timeout=None,
client_info=client_info,
),
self.create_custom_dimension: gapic_v1.method.wrap_method(
self.create_custom_dimension,
default_timeout=None,
client_info=client_info,
),
self.update_custom_dimension: gapic_v1.method.wrap_method(
self.update_custom_dimension,
default_timeout=None,
client_info=client_info,
),
self.list_custom_dimensions: gapic_v1.method.wrap_method(
self.list_custom_dimensions,
default_timeout=None,
client_info=client_info,
),
self.archive_custom_dimension: gapic_v1.method.wrap_method(
self.archive_custom_dimension,
default_timeout=None,
client_info=client_info,
),
self.get_custom_dimension: gapic_v1.method.wrap_method(
self.get_custom_dimension,
default_timeout=None,
client_info=client_info,
),
self.create_custom_metric: gapic_v1.method.wrap_method(
self.create_custom_metric,
default_timeout=None,
client_info=client_info,
),
self.update_custom_metric: gapic_v1.method.wrap_method(
self.update_custom_metric,
default_timeout=None,
client_info=client_info,
),
self.list_custom_metrics: gapic_v1.method.wrap_method(
self.list_custom_metrics, default_timeout=None, client_info=client_info,
),
self.archive_custom_metric: gapic_v1.method.wrap_method(
self.archive_custom_metric,
default_timeout=None,
client_info=client_info,
),
self.get_custom_metric: gapic_v1.method.wrap_method(
self.get_custom_metric, default_timeout=None, client_info=client_info,
),
self.get_data_retention_settings: gapic_v1.method.wrap_method(
self.get_data_retention_settings,
default_timeout=None,
client_info=client_info,
),
self.update_data_retention_settings: gapic_v1.method.wrap_method(
self.update_data_retention_settings,
default_timeout=None,
client_info=client_info,
),
self.create_data_stream: gapic_v1.method.wrap_method(
self.create_data_stream, default_timeout=None, client_info=client_info,
),
self.delete_data_stream: gapic_v1.method.wrap_method(
self.delete_data_stream, default_timeout=None, client_info=client_info,
),
self.update_data_stream: gapic_v1.method.wrap_method(
self.update_data_stream, default_timeout=None, client_info=client_info,
),
self.list_data_streams: gapic_v1.method.wrap_method(
self.list_data_streams, default_timeout=None, client_info=client_info,
),
self.get_data_stream: gapic_v1.method.wrap_method(
self.get_data_stream, default_timeout=None, client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def get_account(
self,
) -> Callable[
[analytics_admin.GetAccountRequest],
Union[resources.Account, Awaitable[resources.Account]],
]:
raise NotImplementedError()
@property
def list_accounts(
self,
) -> Callable[
[analytics_admin.ListAccountsRequest],
Union[
analytics_admin.ListAccountsResponse,
Awaitable[analytics_admin.ListAccountsResponse],
],
]:
raise NotImplementedError()
@property
def delete_account(
self,
) -> Callable[
[analytics_admin.DeleteAccountRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def update_account(
self,
) -> Callable[
[analytics_admin.UpdateAccountRequest],
Union[resources.Account, Awaitable[resources.Account]],
]:
raise NotImplementedError()
@property
def provision_account_ticket(
self,
) -> Callable[
[analytics_admin.ProvisionAccountTicketRequest],
Union[
analytics_admin.ProvisionAccountTicketResponse,
Awaitable[analytics_admin.ProvisionAccountTicketResponse],
],
]:
raise NotImplementedError()
@property
def list_account_summaries(
self,
) -> Callable[
[analytics_admin.ListAccountSummariesRequest],
Union[
analytics_admin.ListAccountSummariesResponse,
Awaitable[analytics_admin.ListAccountSummariesResponse],
],
]:
raise NotImplementedError()
@property
def get_property(
self,
) -> Callable[
[analytics_admin.GetPropertyRequest],
Union[resources.Property, Awaitable[resources.Property]],
]:
raise NotImplementedError()
@property
def list_properties(
self,
) -> Callable[
[analytics_admin.ListPropertiesRequest],
Union[
analytics_admin.ListPropertiesResponse,
Awaitable[analytics_admin.ListPropertiesResponse],
],
]:
raise NotImplementedError()
@property
def create_property(
self,
) -> Callable[
[analytics_admin.CreatePropertyRequest],
Union[resources.Property, Awaitable[resources.Property]],
]:
raise NotImplementedError()
@property
def delete_property(
self,
) -> Callable[
[analytics_admin.DeletePropertyRequest],
Union[resources.Property, Awaitable[resources.Property]],
]:
raise NotImplementedError()
@property
def update_property(
self,
) -> Callable[
[analytics_admin.UpdatePropertyRequest],
Union[resources.Property, Awaitable[resources.Property]],
]:
raise NotImplementedError()
@property
def get_user_link(
self,
) -> Callable[
[analytics_admin.GetUserLinkRequest],
Union[resources.UserLink, Awaitable[resources.UserLink]],
]:
raise NotImplementedError()
@property
def batch_get_user_links(
self,
) -> Callable[
[analytics_admin.BatchGetUserLinksRequest],
Union[
analytics_admin.BatchGetUserLinksResponse,
Awaitable[analytics_admin.BatchGetUserLinksResponse],
],
]:
raise NotImplementedError()
@property
def list_user_links(
self,
) -> Callable[
[analytics_admin.ListUserLinksRequest],
Union[
analytics_admin.ListUserLinksResponse,
Awaitable[analytics_admin.ListUserLinksResponse],
],
]:
raise NotImplementedError()
@property
def audit_user_links(
self,
) -> Callable[
[analytics_admin.AuditUserLinksRequest],
Union[
analytics_admin.AuditUserLinksResponse,
Awaitable[analytics_admin.AuditUserLinksResponse],
],
]:
raise NotImplementedError()
@property
def create_user_link(
self,
) -> Callable[
[analytics_admin.CreateUserLinkRequest],
Union[resources.UserLink, Awaitable[resources.UserLink]],
]:
raise NotImplementedError()
@property
def batch_create_user_links(
self,
) -> Callable[
[analytics_admin.BatchCreateUserLinksRequest],
Union[
analytics_admin.BatchCreateUserLinksResponse,
Awaitable[analytics_admin.BatchCreateUserLinksResponse],
],
]:
raise NotImplementedError()
@property
def update_user_link(
self,
) -> Callable[
[analytics_admin.UpdateUserLinkRequest],
Union[resources.UserLink, Awaitable[resources.UserLink]],
]:
raise NotImplementedError()
@property
def batch_update_user_links(
self,
) -> Callable[
[analytics_admin.BatchUpdateUserLinksRequest],
Union[
analytics_admin.BatchUpdateUserLinksResponse,
Awaitable[analytics_admin.BatchUpdateUserLinksResponse],
],
]:
raise NotImplementedError()
@property
def delete_user_link(
self,
) -> Callable[
[analytics_admin.DeleteUserLinkRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def batch_delete_user_links(
self,
) -> Callable[
[analytics_admin.BatchDeleteUserLinksRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def create_firebase_link(
self,
) -> Callable[
[analytics_admin.CreateFirebaseLinkRequest],
Union[resources.FirebaseLink, Awaitable[resources.FirebaseLink]],
]:
raise NotImplementedError()
@property
def delete_firebase_link(
self,
) -> Callable[
[analytics_admin.DeleteFirebaseLinkRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def list_firebase_links(
self,
) -> Callable[
[analytics_admin.ListFirebaseLinksRequest],
Union[
analytics_admin.ListFirebaseLinksResponse,
Awaitable[analytics_admin.ListFirebaseLinksResponse],
],
]:
raise NotImplementedError()
@property
def get_global_site_tag(
self,
) -> Callable[
[analytics_admin.GetGlobalSiteTagRequest],
Union[resources.GlobalSiteTag, Awaitable[resources.GlobalSiteTag]],
]:
raise NotImplementedError()
@property
def create_google_ads_link(
self,
) -> Callable[
[analytics_admin.CreateGoogleAdsLinkRequest],
Union[resources.GoogleAdsLink, Awaitable[resources.GoogleAdsLink]],
]:
raise NotImplementedError()
@property
def update_google_ads_link(
self,
) -> Callable[
[analytics_admin.UpdateGoogleAdsLinkRequest],
Union[resources.GoogleAdsLink, Awaitable[resources.GoogleAdsLink]],
]:
raise NotImplementedError()
@property
def delete_google_ads_link(
self,
) -> Callable[
[analytics_admin.DeleteGoogleAdsLinkRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def list_google_ads_links(
self,
) -> Callable[
[analytics_admin.ListGoogleAdsLinksRequest],
Union[
analytics_admin.ListGoogleAdsLinksResponse,
Awaitable[analytics_admin.ListGoogleAdsLinksResponse],
],
]:
raise NotImplementedError()
@property
def get_data_sharing_settings(
self,
) -> Callable[
[analytics_admin.GetDataSharingSettingsRequest],
Union[resources.DataSharingSettings, Awaitable[resources.DataSharingSettings]],
]:
raise NotImplementedError()
@property
def get_measurement_protocol_secret(
self,
) -> Callable[
[analytics_admin.GetMeasurementProtocolSecretRequest],
Union[
resources.MeasurementProtocolSecret,
Awaitable[resources.MeasurementProtocolSecret],
],
]:
raise NotImplementedError()
@property
def list_measurement_protocol_secrets(
self,
) -> Callable[
[analytics_admin.ListMeasurementProtocolSecretsRequest],
Union[
analytics_admin.ListMeasurementProtocolSecretsResponse,
Awaitable[analytics_admin.ListMeasurementProtocolSecretsResponse],
],
]:
raise NotImplementedError()
@property
def create_measurement_protocol_secret(
self,
) -> Callable[
[analytics_admin.CreateMeasurementProtocolSecretRequest],
Union[
resources.MeasurementProtocolSecret,
Awaitable[resources.MeasurementProtocolSecret],
],
]:
raise NotImplementedError()
@property
def delete_measurement_protocol_secret(
self,
) -> Callable[
[analytics_admin.DeleteMeasurementProtocolSecretRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def update_measurement_protocol_secret(
self,
) -> Callable[
[analytics_admin.UpdateMeasurementProtocolSecretRequest],
Union[
resources.MeasurementProtocolSecret,
Awaitable[resources.MeasurementProtocolSecret],
],
]:
raise NotImplementedError()
@property
def acknowledge_user_data_collection(
self,
) -> Callable[
[analytics_admin.AcknowledgeUserDataCollectionRequest],
Union[
analytics_admin.AcknowledgeUserDataCollectionResponse,
Awaitable[analytics_admin.AcknowledgeUserDataCollectionResponse],
],
]:
raise NotImplementedError()
@property
def search_change_history_events(
self,
) -> Callable[
[analytics_admin.SearchChangeHistoryEventsRequest],
Union[
analytics_admin.SearchChangeHistoryEventsResponse,
Awaitable[analytics_admin.SearchChangeHistoryEventsResponse],
],
]:
raise NotImplementedError()
@property
def get_google_signals_settings(
self,
) -> Callable[
[analytics_admin.GetGoogleSignalsSettingsRequest],
Union[
resources.GoogleSignalsSettings, Awaitable[resources.GoogleSignalsSettings]
],
]:
raise NotImplementedError()
@property
def update_google_signals_settings(
self,
) -> Callable[
[analytics_admin.UpdateGoogleSignalsSettingsRequest],
Union[
resources.GoogleSignalsSettings, Awaitable[resources.GoogleSignalsSettings]
],
]:
raise NotImplementedError()
@property
def create_conversion_event(
self,
) -> Callable[
[analytics_admin.CreateConversionEventRequest],
Union[resources.ConversionEvent, Awaitable[resources.ConversionEvent]],
]:
raise NotImplementedError()
@property
def get_conversion_event(
self,
) -> Callable[
[analytics_admin.GetConversionEventRequest],
Union[resources.ConversionEvent, Awaitable[resources.ConversionEvent]],
]:
raise NotImplementedError()
@property
def delete_conversion_event(
self,
) -> Callable[
[analytics_admin.DeleteConversionEventRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def list_conversion_events(
self,
) -> Callable[
[analytics_admin.ListConversionEventsRequest],
Union[
analytics_admin.ListConversionEventsResponse,
Awaitable[analytics_admin.ListConversionEventsResponse],
],
]:
raise NotImplementedError()
@property
def get_display_video360_advertiser_link(
self,
) -> Callable[
[analytics_admin.GetDisplayVideo360AdvertiserLinkRequest],
Union[
resources.DisplayVideo360AdvertiserLink,
Awaitable[resources.DisplayVideo360AdvertiserLink],
],
]:
raise NotImplementedError()
@property
def list_display_video360_advertiser_links(
self,
) -> Callable[
[analytics_admin.ListDisplayVideo360AdvertiserLinksRequest],
Union[
analytics_admin.ListDisplayVideo360AdvertiserLinksResponse,
Awaitable[analytics_admin.ListDisplayVideo360AdvertiserLinksResponse],
],
]:
raise NotImplementedError()
@property
def create_display_video360_advertiser_link(
self,
) -> Callable[
[analytics_admin.CreateDisplayVideo360AdvertiserLinkRequest],
Union[
resources.DisplayVideo360AdvertiserLink,
Awaitable[resources.DisplayVideo360AdvertiserLink],
],
]:
raise NotImplementedError()
@property
def delete_display_video360_advertiser_link(
self,
) -> Callable[
[analytics_admin.DeleteDisplayVideo360AdvertiserLinkRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def update_display_video360_advertiser_link(
self,
) -> Callable[
[analytics_admin.UpdateDisplayVideo360AdvertiserLinkRequest],
Union[
resources.DisplayVideo360AdvertiserLink,
Awaitable[resources.DisplayVideo360AdvertiserLink],
],
]:
raise NotImplementedError()
@property
def get_display_video360_advertiser_link_proposal(
self,
) -> Callable[
[analytics_admin.GetDisplayVideo360AdvertiserLinkProposalRequest],
Union[
resources.DisplayVideo360AdvertiserLinkProposal,
Awaitable[resources.DisplayVideo360AdvertiserLinkProposal],
],
]:
raise NotImplementedError()
@property
def list_display_video360_advertiser_link_proposals(
self,
) -> Callable[
[analytics_admin.ListDisplayVideo360AdvertiserLinkProposalsRequest],
Union[
analytics_admin.ListDisplayVideo360AdvertiserLinkProposalsResponse,
Awaitable[
analytics_admin.ListDisplayVideo360AdvertiserLinkProposalsResponse
],
],
]:
raise NotImplementedError()
@property
def create_display_video360_advertiser_link_proposal(
self,
) -> Callable[
[analytics_admin.CreateDisplayVideo360AdvertiserLinkProposalRequest],
Union[
resources.DisplayVideo360AdvertiserLinkProposal,
Awaitable[resources.DisplayVideo360AdvertiserLinkProposal],
],
]:
raise NotImplementedError()
@property
def delete_display_video360_advertiser_link_proposal(
self,
) -> Callable[
[analytics_admin.DeleteDisplayVideo360AdvertiserLinkProposalRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def approve_display_video360_advertiser_link_proposal(
self,
) -> Callable[
[analytics_admin.ApproveDisplayVideo360AdvertiserLinkProposalRequest],
Union[
analytics_admin.ApproveDisplayVideo360AdvertiserLinkProposalResponse,
Awaitable[
analytics_admin.ApproveDisplayVideo360AdvertiserLinkProposalResponse
],
],
]:
raise NotImplementedError()
@property
def cancel_display_video360_advertiser_link_proposal(
self,
) -> Callable[
[analytics_admin.CancelDisplayVideo360AdvertiserLinkProposalRequest],
Union[
resources.DisplayVideo360AdvertiserLinkProposal,
Awaitable[resources.DisplayVideo360AdvertiserLinkProposal],
],
]:
raise NotImplementedError()
@property
def create_custom_dimension(
self,
) -> Callable[
[analytics_admin.CreateCustomDimensionRequest],
Union[resources.CustomDimension, Awaitable[resources.CustomDimension]],
]:
raise NotImplementedError()
@property
def update_custom_dimension(
self,
) -> Callable[
[analytics_admin.UpdateCustomDimensionRequest],
Union[resources.CustomDimension, Awaitable[resources.CustomDimension]],
]:
raise NotImplementedError()
@property
def list_custom_dimensions(
self,
) -> Callable[
[analytics_admin.ListCustomDimensionsRequest],
Union[
analytics_admin.ListCustomDimensionsResponse,
Awaitable[analytics_admin.ListCustomDimensionsResponse],
],
]:
raise NotImplementedError()
@property
def archive_custom_dimension(
self,
) -> Callable[
[analytics_admin.ArchiveCustomDimensionRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def get_custom_dimension(
self,
) -> Callable[
[analytics_admin.GetCustomDimensionRequest],
Union[resources.CustomDimension, Awaitable[resources.CustomDimension]],
]:
raise NotImplementedError()
@property
def create_custom_metric(
self,
) -> Callable[
[analytics_admin.CreateCustomMetricRequest],
Union[resources.CustomMetric, Awaitable[resources.CustomMetric]],
]:
raise NotImplementedError()
@property
def update_custom_metric(
self,
) -> Callable[
[analytics_admin.UpdateCustomMetricRequest],
Union[resources.CustomMetric, Awaitable[resources.CustomMetric]],
]:
raise NotImplementedError()
@property
def list_custom_metrics(
self,
) -> Callable[
[analytics_admin.ListCustomMetricsRequest],
Union[
analytics_admin.ListCustomMetricsResponse,
Awaitable[analytics_admin.ListCustomMetricsResponse],
],
]:
raise NotImplementedError()
@property
def archive_custom_metric(
self,
) -> Callable[
[analytics_admin.ArchiveCustomMetricRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def get_custom_metric(
self,
) -> Callable[
[analytics_admin.GetCustomMetricRequest],
Union[resources.CustomMetric, Awaitable[resources.CustomMetric]],
]:
raise NotImplementedError()
@property
def get_data_retention_settings(
self,
) -> Callable[
[analytics_admin.GetDataRetentionSettingsRequest],
Union[
resources.DataRetentionSettings, Awaitable[resources.DataRetentionSettings]
],
]:
raise NotImplementedError()
@property
def update_data_retention_settings(
self,
) -> Callable[
[analytics_admin.UpdateDataRetentionSettingsRequest],
Union[
resources.DataRetentionSettings, Awaitable[resources.DataRetentionSettings]
],
]:
raise NotImplementedError()
@property
def create_data_stream(
self,
) -> Callable[
[analytics_admin.CreateDataStreamRequest],
Union[resources.DataStream, Awaitable[resources.DataStream]],
]:
raise NotImplementedError()
@property
def delete_data_stream(
self,
) -> Callable[
[analytics_admin.DeleteDataStreamRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def update_data_stream(
self,
) -> Callable[
[analytics_admin.UpdateDataStreamRequest],
Union[resources.DataStream, Awaitable[resources.DataStream]],
]:
raise NotImplementedError()
@property
def list_data_streams(
self,
) -> Callable[
[analytics_admin.ListDataStreamsRequest],
Union[
analytics_admin.ListDataStreamsResponse,
Awaitable[analytics_admin.ListDataStreamsResponse],
],
]:
raise NotImplementedError()
@property
def get_data_stream(
self,
) -> Callable[
[analytics_admin.GetDataStreamRequest],
Union[resources.DataStream, Awaitable[resources.DataStream]],
]:
raise NotImplementedError()
__all__ = ("AnalyticsAdminServiceTransport",)
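# -----------------------------------------------------------------------------
# Illustrative sketch only (not part of the generated surface): a minimal
# concrete transport built on the ABC above. Real subclasses -- e.g. the gRPC
# transport shipped with this package -- open a channel and return bound stubs
# from each property; the class below and its canned Account response are
# hypothetical and exist purely to show the pattern of reusing __init__ for
# credential/host handling and overriding an RPC property with a callable.
class _SketchAnalyticsAdminTransport(AnalyticsAdminServiceTransport):
    def __init__(self, **kwargs):
        # AnonymousCredentials keep the sketch self-contained; a real transport
        # would normally let google.auth.default() resolve credentials.
        kwargs.setdefault("credentials", ga_credentials.AnonymousCredentials())
        super().__init__(**kwargs)

    @property
    def get_account(self):
        def _get_account(request: analytics_admin.GetAccountRequest) -> resources.Account:
            # A real transport would issue the RPC; the sketch just echoes the
            # requested resource name.
            return resources.Account(name=request.name)
        return _get_account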
|
|
#!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_keyvault
version_added: "2.5"
short_description: Manage Key Vault instance
description:
- Create, update and delete instance of Key Vault.
options:
resource_group:
description:
- The name of the Resource Group to which the server belongs.
required: True
vault_name:
description:
- Name of the vault.
required: True
location:
description:
- Resource location. If not set, location from the resource group will be used as default.
vault_tenant:
description:
- The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault.
sku:
description:
- SKU details.
suboptions:
family:
description:
- SKU family name.
name:
description:
- SKU name to specify whether the key vault is a standard vault or a premium vault.
required: True
choices:
- 'standard'
- 'premium'
access_policies:
description:
- An array of 0 to 16 identities that have access to the key vault.
- All identities in the array must use the same tenant ID as the key vault's tenant ID.
suboptions:
tenant_id:
description:
- The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault.
- Current keyvault C(tenant_id) value will be used if not specified.
object_id:
description:
- The object ID of a user, service principal or security group in the Azure Active Directory tenant for the vault.
- The object ID must be unique for the list of access policies.
- Please note this is not application id. Object id can be obtained by running "az ad sp show --id <application id>".
required: True
application_id:
description:
- Application ID of the client making request on behalf of a principal.
keys:
description:
- List of permissions to keys.
choices:
- 'encrypt'
- 'decrypt'
- 'wrapkey'
- 'unwrapkey'
- 'sign'
- 'verify'
- 'get'
- 'list'
- 'create'
- 'update'
- 'import'
- 'delete'
- 'backup'
- 'restore'
- 'recover'
- 'purge'
secrets:
description:
- List of permissions to secrets.
choices:
- 'get'
- 'list'
- 'set'
- 'delete'
- 'backup'
- 'restore'
- 'recover'
- 'purge'
certificates:
description:
- List of permissions to certificates.
choices:
- 'get'
- 'list'
- 'delete'
- 'create'
- 'import'
- 'update'
- 'managecontacts'
- 'getissuers'
- 'listissuers'
- 'setissuers'
- 'deleteissuers'
- 'manageissuers'
- 'recover'
- 'purge'
storage:
description:
- List of permissions to storage accounts.
enabled_for_deployment:
description:
- Property to specify whether Azure Virtual Machines are permitted to retrieve certificates stored as secrets from the key vault.
type: bool
enabled_for_disk_encryption:
description:
- Property to specify whether Azure Disk Encryption is permitted to retrieve secrets from the vault and unwrap keys.
type: bool
enabled_for_template_deployment:
description:
- Property to specify whether Azure Resource Manager is permitted to retrieve secrets from the key vault.
type: bool
enable_soft_delete:
description:
- Property to specify whether the soft delete functionality is enabled for this key vault.
type: bool
recover_mode:
description:
- Create vault in recovery mode.
type: bool
state:
description:
            - Assert the state of the Key Vault. Use C(present) to create or update a Key Vault and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Create instance of Key Vault
azure_rm_keyvault:
resource_group: myResourceGroup
vault_name: samplekeyvault
enabled_for_deployment: yes
vault_tenant: 72f98888-8666-4144-9199-2d7cd0111111
sku:
name: standard
access_policies:
- tenant_id: 72f98888-8666-4144-9199-2d7cd0111111
object_id: 99998888-8666-4144-9199-2d7cd0111111
keys:
- get
- list
'''
RETURN = '''
id:
description:
- The Azure Resource Manager resource ID for the key vault.
returned: always
type: str
sample: id
'''
import collections
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.keyvault import KeyVaultManagementClient
from msrest.polling import LROPoller
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMVaults(AzureRMModuleBase):
"""Configuration class for an Azure RM Key Vault resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
vault_name=dict(
type='str',
required=True
),
location=dict(
type='str'
),
vault_tenant=dict(
type='str'
),
sku=dict(
type='dict'
),
access_policies=dict(
type='list',
elements='dict',
options=dict(
tenant_id=dict(type='str'),
object_id=dict(type='str', required=True),
application_id=dict(type='str'),
# FUTURE: add `choices` support once choices supports lists of values
keys=dict(type='list'),
secrets=dict(type='list'),
certificates=dict(type='list'),
storage=dict(type='list')
)
),
enabled_for_deployment=dict(
type='bool'
),
enabled_for_disk_encryption=dict(
type='bool'
),
enabled_for_template_deployment=dict(
type='bool'
),
enable_soft_delete=dict(
type='bool'
),
recover_mode=dict(
type='bool'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.module_required_if = [['state', 'present', ['vault_tenant']]]
self.resource_group = None
self.vault_name = None
self.parameters = dict()
self.tags = None
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.to_do = Actions.NoAction
super(AzureRMVaults, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True,
required_if=self.module_required_if)
def exec_module(self, **kwargs):
"""Main module execution method"""
# translate Ansible input to SDK-formatted dict in self.parameters
for key in list(self.module_arg_spec.keys()) + ['tags']:
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
if key == "location":
self.parameters["location"] = kwargs[key]
elif key == "vault_tenant":
self.parameters.setdefault("properties", {})["tenant_id"] = kwargs[key]
elif key == "sku":
self.parameters.setdefault("properties", {})["sku"] = kwargs[key]
elif key == "access_policies":
access_policies = kwargs[key]
for policy in access_policies:
if 'keys' in policy:
policy.setdefault("permissions", {})["keys"] = policy["keys"]
policy.pop("keys", None)
if 'secrets' in policy:
policy.setdefault("permissions", {})["secrets"] = policy["secrets"]
policy.pop("secrets", None)
if 'certificates' in policy:
policy.setdefault("permissions", {})["certificates"] = policy["certificates"]
policy.pop("certificates", None)
if 'storage' in policy:
policy.setdefault("permissions", {})["storage"] = policy["storage"]
policy.pop("storage", None)
if policy.get('tenant_id') is None:
# default to key vault's tenant, since that's all that's currently supported anyway
policy['tenant_id'] = kwargs['vault_tenant']
self.parameters.setdefault("properties", {})["access_policies"] = access_policies
elif key == "enabled_for_deployment":
self.parameters.setdefault("properties", {})["enabled_for_deployment"] = kwargs[key]
elif key == "enabled_for_disk_encryption":
self.parameters.setdefault("properties", {})["enabled_for_disk_encryption"] = kwargs[key]
elif key == "enabled_for_template_deployment":
self.parameters.setdefault("properties", {})["enabled_for_template_deployment"] = kwargs[key]
elif key == "enable_soft_delete":
self.parameters.setdefault("properties", {})["enable_soft_delete"] = kwargs[key]
elif key == "recover_mode":
self.parameters.setdefault("properties", {})["create_mode"] = 'recover' if kwargs[key] else 'default'
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(KeyVaultManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version="2018-02-14")
resource_group = self.get_resource_group(self.resource_group)
if "location" not in self.parameters:
self.parameters["location"] = resource_group.location
old_response = self.get_keyvault()
if not old_response:
self.log("Key Vault instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("Key Vault instance already exists")
if self.state == 'absent':
self.to_do = Actions.Delete
elif self.state == 'present':
self.log("Need to check if Key Vault instance has to be deleted or may be updated")
if ('location' in self.parameters) and (self.parameters['location'] != old_response['location']):
self.to_do = Actions.Update
elif ('tenant_id' in self.parameters) and (self.parameters['tenant_id'] != old_response['tenant_id']):
self.to_do = Actions.Update
elif ('enabled_for_deployment' in self.parameters) and (self.parameters['enabled_for_deployment'] != old_response['enabled_for_deployment']):
self.to_do = Actions.Update
elif (('enabled_for_disk_encryption' in self.parameters) and
                      (self.parameters['enabled_for_disk_encryption'] != old_response['enabled_for_disk_encryption'])):
self.to_do = Actions.Update
elif (('enabled_for_template_deployment' in self.parameters) and
(self.parameters['enabled_for_template_deployment'] != old_response['enabled_for_template_deployment'])):
self.to_do = Actions.Update
                elif ('enable_soft_delete' in self.parameters) and (self.parameters['enable_soft_delete'] != old_response['enable_soft_delete']):
self.to_do = Actions.Update
elif ('create_mode' in self.parameters) and (self.parameters['create_mode'] != old_response['create_mode']):
self.to_do = Actions.Update
elif 'access_policies' in self.parameters['properties']:
if len(self.parameters['properties']['access_policies']) != len(old_response['properties']['access_policies']):
self.to_do = Actions.Update
else:
# FUTURE: this list isn't really order-dependent- we should be set-ifying the rules list for order-independent comparison
for i in range(len(old_response['properties']['access_policies'])):
n = self.parameters['properties']['access_policies'][i]
o = old_response['properties']['access_policies'][i]
if n.get('tenant_id', False) != o.get('tenant_id', False):
self.to_do = Actions.Update
break
if n.get('object_id', None) != o.get('object_id', None):
self.to_do = Actions.Update
break
if n.get('application_id', None) != o.get('application_id', None):
self.to_do = Actions.Update
break
if sorted(n.get('keys', [])) != sorted(o.get('keys', [])):
self.to_do = Actions.Update
break
if sorted(n.get('secrets', [])) != sorted(o.get('secrets', [])):
self.to_do = Actions.Update
break
if sorted(n.get('certificates', [])) != sorted(o.get('certificates', [])):
self.to_do = Actions.Update
break
if sorted(n.get('storage', [])) != sorted(o.get('storage', [])):
self.to_do = Actions.Update
break
update_tags, newtags = self.update_tags(old_response.get('tags', dict()))
if update_tags:
self.to_do = Actions.Update
self.tags = newtags
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the Key Vault instance")
if self.check_mode:
self.results['changed'] = True
return self.results
self.parameters["tags"] = self.tags
response = self.create_update_keyvault()
if not old_response:
self.results['changed'] = True
else:
                self.results['changed'] = (old_response != response)
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("Key Vault instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_keyvault()
            # make sure the instance is actually deleted; for some Azure resources the
            # instance hangs around for a while after deletion -- this should really be fixed in Azure
while self.get_keyvault():
time.sleep(20)
else:
self.log("Key Vault instance unchanged")
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
return self.results
def create_update_keyvault(self):
'''
Creates or updates Key Vault with the specified configuration.
:return: deserialized Key Vault instance state dictionary
'''
self.log("Creating / Updating the Key Vault instance {0}".format(self.vault_name))
try:
response = self.mgmt_client.vaults.create_or_update(resource_group_name=self.resource_group,
vault_name=self.vault_name,
parameters=self.parameters)
if isinstance(response, LROPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the Key Vault instance.')
self.fail("Error creating the Key Vault instance: {0}".format(str(exc)))
return response.as_dict()
def delete_keyvault(self):
'''
Deletes specified Key Vault instance in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the Key Vault instance {0}".format(self.vault_name))
try:
response = self.mgmt_client.vaults.delete(resource_group_name=self.resource_group,
vault_name=self.vault_name)
except CloudError as e:
self.log('Error attempting to delete the Key Vault instance.')
self.fail("Error deleting the Key Vault instance: {0}".format(str(e)))
return True
def get_keyvault(self):
'''
Gets the properties of the specified Key Vault.
:return: deserialized Key Vault instance state dictionary
'''
self.log("Checking if the Key Vault instance {0} is present".format(self.vault_name))
found = False
try:
response = self.mgmt_client.vaults.get(resource_group_name=self.resource_group,
vault_name=self.vault_name)
found = True
self.log("Response : {0}".format(response))
self.log("Key Vault instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the Key Vault instance.')
if found is True:
return response.as_dict()
return False
def main():
"""Main execution"""
AzureRMVaults()
if __name__ == '__main__':
main()
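# -----------------------------------------------------------------------------
# Illustrative sketch only: a distilled, standalone version of the
# access_policies translation that exec_module() performs above, so the mapping
# from Ansible suboptions to the SDK's ``properties.access_policies`` shape is
# easy to see. The function name and the example values are hypothetical and
# nothing in the module calls this helper.
def _sketch_translate_access_policies(access_policies, vault_tenant):
    translated = []
    for policy in access_policies:
        policy = dict(policy)
        # move the permission lists under a nested 'permissions' dict,
        # exactly as exec_module() does
        for perm in ('keys', 'secrets', 'certificates', 'storage'):
            if perm in policy:
                policy.setdefault('permissions', {})[perm] = policy.pop(perm)
        # default to the vault's tenant, matching exec_module()
        if policy.get('tenant_id') is None:
            policy['tenant_id'] = vault_tenant
        translated.append(policy)
    return translated
# Example (hypothetical IDs):
#   _sketch_translate_access_policies(
#       [{'object_id': '9999...', 'keys': ['get', 'list'], 'tenant_id': None}],
#       '72f9...')
#   -> [{'object_id': '9999...', 'tenant_id': '72f9...',
#        'permissions': {'keys': ['get', 'list']}}]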
|
|
"""
Normal Django models with a few custom options for configuration.
If you have custom model classes that need these options, add them here and
create a child class of the appropriate options class and your custom model class.
"""
# Django
from django.db import models
class Options():
def __init__(self, di_show=False, di_display_name=None, di_hr_primary_key=False,
di_choose=False, di_combinable=False,
*args, **kwargs):
"""
Custom options for DISBi fields.
Args:
di_show (bool): Determines whether the column should be
included in the result table.
di_display_name (str): Will be used as column header in the result table.
di_hr_primary_key (bool): Determines whether the column should
be used for identifying rows. If true column must be unique
and may not be `null` or `blank`. Only one di_hr_primary_key
is allowed per model.
TODO: enforce this
"""
self.di_show = di_show
self.di_display_name = di_display_name
self.di_hr_primary_key = di_hr_primary_key
self.di_choose = di_choose
self.di_combinable = di_combinable
super().__init__(*args, **kwargs)
class RelationshipOptions():
def __init__(self, to, di_show=False, di_display_name=None, di_hr_primary_key=False,
di_choose=False, di_combinable=False,
*args, **kwargs):
"""
Custom options for DISBi relationship fields, which have a different
signature than normal fields.
Args:
di_show (bool): Determines whether the column should be
included in the result table.
di_display_name (str): Will be used as column header in the result table.
di_hr_primary_key (bool): Determines whether the column should
be used for identifying rows. If true column must be unique
and may not be `null` or `blank`. Only one di_hr_primary_key
is allowed per model.
TODO: enforce this
"""
self.di_show = di_show
        self.di_display_name = di_display_name
self.di_hr_primary_key = di_hr_primary_key
self.di_choose = di_choose
self.di_combinable = di_combinable
super().__init__(to, *args, **kwargs)
class ExcludeOptions(Options):
"""
Adds the `exclude` option, to exclude rows where this field
evaluates to `False`. Should be only used on Bool fields.
"""
def __init__(self, di_exclude=False, di_show=False, di_display_name=None,
di_hr_primary_key=False, di_choose=False, di_combinable=False,
*args, **kwargs):
self.di_exclude = di_exclude
super().__init__(di_show, di_display_name, di_hr_primary_key, di_choose,
                         di_combinable,
*args, **kwargs)
class FloatField(Options, models.FloatField):
"""
FloatField with custom DISBi options.
"""
pass
class BigIntegerField(Options, models.BigIntegerField):
"""
BigIntegerField with custom DISBi options.
"""
pass
class BinaryField(Options, models.BinaryField):
"""
BinaryField with custom DISBi options.
"""
pass
class CommaSeparatedIntegerField(Options, models.CommaSeparatedIntegerField):
"""
CommaSeparatedIntegerField with custom DISBi options.
"""
pass
class CharField(Options, models.CharField):
"""
CharField with custom DISBi options.
"""
pass
class DateField(Options, models.DateField):
"""
DateField with custom DISBi options.
"""
pass
class DateTimeField(Options, models.DateTimeField):
"""
DateTimeField with custom DISBi options.
"""
pass
class DecimalField(Options, models.DecimalField):
"""
DecimalField with custom DISBi options.
"""
pass
class DurationField(Options, models.DurationField):
"""
DurationField with custom DISBi options.
"""
pass
class EmailField(Options, models.EmailField):
"""
EmailField with custom DISBi options.
"""
pass
class FileField(Options, models.FileField):
"""
FileField with custom DISBi options.
"""
pass
class FilePathField(Options, models.FilePathField):
"""
FilePathField with custom DISBi options.
"""
pass
class ImageField(Options, models.ImageField):
"""
ImageField with custom DISBi options.
"""
pass
class IntegerField(Options, models.IntegerField):
"""
IntegerField with custom DISBi options.
"""
pass
class GenericIPAddressField(Options, models.GenericIPAddressField):
"""
GenericIPAddressField with custom DISBi options.
"""
pass
class PositiveIntegerField(Options, models.PositiveIntegerField):
"""
PositiveIntegerField with custom DISBi options.
"""
pass
class PositiveSmallIntegerField(Options, models.PositiveSmallIntegerField):
"""
PositiveSmallIntegerField with custom DISBi options.
"""
pass
class SlugField(Options, models.SlugField):
"""
SlugField with custom DISBi options.
"""
pass
class SmallIntegerField(Options, models.SmallIntegerField):
"""
SmallIntegerField with custom DISBi options.
"""
pass
class TextField(Options, models.TextField):
"""
TextField with custom DISBi options.
"""
pass
class TimeField(Options, models.TimeField):
"""
TimeField with custom DISBi options.
"""
pass
class URLField(Options, models.URLField):
"""
URLField with custom DISBi options.
"""
pass
class UUIDField(Options, models.UUIDField):
"""
UUIDField with custom DISBi options.
"""
pass
class ForeignKey(RelationshipOptions, models.ForeignKey):
"""
ForeignKey with custom DISBi options.
"""
pass
class ManyToManyField(RelationshipOptions, models.ManyToManyField):
"""
ManyToManyField with custom DISBi options.
"""
pass
class OneToOneField(RelationshipOptions, models.OneToOneField):
"""
OneToOneField with custom DISBi options.
"""
pass
class NullBooleanField(ExcludeOptions, models.NullBooleanField):
"""
NullBooleanField with custom DISBi and exclude options.
"""
pass
class BooleanField(ExcludeOptions, models.BooleanField):
"""
BooleanField with custom DISBi and exclude options.
"""
pass
class EmptyCharField(Options, models.CharField):
"""
    CharField with custom DISBi options and the option to add an
empty value displayer.
"""
def __init__(self, di_empty=None, di_show=True, di_display_name=None, di_hr_primary_key=False,
di_choose=False, di_combinable=False,
*args, **kwargs):
self.di_empty = di_empty
super().__init__(di_show, di_display_name, di_hr_primary_key, di_choose, di_combinable,
*args, **kwargs)
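# -----------------------------------------------------------------------------
# Illustrative usage sketch, kept as a comment so importing this module stays
# free of side effects (declaring a model here would require an installed app).
# The model and field names are hypothetical; the import path of this module is
# assumed to be ``disbi.models``.
#
#   from django.db import models as django_models
#   from disbi import models as disbi_models
#
#   class Gene(django_models.Model):
#       locus_tag = disbi_models.CharField(max_length=50, unique=True,
#                                          di_show=True, di_hr_primary_key=True)
#       product = disbi_models.EmptyCharField(max_length=255, blank=True,
#                                             di_show=True, di_empty='-')
#       is_reviewed = disbi_models.BooleanField(default=False, di_exclude=True)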
|
|
#!/usr/bin/env python
import base64
import logging
import urllib.request, urllib.parse, urllib.error
import grequests
try:
import simplejson as json
except ImportError:
import json
log = logging.getLogger(__name__)
class CollinsClient:
"""
    This client helps you interface with Collins in a meaningful way,
    giving access to the different APIs that Collins exposes.
"""
def __init__(self, username, passwd, host):
self.username = username
self.passwd = passwd
self.host = host
def async_update_asset(self, tag, params=None):
if params is None:
params = {}
url = "/api/asset/%s" % tag
return grequests.post(self.host+url, auth=(self.username, self.passwd), data=params)
def async_asset_finder(self, params=None):
if params is None:
params = {}
url = "/api/assets"
return grequests.get(self.host+url, auth=(self.username, self.passwd), params=params)
def assets(self, params=None):
"""
Finds assets matching the following criteria:
attribute - string, optional. Specified as keyname;valuename. keyname can be a reserved meta tag such as CPU_COUNT, MEMORY_SIZE_BYTES, etc.
type - string, optional. One of SERVER_NODE, SERVER_CHASSIS, etc.
status - string, optional. One of New, Incomplete, etc.
createdAfter - ISO8601 date, optional.
createdBefore - ISO8601 date, optional.
updatedAfter - ISO8601 date, optional.
updatedBefore - ISO8601 date, optional.
#TODO add pagination support
"""
if params is None:
params = {}
url = "/api/assets"
response = self._query("get", url, params)
return response
def create_asset(self, tag, params=None):
"""
"""
if params is None:
params = {}
url = "/api/asset/%s" % tag
response = self._query("put", url, params)
return response
def update_asset(self, tag, params=None):
"""
"""
if params is None:
params = {}
url = "/api/asset/%s" % tag
response = self._query("post", url, params)
return response
def delete_asset(self, tag, params=None):
"""
"""
if params is None:
params = {}
url = "/api/asset/%s" % tag
response = self._query("delete", url, params)
return response
def delete_asset_attribute(self, tag, attribute):
"""
"""
url = "/api/asset/%s/attribute/%s" % (tag, attribute)
response = self._query("delete", url, {})
return response
def asset_finder(self, params=None):
if params is None:
params = {}
url = "/api/assets"
response = self._query("get", url, params)
return response
def asset_info(self, tag, params=None):
"""
"""
if params is None:
params = {}
url = "/api/asset/%s" % tag
response = self._query("get", url, params)
return response
def assets_logs(self, tag, params=None):
"""
"""
if params is None:
params = {}
url = "/api/asset/%s/logs" % tag
response = self._query("get", url, params)
return response
def create_assets_log(self, tag, params=None):
"""
"""
if params is None:
params = {}
url = "/api/asset/%s/log" % tag
response = self._query("put", url, params)
return response
def ping(self):
url = "/api/ping"
response = self._query("get", url, {})
return response
def create_assettype(self, name, label):
"""
Description Create a new asset type
Request PUT /api/assettype/:name
Permission controllers.AssetTypeApi.createAssetType
Parameters
Name Type Description
name String Must be alphanumeric (but can include underscores and dashes) and unique
label String Human readable label for the asset type. 2-32 characters.
Response Codes
Code Reason
201 Asset type was successfully created
409 Asset type with specified name already exists
"""
url = "/api/assettype/{0}".format(name)
params = {'label': label}
response = self._query("put", url, params)
return response
def update_assettype(self, name, label=None, newname=None):
"""
Description Update an asset type
Request POST /api/assettype/:tag
Permission controllers.AssetTypeApi.updateAssetType
Parameters
Name Type Description
name String New name of the asset type (i.e. SERVICE). All uppercase, 2-32 chars.
label String New label of the asset type. 2-32 chars.
Response Codes
Code Reason
200 Asset type updated successfully
404 The specified asset type was not found
"""
url = "/api/assettype/{0}".format(name)
params = {}
if label:
params['label'] = label
if newname:
params['name'] = newname
response = self._query("post", url, params)
return response
def get_assettype(self, name):
"""
Description Get an asset type by name
Request GET /api/assettype/:name
Permission controllers.AssetTypeApi.getAssetType
Parameters
Name Type Description
name String Must be alphanumeric (but can include underscores and dashes) and unique
Response Codes
Code Reason
200 Asset type was found
404 Asset type could not be found
"""
url = "/api/assettype/{0}".format(name)
response = self._query("get", url)
return response
def delete_assettype(self, name):
""" Delete the specified asset type
:param name: Asset unique name
:return dict
Request DELETE /api/assettype/:name
Permission controllers.AssetTypeApi.deleteAssetType
Parameters
Name Type Description
name String Must be alphanumeric (but can include underscores and dashes) and unique
Response Codes
Code Reason
200 Asset type has been deleted
404 Asset type not found
409 System asset types cannot be deleted
500 Asset type unable to be deleted (Assets of this type still exist?)
"""
url = "/api/assettype/{0}".format(name)
response = self._query("delete", url)
return response
def ensure_assettype(self, name, label):
""" Ensure assettype exists.
:param name: Asset type name
:param label: Asset type descriptive label
:return: dict
"""
try:
response = self.create_assettype(name, label)
except urllib.error.HTTPError as e:
            if e.code == 409:
                response = {'status': 'success:exists',
                            'data': {'SUCCESS': True}}
            else:
                response = {'status': 'failure:{0}'.format(e.code),
                            'data': {'SUCCESS': False}}
return response
def ensure_asset(self, tag, params=None):
""" Ensure asset exists
:param tag: Unique asset tag
:param params: dict
:return: dict
"""
if params is None:
params = {}
try:
response = self.create_asset(tag, params)
except urllib.error.HTTPError as e:
if e.code == 409:
response = dict(status='success:exists',
data={'SUCCESS': True})
else:
response = dict(status='failure:{0}'.format(e.code),
data={'SUCCESS': False})
if not response['status'].startswith('success'):
log.warning(response['status'])
return response
def soft_update(self, tag, key, value):
old_record = self.asset_info(tag)
# everything from the API is a string
value = str(value)
update = True
old = None
if 'ATTRIBS' in list(old_record['data'].keys()):
# If no attributes have ever been stored, [u'0'] doesn't
# exist.
log.debug(len(old_record['data']['ATTRIBS']))
if len(old_record['data']['ATTRIBS']) > 0:
attribs = old_record['data']['ATTRIBS']['0']
else:
attribs = {}
if key.upper() in list(attribs.keys()):
old = attribs[key.upper()]
if old == value:
update = False
# Never erase
if value == '' or value == 'None':
update = False
if update:
log.debug('{0}: Will update {1} from {2} to {3}'.format(tag, key, old, value))
self.update_asset(tag, {'attribute': '{0};{1}'.format(key, value)})
else:
log.debug('{0}: No change to {1}, no update needed'.format(tag, key))
def _query(self, method, url, params=None):
"""
"""
if params is None:
params = {}
handle = urllib.request.build_opener(urllib.request.HTTPHandler)
        # TODO: eventually make this a little more robust
if method in ['post', 'put']:
            request = urllib.request.Request(
                self.host+url,
                data=urllib.parse.urlencode(params, doseq=True).encode('utf-8'))
else:
if params:
url += "?" + urllib.parse.urlencode(params, doseq=True)
request = urllib.request.Request(self.host+url)
authstring = base64.encodebytes(('%s:%s' % (self.username, self.passwd)).encode('ascii')).strip()
request.add_header("Authorization", "Basic %s" % authstring.decode('ascii'))
# Python does not support case statements
# This will override the request method, defaulting to get
request.get_method = {
"get" : lambda: "GET",
"post" : lambda: "POST",
"put" : lambda: "PUT",
"delete" : lambda: "DELETE"
        }.get(method, lambda: "GET")
        # TODO: make response handling a little more robust
response = handle.open(request).read()
response = json.loads(response)
return response
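# Usage sketch: a minimal, hypothetical example of driving CollinsClient.
# The host, credentials, and asset tag below are placeholders, not real values.
if __name__ == "__main__":
    client = CollinsClient("api_user", "api_password", "http://collins.example.com")
    # Create the asset if it does not already exist, then set one attribute.
    client.ensure_asset("example-tag-001")
    client.soft_update("example-tag-001", "rack_position", "A-12")
    # Read the asset back and print the decoded JSON response.
    print(client.asset_info("example-tag-001"))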
|
|
#!/usr/bin/env trial
import copy
import difflib
import logging
import mock
import os
import sys
from absl import flags as gflags
from ct.client import log_client
from ct.client.db import sqlite_connection as sqlitecon
from ct.client.db import sqlite_log_db
from ct.client import state
from ct.client import monitor
from ct.crypto import error
from ct.crypto import merkle
from ct.proto import client_pb2
from twisted.internet import defer
from twisted.trial import unittest
from twisted.web import iweb
from zope.interface import implements
FLAGS = gflags.FLAGS
# TODO(ekasper): make this setup common to all tests.
gflags.DEFINE_bool("verbose_tests", False, "Print test logs")
def dummy_compute_projected_sth(old_sth):
sth = client_pb2.SthResponse()
sth.timestamp = old_sth.timestamp
sth.tree_size = size = old_sth.tree_size
tree = merkle.CompactMerkleTree(
merkle.TreeHasher(), size, ["a"] * merkle.count_bits_set(size))
f = mock.Mock(return_value=(sth, tree))
f.dummy_sth = sth
f.dummy_tree = tree
old_sth.sha256_root_hash = tree.root_hash()
return f
# TODO(robpercival): This is a relatively complicated fake, and may hide subtle
# bugs in how the Monitor interacts with the real EntryProducer. Using the real
# EntryProducer with a FakeAgent, as async_log_client_test does, may be an
# improvement.
class FakeEntryProducer(object):
def __init__(self, start, end, batch_size=None, throw=None):
self._start = start
self._end = end
self._real_start = start
self._real_end = end
self.throw = throw
self.batch_size = batch_size if batch_size else end - start + 1
self.stop = False
@defer.deferredGenerator
def produce(self):
if self.throw:
raise self.throw
for i in range(self._start, self._end, self.batch_size):
entries = []
for j in range(i, min(i + self.batch_size, self._end)):
entry = client_pb2.EntryResponse()
entry.leaf_input = "leaf_input-%d" % j
entry.extra_data = "extra_data-%d" % j
entries.append(entry)
d = self.consumer.consume(entries)
wfd = defer.waitForDeferred(d)
yield wfd
wfd.getResult()
if self.stop:
break
if not self.stop:
self.done.callback(self._end - self._start + 1)
def startProducing(self, consumer):
self.stop = False
self._start = self._real_start
self._end = self._real_end
self.consumer = consumer
self.done = defer.Deferred()
d = self.produce()
d.addErrback(self.stopProducing)
return self.done
def change_range_after_start(self, start, end):
"""Changes query interval exactly when startProducing is ran.
EntryConsumer in Monitor uses Producer interval, so in one of the tests
we have to be able to change that interval when producing is started,
but after consumer is created."""
self._real_start = start
self._real_end = end
def stopProducing(self, failure=None):
self.stop = True
if failure:
self.done.errback(failure)
class FakeLogClient(object):
def __init__(self, sth, servername="log_server", batch_size=None,
get_entries_throw=None):
self.servername = servername
self.sth = sth
self.batch_size = batch_size
self.get_entries_throw = get_entries_throw
def get_sth(self):
d = defer.Deferred()
d.callback(self.sth)
return d
def get_entries(self, start, end):
return FakeEntryProducer(start, end, self.batch_size,
self.get_entries_throw)
def get_sth_consistency(self, old_tree, new_tree):
d = defer.Deferred()
d.callback([])
return d
class InMemoryStateKeeper(object):
def __init__(self, state=None):
self.state = state
def write(self, state):
self.state = state
def read(self, state_type):
if not self.state:
raise state.FileNotFoundError("Boom!")
return_state = state_type()
return_state.CopyFrom(self.state)
return return_state
class MonitorTest(unittest.TestCase):
_DEFAULT_STH = client_pb2.SthResponse()
_DEFAULT_STH.timestamp = 2000
_DEFAULT_STH.tree_size = 10
_DEFAULT_STH.tree_head_signature = "sig"
_DEFAULT_STH_compute_projected = dummy_compute_projected_sth(_DEFAULT_STH)
_NEW_STH = client_pb2.SthResponse()
_NEW_STH.timestamp = 3000
_NEW_STH.tree_size = _DEFAULT_STH.tree_size + 10
_NEW_STH.tree_head_signature = "sig2"
_NEW_STH_compute_projected = dummy_compute_projected_sth(_NEW_STH)
_DEFAULT_STATE = client_pb2.MonitorState()
_DEFAULT_STATE.verified_sth.CopyFrom(_DEFAULT_STH)
_DEFAULT_STH_compute_projected.dummy_tree.save(
_DEFAULT_STATE.unverified_tree)
_DEFAULT_STH_compute_projected.dummy_tree.save(
_DEFAULT_STATE.verified_tree)
def setUp(self):
if not FLAGS.verbose_tests:
logging.disable(logging.CRITICAL)
self.db = sqlite_log_db.SQLiteLogDB(
sqlitecon.SQLiteConnectionManager(":memory:", keepalive=True))
        # We can't simply use a DB in memory with keepalive True, because a
        # different thread is writing to the database, which results in an
        # sqlite exception.
self.cert_db = mock.MagicMock()
self.state_keeper = InMemoryStateKeeper(copy.deepcopy(self._DEFAULT_STATE))
self.verifier = mock.Mock()
self.hasher = merkle.TreeHasher()
# Make sure the DB knows about the default log server.
log = client_pb2.CtLogMetadata()
log.log_server = "log_server"
self.db.add_log(log)
def verify_state(self, expected_state):
if self.state_keeper.state != expected_state:
state_diff = difflib.unified_diff(
str(expected_state).splitlines(),
str(self.state_keeper.state).splitlines(),
fromfile="expected", tofile="actual", lineterm="", n=5)
raise unittest.FailTest("State is incorrect\n" +
"\n".join(state_diff))
def verify_tmp_data(self, start, end):
# TODO: we are no longer using the temp db
# all the callsites should be updated to test the main db instead
pass
def create_monitor(self, client, skip_scan_entry=True):
m = monitor.Monitor(client, self.verifier, self.hasher, self.db,
self.cert_db, 7, self.state_keeper)
        if skip_scan_entry:
m._scan_entries = mock.Mock()
return m
def check_db_state_after_successful_updates(self, number_of_updates):
audited_sths = list(self.db.scan_latest_sth_range("log_server"))
for index, audited_sth in enumerate(audited_sths):
if index % 2 != 0:
self.assertEqual(client_pb2.UNVERIFIED,
audited_sth.audit.status)
else:
self.assertEqual(client_pb2.VERIFIED,
audited_sth.audit.status)
self.assertEqual(len(audited_sths), number_of_updates * 2)
def test_update(self):
client = FakeLogClient(self._NEW_STH)
m = self.create_monitor(client)
m._compute_projected_sth_from_tree = self._NEW_STH_compute_projected
def check_state(result):
# Check that we wrote the state...
expected_state = client_pb2.MonitorState()
expected_state.verified_sth.CopyFrom(self._NEW_STH)
m._compute_projected_sth_from_tree.dummy_tree.save(
expected_state.verified_tree)
m._compute_projected_sth_from_tree.dummy_tree.save(
expected_state.unverified_tree)
self.verify_state(expected_state)
self.verify_tmp_data(self._DEFAULT_STH.tree_size,
self._NEW_STH.tree_size-1)
self.check_db_state_after_successful_updates(1)
for audited_sth in self.db.scan_latest_sth_range(m.servername):
self.assertEqual(self._NEW_STH, audited_sth.sth)
return m.update().addCallback(self.assertTrue).addCallback(check_state)
def test_first_update(self):
client = FakeLogClient(self._DEFAULT_STH)
self.state_keeper.state = None
m = self.create_monitor(client)
m._compute_projected_sth_from_tree = self._DEFAULT_STH_compute_projected
def check_state(result):
# Check that we wrote the state...
self.verify_state(self._DEFAULT_STATE)
self.verify_tmp_data(0, self._DEFAULT_STH.tree_size-1)
self.check_db_state_after_successful_updates(1)
for audited_sth in self.db.scan_latest_sth_range(m.servername):
self.assertEqual(self._DEFAULT_STH, audited_sth.sth)
d = m.update().addCallback(self.assertTrue
).addCallback(check_state)
return d
def test_update_no_new_entries(self):
client = FakeLogClient(self._DEFAULT_STH)
m = self.create_monitor(client)
d = m.update()
d.addCallback(self.assertTrue)
def check_state(result):
# Check that we kept the state...
self.verify_state(self._DEFAULT_STATE)
# ...and wrote no entries.
self.check_db_state_after_successful_updates(0)
d.addCallback(check_state)
return d
def test_update_recovery(self):
client = FakeLogClient(self._NEW_STH)
# Setup initial state to be as though an update had failed part way
# through.
initial_state = copy.deepcopy(self._DEFAULT_STATE)
initial_state.pending_sth.CopyFrom(self._NEW_STH)
self._NEW_STH_compute_projected.dummy_tree.save(
initial_state.unverified_tree)
self.state_keeper.write(initial_state)
m = self.create_monitor(client)
m._compute_projected_sth_from_tree = self._NEW_STH_compute_projected
d = m.update()
d.addCallback(self.assertTrue)
def check_state(result):
# Check that we wrote the state...
expected_state = copy.deepcopy(initial_state)
expected_state.ClearField("pending_sth")
expected_state.verified_sth.CopyFrom(self._NEW_STH)
m._compute_projected_sth_from_tree.dummy_tree.save(
expected_state.verified_tree)
m._compute_projected_sth_from_tree.dummy_tree.save(
expected_state.unverified_tree)
self.verify_state(expected_state)
self.check_db_state_after_successful_updates(1)
for audited_sth in self.db.scan_latest_sth_range(m.servername):
self.assertEqual(self._NEW_STH, audited_sth.sth)
d.addCallback(check_state)
return d
def test_update_rolls_back_unverified_tree_on_scan_error(self):
client = FakeLogClient(self._NEW_STH)
m = self.create_monitor(client)
m._compute_projected_sth_from_tree = self._NEW_STH_compute_projected
m._scan_entries = mock.Mock(side_effect=ValueError("Boom!"))
def check_state(result):
# The changes to the unverified tree should have been discarded,
# so that entries are re-fetched and re-consumed next time.
expected_state = copy.deepcopy(self._DEFAULT_STATE)
expected_state.pending_sth.CopyFrom(self._NEW_STH)
self.verify_state(expected_state)
# The new STH should have been verified prior to the error.
audited_sths = list(self.db.scan_latest_sth_range(m.servername))
self.assertEqual(len(audited_sths), 2)
self.assertEqual(audited_sths[0].audit.status, client_pb2.VERIFIED)
self.assertEqual(audited_sths[1].audit.status, client_pb2.UNVERIFIED)
return m.update().addCallback(self.assertFalse).addCallback(check_state)
def test_update_call_sequence(self):
# Test that update calls update_sth and update_entries in sequence,
# and bails on first error, so we can test each of them separately.
        # Each of these callbacks checks that the right functions were called
        # and then runs the next step in the sequence of updates.
def check_calls_sth_fails(result):
m._update_sth.assert_called_once_with()
m._update_entries.assert_called_once_with()
m._update_sth.reset_mock()
m._update_entries.reset_mock()
m._update_sth.return_value = copy.deepcopy(d_false)
return m.update().addCallback(self.assertFalse)
def check_calls_entries_fail(result):
m._update_sth.assert_called_once_with()
self.assertFalse(m._update_entries.called)
m._update_sth.reset_mock()
m._update_entries.reset_mock()
m._update_sth.return_value = copy.deepcopy(d_true)
m._update_entries.return_value = copy.deepcopy(d_false)
return m.update().addCallback(self.assertFalse)
def check_calls_assert_last_calls(result):
m._update_sth.assert_called_once_with()
m._update_entries.assert_called_once_with()
client = FakeLogClient(self._DEFAULT_STH)
m = self.create_monitor(client)
d_true = defer.Deferred()
d_true.callback(True)
d_false = defer.Deferred()
d_false.callback(False)
#check regular correct update
m._update_sth = mock.Mock(return_value=copy.deepcopy(d_true))
m._update_entries = mock.Mock(return_value=copy.deepcopy(d_true))
d = m.update().addCallback(self.assertTrue)
d.addCallback(check_calls_sth_fails)
d.addCallback(check_calls_entries_fail)
d.addCallback(check_calls_assert_last_calls)
return d
def test_update_sth(self):
client = FakeLogClient(self._NEW_STH)
m = self.create_monitor(client)
def check_state(result):
# Check that we updated the state.
expected_state = copy.deepcopy(self._DEFAULT_STATE)
expected_state.pending_sth.CopyFrom(self._NEW_STH)
self.verify_state(expected_state)
audited_sths = list(self.db.scan_latest_sth_range(m.servername))
self.assertEqual(len(audited_sths), 2)
self.assertEqual(audited_sths[0].audit.status, client_pb2.VERIFIED)
self.assertEqual(audited_sths[1].audit.status, client_pb2.UNVERIFIED)
return m._update_sth().addCallback(self.assertTrue
).addCallback(check_state)
def test_update_sth_fails_for_invalid_sth(self):
client = FakeLogClient(self._NEW_STH)
self.verifier.verify_sth.side_effect = error.VerifyError("Boom!")
m = self.create_monitor(client)
def check_state(result):
# Check that we kept the state.
self.verify_state(self._DEFAULT_STATE)
self.check_db_state_after_successful_updates(0)
return m._update_sth().addCallback(self.assertFalse
).addCallback(check_state)
def test_update_sth_fails_for_stale_sth(self):
sth = client_pb2.SthResponse()
sth.CopyFrom(self._DEFAULT_STH)
sth.tree_size -= 1
sth.timestamp -= 1
client = FakeLogClient(sth)
m = self.create_monitor(client)
d = defer.Deferred()
d.callback(True)
m._verify_consistency = mock.Mock(return_value=d)
def check_state(result):
self.assertTrue(m._verify_consistency.called)
args, _ = m._verify_consistency.call_args
self.assertTrue(args[0].timestamp < args[1].timestamp)
# Check that we kept the state.
self.verify_state(self._DEFAULT_STATE)
return m._update_sth().addCallback(self.assertFalse
).addCallback(check_state)
def test_update_sth_fails_for_inconsistent_sth(self):
client = FakeLogClient(self._NEW_STH)
# The STH is in fact OK but fake failure.
self.verifier.verify_sth_consistency.side_effect = (
error.ConsistencyError("Boom!"))
m = self.create_monitor(client)
def check_state(result):
# Check that we kept the state.
self.verify_state(self._DEFAULT_STATE)
audited_sths = list(self.db.scan_latest_sth_range(m.servername))
self.assertEqual(len(audited_sths), 2)
self.assertEqual(audited_sths[0].audit.status,
client_pb2.VERIFY_ERROR)
self.assertEqual(audited_sths[1].audit.status,
client_pb2.UNVERIFIED)
for audited_sth in audited_sths:
self.assertEqual(self._DEFAULT_STH.sha256_root_hash,
audited_sth.sth.sha256_root_hash)
return m._update_sth().addCallback(self.assertFalse
).addCallback(check_state)
def test_update_sth_fails_on_client_error(self):
client = FakeLogClient(self._NEW_STH)
def get_sth():
return defer.maybeDeferred(mock.Mock(side_effect=log_client.HTTPError("Boom!")))
client.get_sth = get_sth
m = self.create_monitor(client)
def check_state(result):
# Check that we kept the state.
self.verify_state(self._DEFAULT_STATE)
self.check_db_state_after_successful_updates(0)
return m._update_sth().addCallback(self.assertFalse
).addCallback(check_state)
def test_update_entries_fails_on_client_error(self):
client = FakeLogClient(self._NEW_STH,
get_entries_throw=log_client.HTTPError("Boom!"))
client.get_entries = mock.Mock(
return_value=client.get_entries(0, self._NEW_STH.tree_size - 2))
m = self.create_monitor(client)
# Get the new STH, then try (and fail) to update entries
d = m._update_sth().addCallback(self.assertTrue)
d.addCallback(lambda x: m._update_entries()).addCallback(self.assertFalse)
def check_state(result):
# Check that we wrote no entries.
expected_state = copy.deepcopy(self._DEFAULT_STATE)
expected_state.pending_sth.CopyFrom(self._NEW_STH)
self.verify_state(expected_state)
d.addCallback(check_state)
return d
def test_update_entries_fails_not_enough_entries(self):
client = FakeLogClient(self._NEW_STH)
faker_fake_entry_producer = FakeEntryProducer(0,
self._NEW_STH.tree_size)
faker_fake_entry_producer.change_range_after_start(0, 5)
client.get_entries = mock.Mock(
return_value=faker_fake_entry_producer)
m = self.create_monitor(client)
m._compute_projected_sth = self._NEW_STH_compute_projected
# Get the new STH first.
return m._update_sth().addCallback(self.assertTrue).addCallback(
lambda x: m._update_entries().addCallback(self.assertFalse))
def test_update_entries_fails_in_the_middle(self):
client = FakeLogClient(self._NEW_STH)
faker_fake_entry_producer = FakeEntryProducer(
self._DEFAULT_STH.tree_size,
self._NEW_STH.tree_size)
faker_fake_entry_producer.change_range_after_start(
self._DEFAULT_STH.tree_size, self._NEW_STH.tree_size - 5)
client.get_entries = mock.Mock(return_value=faker_fake_entry_producer)
m = self.create_monitor(client)
m._compute_projected_sth = self._NEW_STH_compute_projected
fake_fetch = mock.MagicMock()
def try_again_with_all_entries(_):
m._fetch_entries = fake_fetch
return m._update_entries()
# Get the new STH first.
return m._update_sth().addCallback(self.assertTrue).addCallback(
lambda _: m._update_entries().addCallback(self.assertFalse)
).addCallback(try_again_with_all_entries).addCallback(lambda _:
fake_fetch.assert_called_once_with(15, 19))
if __name__ == "__main__" or __name__ == "ct.client.monitor_test":
sys.argv = FLAGS(sys.argv)
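# Running these tests (illustrative): the shebang above points at Twisted's
# ``trial`` runner, and the module name checked in the guard suggests the file
# lives at ct/client/monitor_test.py, so a typical invocation would be
# ``trial ct.client.monitor_test`` from the repository root.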
|
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shards a given test suite and runs the shards in parallel.
ShardingSupervisor processes the command line options and creates the
specified number of worker threads. These threads then run each shard of
the test in a separate process and report on the results. When all the shards
have been completed, the supervisor reprints any lines indicating a test
failure for convenience. If only one shard is to be run, a single subprocess
is started for that shard and the output is identical to gtest's output.
"""
import cStringIO
import itertools
import optparse
import os
import Queue
import random
import re
import sys
import threading
# Add tools/ to path
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_PATH, ".."))
try:
import find_depot_tools
# Fixes a bug in Windows where some shards die upon starting
# TODO(charleslee): actually fix this bug
import subprocess2 as subprocess
except ImportError:
# Unable to find depot_tools, so just use standard subprocess
import subprocess
SS_USAGE = "python %prog [options] path/to/test [gtest_args]"
SS_DEFAULT_NUM_CORES = 4
SS_DEFAULT_SHARDS_PER_CORE = 5 # num_shards = cores * SHARDS_PER_CORE
SS_DEFAULT_RUNS_PER_CORE = 1 # num_workers = cores * RUNS_PER_CORE
SS_DEFAULT_RETRY_PERCENT = 5 # --retry-failed ignored if more than 5% fail
SS_DEFAULT_TIMEOUT = 530 # Slightly less than buildbot's default 600 seconds
def DetectNumCores():
"""Detects the number of cores on the machine.
Returns:
The number of cores on the machine or DEFAULT_NUM_CORES if it could not
be found.
"""
try:
# Linux, Unix, MacOS
if hasattr(os, "sysconf"):
if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
# Linux, Unix
return int(os.sysconf("SC_NPROCESSORS_ONLN"))
else:
# OSX
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
# Windows
return int(os.environ["NUMBER_OF_PROCESSORS"])
except ValueError:
return SS_DEFAULT_NUM_CORES
def RunShard(test, num_shards, index, gtest_args, stdout, stderr):
"""Runs a single test shard in a subprocess.
Returns:
The Popen object representing the subprocess handle.
"""
args = [test]
args.extend(gtest_args)
env = os.environ.copy()
env["GTEST_TOTAL_SHARDS"] = str(num_shards)
env["GTEST_SHARD_INDEX"] = str(index)
# Use a unique log file for each shard
# Allows ui_tests to be run in parallel on the same machine
env["CHROME_LOG_FILE"] = "chrome_log_%d" % index
return subprocess.Popen(
args, stdout=stdout,
stderr=stderr,
env=env,
bufsize=0,
universal_newlines=True)
class ShardRunner(threading.Thread):
"""Worker thread that manages a single shard at a time.
Attributes:
supervisor: The ShardingSupervisor that this worker reports to.
counter: Called to get the next shard index to run.
test_start: Regex that detects when a test runs.
test_ok: Regex that detects a passing test.
test_fail: Regex that detects a failing test.
current_test: The name of the currently running test.
"""
def __init__(self, supervisor, counter, test_start, test_ok, test_fail):
"""Inits ShardRunner and sets the current test to nothing."""
threading.Thread.__init__(self)
self.supervisor = supervisor
self.counter = counter
self.test_start = test_start
self.test_ok = test_ok
self.test_fail = test_fail
self.current_test = ""
def ReportFailure(self, description, index, test_name):
"""Assembles and reports a failure line to be printed later."""
log_line = "%s (%i): %s\n" % (description, index, test_name)
self.supervisor.LogTestFailure(log_line)
def ProcessLine(self, index, line):
"""Checks a shard output line for test status, and reports a failure or
incomplete test if needed.
"""
results = self.test_start.search(line)
if results:
if self.current_test:
self.ReportFailure("INCOMPLETE", index, self.current_test)
self.current_test = results.group(1)
self.supervisor.IncrementTestCount()
return
results = self.test_ok.search(line)
if results:
self.current_test = ""
return
results = self.test_fail.search(line)
if results:
self.ReportFailure("FAILED", index, results.group(1))
self.current_test = ""
def run(self):
"""Runs shards and outputs the results.
Gets the next shard index from the supervisor, runs it in a subprocess,
and collects the output. The output is read character by character in
case the shard crashes without an ending newline. Each line is processed
as it is finished.
"""
while True:
try:
index = self.counter.get_nowait()
except Queue.Empty:
break
chars = cStringIO.StringIO()
shard_running = True
shard = RunShard(
self.supervisor.test, self.supervisor.num_shards, index,
self.supervisor.gtest_args, subprocess.PIPE, subprocess.STDOUT)
while shard_running:
char = shard.stdout.read(1)
if not char and shard.poll() is not None:
shard_running = False
chars.write(char)
if char == "\n" or not shard_running:
line = chars.getvalue()
if not line and not shard_running:
break
self.ProcessLine(index, line)
self.supervisor.LogOutputLine(index, line)
chars.close()
chars = cStringIO.StringIO()
if self.current_test:
self.ReportFailure("INCOMPLETE", index, self.current_test)
self.supervisor.ShardIndexCompleted(index)
if shard.returncode != 0:
self.supervisor.LogShardFailure(index)
class ShardingSupervisor(object):
"""Supervisor object that handles the worker threads.
Attributes:
test: Name of the test to shard.
num_shards: Total number of shards to split the test into.
num_runs: Total number of worker threads to create for running shards.
color: Indicates which coloring mode to use in the output.
original_order: True if shard output should be printed as it comes.
prefix: True if each line should indicate the shard index.
retry_percent: Integer specifying the max percent of tests to retry.
gtest_args: The options to pass to gtest.
failed_tests: List of statements from shard output indicating a failure.
failed_shards: List of shards that contained failing tests.
shards_completed: List of flags indicating which shards have finished.
shard_output: Buffer that stores the output from each shard.
test_counter: Stores the total number of tests run.
"""
SHARD_COMPLETED = object()
def __init__(self, test, num_shards, num_runs, color, original_order,
prefix, retry_percent, timeout, gtest_args):
"""Inits ShardingSupervisor with given options and gtest arguments."""
self.test = test
self.num_shards = num_shards
self.num_runs = num_runs
self.color = color
self.original_order = original_order
self.prefix = prefix
self.retry_percent = retry_percent
self.timeout = timeout
self.gtest_args = gtest_args
self.failed_tests = []
self.failed_shards = []
self.shards_completed = [False] * num_shards
self.shard_output = [Queue.Queue() for _ in range(num_shards)]
self.test_counter = itertools.count()
def ShardTest(self):
"""Runs the test and manages the worker threads.
Runs the test and outputs a summary at the end. All the tests in the
suite are run by creating (cores * runs_per_core) threads and
(cores * shards_per_core) shards. When all the worker threads have
finished, the lines saved in failed_tests are printed again. If enabled,
    any failed tests that do not have FLAKY or FAILS in their names are run
again, serially, and the results are printed.
Returns:
1 if some unexpected (not FLAKY or FAILS) tests failed, 0 otherwise.
"""
# Regular expressions for parsing GTest logs. Test names look like
# SomeTestCase.SomeTest
# SomeName/SomeTestCase.SomeTest/1
# This regex also matches SomeName.SomeTest/1 and
# SomeName/SomeTestCase.SomeTest, which should be harmless.
test_name_regex = r"((\w+/)?\w+\.\w+(/\d+)?)"
# Regex for filtering out ANSI escape codes when using color.
ansi_regex = r"(?:\x1b\[.*?[a-zA-Z])?"
test_start = re.compile(
ansi_regex + r"\[\s+RUN\s+\] " + ansi_regex + test_name_regex)
test_ok = re.compile(
ansi_regex + r"\[\s+OK\s+\] " + ansi_regex + test_name_regex)
test_fail = re.compile(
ansi_regex + r"\[\s+FAILED\s+\] " + ansi_regex + test_name_regex)
workers = []
counter = Queue.Queue()
for i in range(self.num_shards):
counter.put(i)
for i in range(self.num_runs):
worker = ShardRunner(
self, counter, test_start, test_ok, test_fail)
worker.start()
workers.append(worker)
if self.original_order:
for worker in workers:
worker.join()
else:
self.WaitForShards()
num_failed = len(self.failed_shards)
if num_failed > 0:
self.failed_shards.sort()
self.WriteText(sys.stderr,
"\nFAILED SHARDS: %s\n" % str(self.failed_shards),
"\x1b[1;5;31m")
else:
self.WriteText(sys.stderr, "\nALL SHARDS PASSED!\n", "\x1b[1;5;32m")
self.PrintSummary(self.failed_tests)
if self.retry_percent < 0:
return len(self.failed_shards) > 0
self.failed_tests = [x for x in self.failed_tests if x.find("FAILS_") < 0]
self.failed_tests = [x for x in self.failed_tests if x.find("FLAKY_") < 0]
if not self.failed_tests:
return 0
return self.RetryFailedTests()
def LogTestFailure(self, line):
"""Saves a line in the lsit of failed tests to be printed at the end."""
if line not in self.failed_tests:
self.failed_tests.append(line)
def LogShardFailure(self, index):
"""Records that a test in the given shard has failed."""
self.failed_shards.append(index)
def WaitForShards(self):
"""Prints the output from each shard in consecutive order, waiting for
the current shard to finish before starting on the next shard.
"""
try:
for shard_index in range(self.num_shards):
while True:
try:
line = self.shard_output[shard_index].get(True, self.timeout)
except Queue.Empty:
# Shard timed out, notice failure and move on.
self.LogShardFailure(shard_index)
# TODO(maruel): Print last test. It'd be simpler to have the
# processing in the main thread.
# TODO(maruel): Make sure the worker thread terminates.
sys.stdout.write('TIMED OUT\n\n')
            self.LogTestFailure(
                'FAILURE: SHARD %d TIMED OUT; %d seconds' % (
                    shard_index, self.timeout))
break
if line is self.SHARD_COMPLETED:
break
sys.stdout.write(line)
except:
sys.stdout.flush()
print >> sys.stderr, 'CAUGHT EXCEPTION: dumping remaining data:'
for shard_index in range(self.num_shards):
while True:
try:
line = self.shard_output[shard_index].get(False)
except Queue.Empty:
# Shard timed out, notice failure and move on.
self.LogShardFailure(shard_index)
break
if line is self.SHARD_COMPLETED:
break
sys.stderr.write(line)
raise
def LogOutputLine(self, index, line):
"""Either prints the shard output line immediately or saves it in the
output buffer, depending on the settings. Also optionally adds a prefix.
"""
if self.prefix:
line = "%i>%s" % (index, line)
if self.original_order:
sys.stdout.write(line)
else:
self.shard_output[index].put(line)
def IncrementTestCount(self):
"""Increments the number of tests run. This is relevant to the
--retry-percent option.
"""
self.test_counter.next()
def ShardIndexCompleted(self, index):
"""Records that a shard has finished so the output from the next shard
can now be printed.
"""
self.shard_output[index].put(self.SHARD_COMPLETED)
def RetryFailedTests(self):
"""Reruns any failed tests serially and prints another summary of the
results if no more than retry_percent failed.
"""
num_tests_run = self.test_counter.next()
if len(self.failed_tests) > self.retry_percent * num_tests_run:
sys.stderr.write("\nNOT RETRYING FAILED TESTS (too many failed)\n")
return 1
self.WriteText(sys.stderr, "\nRETRYING FAILED TESTS:\n", "\x1b[1;5;33m")
sharded_description = re.compile(r": (?:\d+>)?(.*)")
gtest_filters = [sharded_description.search(line).group(1)
for line in self.failed_tests]
failed_retries = []
for test_filter in gtest_filters:
args = [self.test, "--gtest_filter=" + test_filter]
args.extend(self.gtest_args)
rerun = subprocess.Popen(args)
rerun.wait()
if rerun.returncode != 0:
failed_retries.append(test_filter)
self.WriteText(sys.stderr, "RETRY RESULTS:\n", "\x1b[1;5;33m")
self.PrintSummary(failed_retries)
return len(failed_retries) > 0
def PrintSummary(self, failed_tests):
"""Prints a summary of the test results.
If any shards had failing tests, the list is sorted and printed. Then all
the lines that indicate a test failure are reproduced.
"""
if failed_tests:
self.WriteText(sys.stderr, "FAILED TESTS:\n", "\x1b[1;5;31m")
for line in failed_tests:
sys.stderr.write(line)
else:
self.WriteText(sys.stderr, "ALL TESTS PASSED!\n", "\x1b[1;5;32m")
def WriteText(self, pipe, text, ansi):
"""Writes the text to the pipe with the ansi escape code, if colored
output is set, for Unix systems.
"""
if self.color:
pipe.write(ansi)
pipe.write(text)
if self.color:
pipe.write("\x1b[m")
def main():
parser = optparse.OptionParser(usage=SS_USAGE)
parser.add_option(
"-n", "--shards_per_core", type="int", default=SS_DEFAULT_SHARDS_PER_CORE,
help="number of shards to generate per CPU")
parser.add_option(
"-r", "--runs_per_core", type="int", default=SS_DEFAULT_RUNS_PER_CORE,
help="number of shards to run in parallel per CPU")
parser.add_option(
"-c", "--color", action="store_true",
default=sys.platform != "win32" and sys.stdout.isatty(),
help="force color output, also used by gtest if --gtest_color is not"
" specified")
parser.add_option(
"--no-color", action="store_false", dest="color",
help="disable color output")
parser.add_option(
"-s", "--runshard", type="int", help="single shard index to run")
parser.add_option(
"--reorder", action="store_true",
help="ensure that all output from an earlier shard is printed before"
" output from a later shard")
# TODO(charleslee): for backwards compatibility with master.cfg file
parser.add_option(
"--original-order", action="store_true",
help="print shard output in its orginal jumbled order of execution"
" (useful for debugging flaky tests)")
parser.add_option(
"--prefix", action="store_true",
help="prefix each line of shard output with 'N>', where N is the shard"
" index (forced True when --original-order is True)")
parser.add_option(
"--random-seed", action="store_true",
help="shuffle the tests with a random seed value")
parser.add_option(
"--retry-failed", action="store_true",
help="retry tests that did not pass serially")
parser.add_option(
"--retry-percent", type="int",
default=SS_DEFAULT_RETRY_PERCENT,
help="ignore --retry-failed if more than this percent fail [0, 100]"
" (default = %i)" % SS_DEFAULT_RETRY_PERCENT)
parser.add_option(
"-t", "--timeout", type="int", default=SS_DEFAULT_TIMEOUT,
help="timeout in seconds to wait for a shard (default=%default s)")
parser.disable_interspersed_args()
(options, args) = parser.parse_args()
if not args:
parser.error("You must specify a path to test!")
if not os.path.exists(args[0]):
parser.error("%s does not exist!" % args[0])
num_cores = DetectNumCores()
if options.shards_per_core < 1:
parser.error("You must have at least 1 shard per core!")
num_shards = num_cores * options.shards_per_core
if options.runs_per_core < 1:
parser.error("You must have at least 1 run per core!")
num_runs = num_cores * options.runs_per_core
gtest_args = ["--gtest_color=%s" % {
True: "yes", False: "no"}[options.color]] + args[1:]
if options.original_order:
options.prefix = True
# TODO(charleslee): for backwards compatibility with buildbot's log_parser
if options.reorder:
options.original_order = False
options.prefix = True
if options.random_seed:
seed = random.randint(1, 99999)
gtest_args.extend(["--gtest_shuffle", "--gtest_random_seed=%i" % seed])
if options.retry_failed:
if options.retry_percent < 0 or options.retry_percent > 100:
parser.error("Retry percent must be an integer [0, 100]!")
else:
options.retry_percent = -1
if options.runshard != None:
# run a single shard and exit
if (options.runshard < 0 or options.runshard >= num_shards):
parser.error("Invalid shard number given parameters!")
shard = RunShard(
args[0], num_shards, options.runshard, gtest_args, None, None)
shard.communicate()
return shard.poll()
# shard and run the whole test
ss = ShardingSupervisor(
args[0], num_shards, num_runs, options.color, options.original_order,
options.prefix, options.retry_percent, options.timeout, gtest_args)
return ss.ShardTest()
if __name__ == "__main__":
sys.exit(main())
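# Example invocations (illustrative; the script name and test paths are placeholders):
#   python sharding_supervisor.py path/to/base_unittests
#       shard across all detected cores with the default settings
#   python sharding_supervisor.py --original-order --retry-failed path/to/base_unittests
#       print shard output as it arrives and retry failing tests serially
#   python sharding_supervisor.py --runshard=3 path/to/base_unittests
#       run only shard 3 and exit with its return code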
|
|
"""Z-Wave Constants."""
ATTR_NODE_ID = "node_id"
ATTR_VALUE_ID = "value_id"
ATTR_OBJECT_ID = "object_id"
ATTR_NAME = "name"
ATTR_SCENE_ID = "scene_id"
ATTR_BASIC_LEVEL = "basic_level"
ATTR_CONFIG_PARAMETER = "parameter"
ATTR_CONFIG_SIZE = "size"
ATTR_CONFIG_VALUE = "value"
NETWORK_READY_WAIT_SECS = 30
SERVICE_ADD_NODE = "add_node"
SERVICE_ADD_NODE_SECURE = "add_node_secure"
SERVICE_REMOVE_NODE = "remove_node"
SERVICE_CANCEL_COMMAND = "cancel_command"
SERVICE_HEAL_NETWORK = "heal_network"
SERVICE_SOFT_RESET = "soft_reset"
SERVICE_TEST_NETWORK = "test_network"
SERVICE_SET_CONFIG_PARAMETER = "set_config_parameter"
SERVICE_STOP_NETWORK = "stop_network"
SERVICE_START_NETWORK = "start_network"
SERVICE_RENAME_NODE = "rename_node"
EVENT_SCENE_ACTIVATED = "zwave.scene_activated"
EVENT_NODE_EVENT = "zwave.node_event"
EVENT_NETWORK_READY = "zwave.network_ready"
EVENT_NETWORK_COMPLETE = "zwave.network_complete"
EVENT_NETWORK_START = "zwave.network_start"
EVENT_NETWORK_STOP = "zwave.network_stop"
COMMAND_CLASS_ALARM = 113
COMMAND_CLASS_ANTITHEFT = 93
COMMAND_CLASS_APPLICATION_CAPABILITY = 87
COMMAND_CLASS_APPLICATION_STATUS = 34
COMMAND_CLASS_ASSOCIATION = 133
COMMAND_CLASS_ASSOCIATION_COMMAND_CONFIGURATION = 155
COMMAND_CLASS_ASSOCIATION_GRP_INFO = 89
COMMAND_CLASS_BARRIER_OPERATOR = 102
COMMAND_CLASS_BASIC = 32
COMMAND_CLASS_BASIC_TARIFF_INFO = 54
COMMAND_CLASS_BASIC_WINDOW_COVERING = 80
COMMAND_CLASS_BATTERY = 128
COMMAND_CLASS_CENTRAL_SCENE = 91
COMMAND_CLASS_CLIMATE_CONTROL_SCHEDULE = 70
COMMAND_CLASS_CLOCK = 129
COMMAND_CLASS_CONFIGURATION = 112
COMMAND_CLASS_CONTROLLER_REPLICATION = 33
COMMAND_CLASS_CRC_16_ENCAP = 86
COMMAND_CLASS_DCP_CONFIG = 58
COMMAND_CLASS_DCP_MONITOR = 59
COMMAND_CLASS_DEVICE_RESET_LOCALLY = 90
COMMAND_CLASS_DOOR_LOCK = 98
COMMAND_CLASS_DOOR_LOCK_LOGGING = 76
COMMAND_CLASS_ENERGY_PRODUCTION = 144
COMMAND_CLASS_ENTRY_CONTROL = 111
COMMAND_CLASS_FIRMWARE_UPDATE_MD = 122
COMMAND_CLASS_GEOGRAPHIC_LOCATION = 140
COMMAND_CLASS_GROUPING_NAME = 123
COMMAND_CLASS_HAIL = 130
COMMAND_CLASS_HRV_CONTROL = 57
COMMAND_CLASS_HRV_STATUS = 55
COMMAND_CLASS_HUMIDITY_CONTROL_MODE = 109
COMMAND_CLASS_HUMIDITY_CONTROL_OPERATING_STATE = 110
COMMAND_CLASS_HUMIDITY_CONTROL_SETPOINT = 100
COMMAND_CLASS_INDICATOR = 135
COMMAND_CLASS_IP_ASSOCIATION = 92
COMMAND_CLASS_IP_CONFIGURATION = 14
COMMAND_CLASS_IRRIGATION = 107
COMMAND_CLASS_LANGUAGE = 137
COMMAND_CLASS_LOCK = 118
COMMAND_CLASS_MAILBOX = 105
COMMAND_CLASS_MANUFACTURER_PROPRIETARY = 145
COMMAND_CLASS_MANUFACTURER_SPECIFIC = 114
COMMAND_CLASS_MARK = 239
COMMAND_CLASS_METER = 50
COMMAND_CLASS_METER_PULSE = 53
COMMAND_CLASS_METER_TBL_CONFIG = 60
COMMAND_CLASS_METER_TBL_MONITOR = 61
COMMAND_CLASS_METER_TBL_PUSH = 62
COMMAND_CLASS_MTP_WINDOW_COVERING = 81
COMMAND_CLASS_MULTI_CHANNEL = 96
COMMAND_CLASS_MULTI_CHANNEL_ASSOCIATION = 142
COMMAND_CLASS_MULTI_COMMAND = 143
COMMAND_CLASS_NETWORK_MANAGEMENT_BASIC = 77
COMMAND_CLASS_NETWORK_MANAGEMENT_INCLUSION = 52
COMMAND_CLASS_NETWORK_MANAGEMENT_PRIMARY = 84
COMMAND_CLASS_NETWORK_MANAGEMENT_PROXY = 82
COMMAND_CLASS_NO_OPERATION = 0
COMMAND_CLASS_NODE_NAMING = 119
COMMAND_CLASS_NON_INTEROPERABLE = 240
COMMAND_CLASS_NOTIFICATION = 113
COMMAND_CLASS_POWERLEVEL = 115
COMMAND_CLASS_PREPAYMENT = 63
COMMAND_CLASS_PREPAYMENT_ENCAPSULATION = 65
COMMAND_CLASS_PROPRIETARY = 136
COMMAND_CLASS_PROTECTION = 117
COMMAND_CLASS_RATE_TBL_CONFIG = 72
COMMAND_CLASS_RATE_TBL_MONITOR = 73
COMMAND_CLASS_REMOTE_ASSOCIATION_ACTIVATE = 124
COMMAND_CLASS_REMOTE_ASSOCIATION = 125
COMMAND_CLASS_SCENE_ACTIVATION = 43
COMMAND_CLASS_SCENE_ACTUATOR_CONF = 44
COMMAND_CLASS_SCENE_CONTROLLER_CONF = 45
COMMAND_CLASS_SCHEDULE = 83
COMMAND_CLASS_SCHEDULE_ENTRY_LOCK = 78
COMMAND_CLASS_SCREEN_ATTRIBUTES = 147
COMMAND_CLASS_SCREEN_MD = 146
COMMAND_CLASS_SECURITY = 152
COMMAND_CLASS_SECURITY_SCHEME0_MARK = 61696
COMMAND_CLASS_SENSOR_ALARM = 156
COMMAND_CLASS_SENSOR_BINARY = 48
COMMAND_CLASS_SENSOR_CONFIGURATION = 158
COMMAND_CLASS_SENSOR_MULTILEVEL = 49
COMMAND_CLASS_SILENCE_ALARM = 157
COMMAND_CLASS_SIMPLE_AV_CONTROL = 148
COMMAND_CLASS_SUPERVISION = 108
COMMAND_CLASS_SWITCH_ALL = 39
COMMAND_CLASS_SWITCH_BINARY = 37
COMMAND_CLASS_SWITCH_COLOR = 51
COMMAND_CLASS_SWITCH_MULTILEVEL = 38
COMMAND_CLASS_SWITCH_TOGGLE_BINARY = 40
COMMAND_CLASS_SWITCH_TOGGLE_MULTILEVEL = 41
COMMAND_CLASS_TARIFF_TBL_CONFIG = 74
COMMAND_CLASS_TARIFF_TBL_MONITOR = 75
COMMAND_CLASS_THERMOSTAT_FAN_MODE = 68
COMMAND_CLASS_THERMOSTAT_FAN_STATE = 69
COMMAND_CLASS_THERMOSTAT_MODE = 64
COMMAND_CLASS_THERMOSTAT_OPERATING_STATE = 66
COMMAND_CLASS_THERMOSTAT_SETBACK = 71
COMMAND_CLASS_THERMOSTAT_SETPOINT = 67
COMMAND_CLASS_TIME = 138
COMMAND_CLASS_TIME_PARAMETERS = 139
COMMAND_CLASS_TRANSPORT_SERVICE = 85
COMMAND_CLASS_USER_CODE = 99
COMMAND_CLASS_VERSION = 134
COMMAND_CLASS_WAKE_UP = 132
COMMAND_CLASS_ZIP = 35
COMMAND_CLASS_ZIP_NAMING = 104
COMMAND_CLASS_ZIP_ND = 88
COMMAND_CLASS_ZIP_6LOWPAN = 79
COMMAND_CLASS_ZIP_GATEWAY = 95
COMMAND_CLASS_ZIP_PORTAL = 97
COMMAND_CLASS_ZWAVEPLUS_INFO = 94
COMMAND_CLASS_WHATEVER = None # Match ALL
COMMAND_CLASS_WINDOW_COVERING = 106
GENERIC_TYPE_WHATEVER = None # Match ALL
SPECIFIC_TYPE_WHATEVER = None # Match ALL
SPECIFIC_TYPE_NOT_USED = 0 # Available in all Generic types
GENERIC_TYPE_AV_CONTROL_POINT = 3
SPECIFIC_TYPE_DOORBELL = 18
SPECIFIC_TYPE_SATELLITE_RECIEVER = 4
SPECIFIC_TYPE_SATELLITE_RECIEVER_V2 = 17
GENERIC_TYPE_DISPLAY = 4
SPECIFIC_TYPE_SIMPLE_DISPLAY = 1
GENERIC_TYPE_ENTRY_CONTROL = 64
SPECIFIC_TYPE_DOOR_LOCK = 1
SPECIFIC_TYPE_ADVANCED_DOOR_LOCK = 2
SPECIFIC_TYPE_SECURE_KEYPAD_DOOR_LOCK = 3
SPECIFIC_TYPE_SECURE_KEYPAD_DOOR_LOCK_DEADBOLT = 4
SPECIFIC_TYPE_SECURE_DOOR = 5
SPECIFIC_TYPE_SECURE_GATE = 6
SPECIFIC_TYPE_SECURE_BARRIER_ADDON = 7
SPECIFIC_TYPE_SECURE_BARRIER_OPEN_ONLY = 8
SPECIFIC_TYPE_SECURE_BARRIER_CLOSE_ONLY = 9
SPECIFIC_TYPE_SECURE_LOCKBOX = 10
SPECIFIC_TYPE_SECURE_KEYPAD = 11
GENERIC_TYPE_GENERIC_CONTROLLER = 1
SPECIFIC_TYPE_PORTABLE_CONTROLLER = 1
SPECIFIC_TYPE_PORTABLE_SCENE_CONTROLLER = 2
SPECIFIC_TYPE_PORTABLE_INSTALLER_TOOL = 3
SPECIFIC_TYPE_REMOTE_CONTROL_AV = 4
SPECIFIC_TYPE_REMOTE_CONTROL_SIMPLE = 6
GENERIC_TYPE_METER = 49
SPECIFIC_TYPE_SIMPLE_METER = 1
SPECIFIC_TYPE_ADV_ENERGY_CONTROL = 2
SPECIFIC_TYPE_WHOLE_HOME_METER_SIMPLE = 3
GENERIC_TYPE_METER_PULSE = 48
GENERIC_TYPE_NON_INTEROPERABLE = 255
GENERIC_TYPE_REPEATER_SLAVE = 15
SPECIFIC_TYPE_REPEATER_SLAVE = 1
SPECIFIC_TYPE_VIRTUAL_NODE = 2
GENERIC_TYPE_SECURITY_PANEL = 23
SPECIFIC_TYPE_ZONED_SECURITY_PANEL = 1
GENERIC_TYPE_SEMI_INTEROPERABLE = 80
SPECIFIC_TYPE_ENERGY_PRODUCTION = 1
GENERIC_TYPE_SENSOR_ALARM = 161
SPECIFIC_TYPE_ADV_ZENSOR_NET_ALARM_SENSOR = 5
SPECIFIC_TYPE_ADV_ZENSOR_NET_SMOKE_SENSOR = 10
SPECIFIC_TYPE_BASIC_ROUTING_ALARM_SENSOR = 1
SPECIFIC_TYPE_BASIC_ROUTING_SMOKE_SENSOR = 6
SPECIFIC_TYPE_BASIC_ZENSOR_NET_ALARM_SENSOR = 3
SPECIFIC_TYPE_BASIC_ZENSOR_NET_SMOKE_SENSOR = 8
SPECIFIC_TYPE_ROUTING_ALARM_SENSOR = 2
SPECIFIC_TYPE_ROUTING_SMOKE_SENSOR = 7
SPECIFIC_TYPE_ZENSOR_NET_ALARM_SENSOR = 4
SPECIFIC_TYPE_ZENSOR_NET_SMOKE_SENSOR = 9
SPECIFIC_TYPE_ALARM_SENSOR = 11
GENERIC_TYPE_SENSOR_BINARY = 32
SPECIFIC_TYPE_ROUTING_SENSOR_BINARY = 1
GENERIC_TYPE_SENSOR_MULTILEVEL = 33
SPECIFIC_TYPE_ROUTING_SENSOR_MULTILEVEL = 1
SPECIFIC_TYPE_CHIMNEY_FAN = 2
GENERIC_TYPE_STATIC_CONTROLLER = 2
SPECIFIC_TYPE_PC_CONTROLLER = 1
SPECIFIC_TYPE_SCENE_CONTROLLER = 2
SPECIFIC_TYPE_STATIC_INSTALLER_TOOL = 3
SPECIFIC_TYPE_SET_TOP_BOX = 4
SPECIFIC_TYPE_SUB_SYSTEM_CONTROLLER = 5
SPECIFIC_TYPE_TV = 6
SPECIFIC_TYPE_GATEWAY = 7
GENERIC_TYPE_SWITCH_BINARY = 16
SPECIFIC_TYPE_POWER_SWITCH_BINARY = 1
SPECIFIC_TYPE_SCENE_SWITCH_BINARY = 3
SPECIFIC_TYPE_POWER_STRIP = 4
SPECIFIC_TYPE_SIREN = 5
SPECIFIC_TYPE_VALVE_OPEN_CLOSE = 6
SPECIFIC_TYPE_COLOR_TUNABLE_BINARY = 2
SPECIFIC_TYPE_IRRIGATION_CONTROLLER = 7
GENERIC_TYPE_SWITCH_MULTILEVEL = 17
SPECIFIC_TYPE_CLASS_A_MOTOR_CONTROL = 5
SPECIFIC_TYPE_CLASS_B_MOTOR_CONTROL = 6
SPECIFIC_TYPE_CLASS_C_MOTOR_CONTROL = 7
SPECIFIC_TYPE_MOTOR_MULTIPOSITION = 3
SPECIFIC_TYPE_POWER_SWITCH_MULTILEVEL = 1
SPECIFIC_TYPE_SCENE_SWITCH_MULTILEVEL = 4
SPECIFIC_TYPE_FAN_SWITCH = 8
SPECIFIC_TYPE_COLOR_TUNABLE_MULTILEVEL = 2
GENERIC_TYPE_SWITCH_REMOTE = 18
SPECIFIC_TYPE_REMOTE_BINARY = 1
SPECIFIC_TYPE_REMOTE_MULTILEVEL = 2
SPECIFIC_TYPE_REMOTE_TOGGLE_BINARY = 3
SPECIFIC_TYPE_REMOTE_TOGGLE_MULTILEVEL = 4
GENERIC_TYPE_SWITCH_TOGGLE = 19
SPECIFIC_TYPE_SWITCH_TOGGLE_BINARY = 1
SPECIFIC_TYPE_SWITCH_TOGGLE_MULTILEVEL = 2
GENERIC_TYPE_THERMOSTAT = 8
SPECIFIC_TYPE_SETBACK_SCHEDULE_THERMOSTAT = 3
SPECIFIC_TYPE_SETBACK_THERMOSTAT = 5
SPECIFIC_TYPE_SETPOINT_THERMOSTAT = 4
SPECIFIC_TYPE_THERMOSTAT_GENERAL = 2
SPECIFIC_TYPE_THERMOSTAT_GENERAL_V2 = 6
SPECIFIC_TYPE_THERMOSTAT_HEATING = 1
GENERIC_TYPE_VENTILATION = 22
SPECIFIC_TYPE_RESIDENTIAL_HRV = 1
GENERIC_TYPE_WINDOWS_COVERING = 9
SPECIFIC_TYPE_SIMPLE_WINDOW_COVERING = 1
GENERIC_TYPE_ZIP_NODE = 21
SPECIFIC_TYPE_ZIP_ADV_NODE = 2
SPECIFIC_TYPE_ZIP_TUN_NODE = 1
GENERIC_TYPE_WALL_CONTROLLER = 24
SPECIFIC_TYPE_BASIC_WALL_CONTROLLER = 1
GENERIC_TYPE_NETWORK_EXTENDER = 5
SPECIFIC_TYPE_SECURE_EXTENDER = 1
GENERIC_TYPE_APPLIANCE = 6
SPECIFIC_TYPE_GENERAL_APPLIANCE = 1
SPECIFIC_TYPE_KITCHEN_APPLIANCE = 2
SPECIFIC_TYPE_LAUNDRY_APPLIANCE = 3
GENERIC_TYPE_SENSOR_NOTIFICATION = 7
SPECIFIC_TYPE_NOTIFICATION_SENSOR = 1
GENRE_WHATEVER = None
GENRE_USER = "User"
TYPE_WHATEVER = None
TYPE_BYTE = "Byte"
TYPE_BOOL = "Bool"
TYPE_DECIMAL = "Decimal"
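# Usage sketch: a minimal, hypothetical helper showing how the numeric command
# class constants above might be mapped back to readable names (only a few
# classes are listed here for illustration).
def describe_command_class(class_id):
    """Return a human readable name for a handful of command classes."""
    names = {
        COMMAND_CLASS_BASIC: "Basic",
        COMMAND_CLASS_BATTERY: "Battery",
        COMMAND_CLASS_DOOR_LOCK: "Door Lock",
        COMMAND_CLASS_SENSOR_MULTILEVEL: "Multilevel Sensor",
        COMMAND_CLASS_SWITCH_BINARY: "Binary Switch",
    }
    return names.get(class_id, "Unknown (0x{0:02X})".format(class_id))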
|
|
# -*- coding: utf-8 -*-
#
# :copyright: (c) 2020 by Pavlo Dmytrenko.
# :license: MIT, see LICENSE for more details.
"""
yaspin.yaspin
~~~~~~~~~~~~~
A lightweight terminal spinner.
"""
from __future__ import absolute_import
import contextlib
import functools
import itertools
import signal
import sys
import threading
import time
import colorama
from pipenv.vendor.vistir import cursor
from .base_spinner import default_spinner
from .compat import PY2, basestring, builtin_str, bytes, iteritems, str
from .constants import COLOR_ATTRS, COLOR_MAP, ENCODING, SPINNER_ATTRS
from .helpers import to_unicode
from .termcolor import colored
colorama.init()
class Yaspin(object):
"""Implements a context manager that spawns a thread
to write spinner frames into a tty (stdout) during
context execution.
"""
# When Python finds its output attached to a terminal,
# it sets the sys.stdout.encoding attribute to the terminal's encoding.
# The print statement's handler will automatically encode unicode
# arguments into bytes.
#
# In Py2 when piping or redirecting output, Python does not detect
# the desired character set of the output, it sets sys.stdout.encoding
# to None, and print will invoke the default "ascii" codec.
#
# Py3 invokes "UTF-8" codec by default.
#
    # That's why in Py2, output should be encoded manually with the desired
    # encoding in order to support pipes and redirects.
def __init__(
self,
spinner=None,
text="",
color=None,
on_color=None,
attrs=None,
reversal=False,
side="left",
sigmap=None,
):
# Spinner
self._spinner = self._set_spinner(spinner)
self._frames = self._set_frames(self._spinner, reversal)
self._interval = self._set_interval(self._spinner)
self._cycle = self._set_cycle(self._frames)
# Color Specification
self._color = self._set_color(color) if color else color
self._on_color = self._set_on_color(on_color) if on_color else on_color
self._attrs = self._set_attrs(attrs) if attrs else set()
self._color_func = self._compose_color_func()
# Other
self._text = self._set_text(text)
self._side = self._set_side(side)
self._reversal = reversal
# Helper flags
self._stop_spin = None
self._hide_spin = None
self._spin_thread = None
self._last_frame = None
self._stdout_lock = threading.Lock()
self._hidden_level = 0
# Signals
# In Python 2 signal.SIG* are of type int.
# In Python 3 signal.SIG* are enums.
#
# Signal = Union[enum.Enum, int]
# SigHandler = Union[enum.Enum, Callable]
self._sigmap = sigmap if sigmap else {} # Dict[Signal, SigHandler]
# Maps signals to their default handlers in order to reset
# custom handlers set by ``sigmap`` at the cleanup phase.
self._dfl_sigmap = {} # Dict[Signal, SigHandler]
#
# Dunders
#
def __repr__(self):
repr_ = u"<Yaspin frames={0!s}>".format(self._frames)
if PY2:
return repr_.encode(ENCODING)
return repr_
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, traceback):
# Avoid stop() execution for the 2nd time
if self._spin_thread.is_alive():
self.stop()
return False # nothing is handled
def __call__(self, fn):
@functools.wraps(fn)
def inner(*args, **kwargs):
with self:
return fn(*args, **kwargs)
return inner
def __getattr__(self, name):
# CLI spinners
if name in SPINNER_ATTRS:
from .spinners import Spinners
sp = getattr(Spinners, name)
self.spinner = sp
# Color Attributes: "color", "on_color", "attrs"
elif name in COLOR_ATTRS:
attr_type = COLOR_MAP[name]
# Call appropriate property setters;
# _color_func is updated automatically by setters.
if attr_type == "attrs":
self.attrs = [name] # calls property setter
if attr_type in ("color", "on_color"):
setattr(self, attr_type, name) # calls property setter
# Side: "left" or "right"
elif name in ("left", "right"):
self.side = name # calls property setter
# Common error for unsupported attributes
else:
raise AttributeError(
"'{0}' object has no attribute: '{1}'".format(
self.__class__.__name__, name
)
)
return self
#
# Properties
#
@property
def spinner(self):
return self._spinner
@spinner.setter
def spinner(self, sp):
self._spinner = self._set_spinner(sp)
self._frames = self._set_frames(self._spinner, self._reversal)
self._interval = self._set_interval(self._spinner)
self._cycle = self._set_cycle(self._frames)
@property
def text(self):
return self._text
@text.setter
def text(self, txt):
self._text = self._set_text(txt)
@property
def color(self):
return self._color
@color.setter
def color(self, value):
self._color = self._set_color(value) if value else value
self._color_func = self._compose_color_func() # update
@property
def on_color(self):
return self._on_color
@on_color.setter
def on_color(self, value):
self._on_color = self._set_on_color(value) if value else value
self._color_func = self._compose_color_func() # update
@property
def attrs(self):
return list(self._attrs)
@attrs.setter
def attrs(self, value):
new_attrs = self._set_attrs(value) if value else set()
self._attrs = self._attrs.union(new_attrs)
self._color_func = self._compose_color_func() # update
@property
def side(self):
return self._side
@side.setter
def side(self, value):
self._side = self._set_side(value)
@property
def reversal(self):
return self._reversal
@reversal.setter
def reversal(self, value):
self._reversal = value
self._frames = self._set_frames(self._spinner, self._reversal)
self._cycle = self._set_cycle(self._frames)
#
# Public
#
def start(self):
if self._sigmap:
self._register_signal_handlers()
if sys.stdout.isatty():
self._hide_cursor()
self._stop_spin = threading.Event()
self._hide_spin = threading.Event()
self._spin_thread = threading.Thread(target=self._spin)
self._spin_thread.start()
def stop(self):
if self._dfl_sigmap:
# Reset registered signal handlers to default ones
self._reset_signal_handlers()
if self._spin_thread:
self._stop_spin.set()
self._spin_thread.join()
sys.stdout.write("\r")
self._clear_line()
if sys.stdout.isatty():
self._show_cursor()
def hide(self):
"""Hide the spinner to allow for custom writing to the terminal."""
thr_is_alive = self._spin_thread and self._spin_thread.is_alive()
if thr_is_alive and not self._hide_spin.is_set():
with self._stdout_lock:
# set the hidden spinner flag
self._hide_spin.set()
# clear the current line
sys.stdout.write("\r")
self._clear_line()
# flush the stdout buffer so the current line
# can be rewritten to
sys.stdout.flush()
@contextlib.contextmanager
def hidden(self):
"""Hide the spinner within a block, can be nested"""
if self._hidden_level == 0:
self.hide()
self._hidden_level += 1
try:
yield
finally:
self._hidden_level -= 1
if self._hidden_level == 0:
self.show()
def show(self):
"""Show the hidden spinner."""
thr_is_alive = self._spin_thread and self._spin_thread.is_alive()
if thr_is_alive and self._hide_spin.is_set():
with self._stdout_lock:
# clear the hidden spinner flag
self._hide_spin.clear()
# clear the current line so the spinner is not appended to it
sys.stdout.write("\r")
self._clear_line()
def write(self, text):
"""Write text in the terminal without breaking the spinner."""
# similar to tqdm.write()
# https://pypi.python.org/pypi/tqdm#writing-messages
with self._stdout_lock:
sys.stdout.write("\r")
self._clear_line()
_text = to_unicode(text)
if PY2:
_text = _text.encode(ENCODING)
# Ensure output is bytes for Py2 and Unicode for Py3
assert isinstance(_text, builtin_str)
sys.stdout.write("{0}\n".format(_text))
def ok(self, text="OK"):
"""Set Ok (success) finalizer to a spinner."""
_text = text if text else "OK"
self._freeze(_text)
def fail(self, text="FAIL"):
"""Set fail finalizer to a spinner."""
_text = text if text else "FAIL"
self._freeze(_text)
#
# Protected
#
def _freeze(self, final_text):
"""Stop spinner, compose last frame and 'freeze' it."""
text = to_unicode(final_text)
self._last_frame = self._compose_out(text, mode="last")
# Should be stopped here, otherwise prints after
# self._freeze call will mess up the spinner
self.stop()
with self._stdout_lock:
sys.stdout.write(self._last_frame)
def _spin(self):
while not self._stop_spin.is_set():
if self._hide_spin.is_set():
# Wait a bit to avoid wasting cycles
time.sleep(self._interval)
continue
# Compose output
spin_phase = next(self._cycle)
out = self._compose_out(spin_phase)
# Write
with self._stdout_lock:
sys.stdout.write(out)
self._clear_line()
sys.stdout.flush()
# Wait
time.sleep(self._interval)
def _compose_color_func(self):
fn = functools.partial(
colored,
color=self._color,
on_color=self._on_color,
attrs=list(self._attrs),
)
return fn
def _compose_out(self, frame, mode=None):
# Ensure Unicode input
assert isinstance(frame, str)
assert isinstance(self._text, str)
frame = frame.encode(ENCODING) if PY2 else frame
text = self._text.encode(ENCODING) if PY2 else self._text
# Colors
if self._color_func is not None:
frame = self._color_func(frame)
# Position
if self._side == "right":
frame, text = text, frame
# Mode
if not mode:
out = "\r{0} {1}".format(frame, text)
else:
out = "{0} {1}\n".format(frame, text)
# Ensure output is bytes for Py2 and Unicode for Py3
assert isinstance(out, builtin_str)
return out
def _register_signal_handlers(self):
# SIGKILL cannot be caught or ignored, and the receiving
# process cannot perform any clean-up upon receiving this
# signal.
try:
if signal.SIGKILL in self._sigmap.keys():
raise ValueError(
"Trying to set handler for SIGKILL signal. "
"SIGKILL cannot be cought or ignored in POSIX systems."
)
except AttributeError:
pass
for sig, sig_handler in iteritems(self._sigmap):
# A handler for a particular signal, once set, remains
# installed until it is explicitly reset. Store default
# signal handlers for subsequent reset at cleanup phase.
dfl_handler = signal.getsignal(sig)
self._dfl_sigmap[sig] = dfl_handler
# ``signal.SIG_DFL`` and ``signal.SIG_IGN`` are also valid
# signal handlers and are not callables.
if callable(sig_handler):
# ``signal.signal`` accepts handler function which is
# called with two arguments: signal number and the
# interrupted stack frame. ``functools.partial`` solves
# the problem of passing spinner instance into the handler
# function.
sig_handler = functools.partial(sig_handler, spinner=self)
signal.signal(sig, sig_handler)
def _reset_signal_handlers(self):
for sig, sig_handler in iteritems(self._dfl_sigmap):
signal.signal(sig, sig_handler)
#
# Static
#
@staticmethod
def _set_color(value):
# type: (str) -> str
available_values = [k for k, v in iteritems(COLOR_MAP) if v == "color"]
if value not in available_values:
raise ValueError(
"'{0}': unsupported color value. Use one of the: {1}".format(
value, ", ".join(available_values)
)
)
return value
@staticmethod
def _set_on_color(value):
# type: (str) -> str
available_values = [
k for k, v in iteritems(COLOR_MAP) if v == "on_color"
]
if value not in available_values:
raise ValueError(
"'{0}': unsupported on_color value. "
"Use one of the: {1}".format(
value, ", ".join(available_values)
)
)
return value
@staticmethod
def _set_attrs(attrs):
# type: (List[str]) -> Set[str]
available_values = [k for k, v in iteritems(COLOR_MAP) if v == "attrs"]
for attr in attrs:
if attr not in available_values:
raise ValueError(
"'{0}': unsupported attribute value. "
"Use one of the: {1}".format(
attr, ", ".join(available_values)
)
)
return set(attrs)
@staticmethod
def _set_spinner(spinner):
if hasattr(spinner, "frames") and hasattr(spinner, "interval"):
if not spinner.frames or not spinner.interval:
sp = default_spinner
else:
sp = spinner
else:
sp = default_spinner
return sp
@staticmethod
def _set_side(side):
# type: (str) -> str
if side not in ("left", "right"):
            raise ValueError(
                "'{0}': unsupported side value. "
                "Use either 'left' or 'right'.".format(side)
            )
return side
@staticmethod
def _set_frames(spinner, reversal):
# type: (base_spinner.Spinner, bool) -> Union[str, List]
uframes = None # unicode frames
uframes_seq = None # sequence of unicode frames
if isinstance(spinner.frames, basestring):
uframes = to_unicode(spinner.frames) if PY2 else spinner.frames
# TODO (pavdmyt): support any type that implements iterable
if isinstance(spinner.frames, (list, tuple)):
# Empty ``spinner.frames`` is handled by ``Yaspin._set_spinner``
if spinner.frames and isinstance(spinner.frames[0], bytes):
uframes_seq = [to_unicode(frame) for frame in spinner.frames]
else:
uframes_seq = spinner.frames
_frames = uframes or uframes_seq
if not _frames:
# Empty ``spinner.frames`` is handled by ``Yaspin._set_spinner``.
# This code is very unlikely to be executed. However, it's still
            # here to be on the safe side.
raise ValueError(
"{0!r}: no frames found in spinner".format(spinner)
)
        # Builtin ``reversed`` returns a reverse iterator, which adds
        # unnecessary difficulty when a unicode value must be returned;
        # hence the [::-1] slice syntax.
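        # For example (a sketch): with _frames = u"abc", _frames[::-1] is
        # still a unicode string (u"cba"), whereas reversed(_frames) returns
        # an iterator that would need u"".join(reversed(_frames)) to produce
        # the same value.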
frames = _frames[::-1] if reversal else _frames
return frames
@staticmethod
def _set_interval(spinner):
# Milliseconds to Seconds
return spinner.interval * 0.001
@staticmethod
def _set_cycle(frames):
return itertools.cycle(frames)
@staticmethod
def _set_text(text):
if PY2:
return to_unicode(text)
return text
@staticmethod
def _hide_cursor():
cursor.hide_cursor()
@staticmethod
def _show_cursor():
cursor.show_cursor()
@staticmethod
def _clear_line():
sys.stdout.write(chr(27) + "[K")
|
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration test to see if artifact handling (docker image substitution and
configmap versioning) is working for the Spinnaker Kubernetes V2 integration.
"""
# Standard python modules.
import sys
import random
import string
# citest modules.
import citest.kube_testing as kube
import citest.json_contract as jc
import citest.json_predicate as jp
import citest.service_testing as st
# Spinnaker modules.
import spinnaker_testing as sk
import spinnaker_testing.gate as gate
import spinnaker_testing.frigga as frigga
import citest.base
ov_factory = jc.ObservationPredicateFactory()
class KubeV2ArtifactTestScenario(sk.SpinnakerTestScenario):
"""Defines the scenario for the kube v2 artifact test.
"""
@classmethod
def new_agent(cls, bindings):
"""Implements citest.service_testing.AgentTestScenario.new_agent."""
agent = gate.new_agent(bindings)
agent.default_max_wait_secs = 180
return agent
@classmethod
def initArgumentParser(cls, parser, defaults=None):
"""Initialize command line argument parser.
Args:
parser: argparse.ArgumentParser
"""
super(KubeV2ArtifactTestScenario, cls).initArgumentParser(
parser, defaults=defaults)
defaults = defaults or {}
parser.add_argument(
'--test_namespace', default='default',
help='The namespace to manage within the tests.')
def __init__(self, bindings, agent=None):
"""Constructor.
Args:
bindings: [dict] The data bindings to use to configure the scenario.
agent: [GateAgent] The agent for invoking the test operations on Gate.
"""
super(KubeV2ArtifactTestScenario, self).__init__(bindings, agent)
bindings = self.bindings
    # We'll call out the app name because it is widely used;
    # it scopes the context of our activities.
# pylint: disable=invalid-name
self.TEST_APP = bindings['TEST_APP']
# Take just the first if there are multiple
# because some uses below assume just one.
self.TEST_NAMESPACE = bindings['TEST_NAMESPACE'].split(',')[0]
self.mf = sk.KubernetesManifestFactory(self)
self.mp = sk.KubernetesManifestPredicateFactory()
self.ps = sk.PipelineSupport(self)
def create_app(self):
"""Creates OperationContract that creates a new Spinnaker Application."""
contract = jc.Contract()
return st.OperationContract(
self.agent.make_create_app_operation(
bindings=self.bindings, application=self.TEST_APP,
account_name=self.bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT']),
contract=contract)
def delete_app(self):
"""Creates OperationContract that deletes a new Spinnaker Application."""
contract = jc.Contract()
return st.OperationContract(
self.agent.make_delete_app_operation(
application=self.TEST_APP,
account_name=self.bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT']),
contract=contract)
def __docker_image_artifact(self, name, image):
id_ = ''.join(random.choice(string.ascii_uppercase + string.digits)
for _ in range(10))
return {
'type': 'docker/image',
'name': name,
'reference': image,
'uuid': id_
}
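  # Illustrative example (the uuid is random): __docker_image_artifact(
  # 'placeholder', 'library/nginx') returns something like
  #     {'type': 'docker/image', 'name': 'placeholder',
  #      'reference': 'library/nginx', 'uuid': 'A1B2C3D4E5'}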
def deploy_unversioned_config_map(self, value):
"""Creates OperationContract for deploying an unversioned configmap
To verify the operation, we just check that the configmap was created with
the correct 'value'.
"""
name = self.TEST_APP + '-configmap'
manifest = self.mf.config_map(name, {'value': value})
manifest['metadata']['annotations'] = {'strategy.spinnaker.io/versioned': 'false'}
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'kubernetes',
'moniker': {
'app': self.TEST_APP
},
'account': self.bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'source': 'text',
'type': 'deployManifest',
'user': '[anonymous]',
'manifests': [manifest],
}],
description='Deploy manifest',
application=self.TEST_APP)
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('ConfigMap created',
retryable_for_secs=15)
.get_resources(
'configmap',
extra_args=[name, '--namespace', self.TEST_NAMESPACE])
.EXPECT(self.mp.config_map_key_value_predicate('value', value)))
return st.OperationContract(
self.new_post_operation(
title='deploy_manifest', data=payload, path='tasks'),
contract=builder.build())
def deploy_deployment_with_config_map(self, versioned):
"""Creates OperationContract for deploying a configmap along with a deployment
mounting this configmap.
To verify the operation, we just check that the deployment was created with
the correct configmap mounted
"""
deployment_name = self.TEST_APP + '-deployment'
deployment = self.mf.deployment(deployment_name, 'library/nginx')
configmap_name = self.TEST_APP + '-configmap'
configmap = self.mf.config_map(configmap_name, {'key': 'value'})
if not versioned:
configmap['metadata']['annotations'] = {'strategy.spinnaker.io/versioned': 'false'}
self.mf.add_configmap_volume(deployment, configmap_name)
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'kubernetes',
'moniker': {
'app': self.TEST_APP
},
'account': self.bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'source': 'text',
'type': 'deployManifest',
'user': '[anonymous]',
'manifests': [deployment, configmap],
}],
description='Deploy manifest',
application=self.TEST_APP)
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('Deployment created',
retryable_for_secs=15)
.get_resources(
'deploy',
extra_args=[deployment_name, '--namespace', self.TEST_NAMESPACE])
.EXPECT(self.mp.deployment_configmap_mounted_predicate(configmap_name)))
return st.OperationContract(
self.new_post_operation(
title='deploy_manifest', data=payload, path='tasks'),
contract=builder.build())
def deploy_config_map(self, version):
"""Creates OperationContract for deploying a versioned configmap
To verify the operation, we just check that the deployment was created with
the correct image.
"""
bindings = self.bindings
name = self.TEST_APP + '-configmap'
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'kubernetes',
'moniker': {
'app': self.TEST_APP
},
'account': bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'source': 'text',
'type': 'deployManifest',
'user': '[anonymous]',
'manifests': [self.mf.config_map(name, {'version': version})],
}],
description='Deploy manifest',
application=self.TEST_APP)
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('ConfigMap created',
retryable_for_secs=15)
.get_resources(
'configmap',
extra_args=[name + '-' + version, '--namespace', self.TEST_NAMESPACE])
.EXPECT(self.mp.config_map_key_value_predicate('version', version)))
return st.OperationContract(
self.new_post_operation(
title='deploy_manifest', data=payload, path='tasks'),
contract=builder.build())
def save_configmap_deployment_pipeline(self, pipeline_name, versioned=True):
deployment_name = self.TEST_APP + '-deployment'
deployment = self.mf.deployment(deployment_name, 'library/nginx')
configmap_name = self.TEST_APP + '-configmap'
configmap = self.mf.config_map(configmap_name, {'key': 'value'})
if not versioned:
configmap['metadata']['annotations'] = {'strategy.spinnaker.io/versioned': 'false'}
self.mf.add_configmap_volume(deployment, configmap_name)
configmap_stage = {
'refId': 'configmap',
'name': 'Deploy configmap',
'type': 'deployManifest',
'cloudProvider': 'kubernetes',
'moniker': {
'app': self.TEST_APP
},
'account': self.bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'source': 'text',
'manifests': [configmap],
}
deployment_stage = {
'refId': 'deployment',
'name': 'Deploy deployment',
'requisiteStageRefIds': ['configmap'],
'type': 'deployManifest',
'cloudProvider': 'kubernetes',
'moniker': {
'app': self.TEST_APP
},
'account': self.bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'source': 'text',
'manifests': [deployment],
}
return self.ps.submit_pipeline_contract(pipeline_name, [configmap_stage, deployment_stage])
def execute_deploy_manifest_pipeline(self, pipeline_name):
deployment_name = self.TEST_APP + '-deployment'
configmap_name = self.TEST_APP + '-configmap'
bindings = self.bindings
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'type': 'manual',
'user': '[anonymous]'
}],
description='Deploy manifest in ' + self.TEST_APP,
application=self.TEST_APP)
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('Deployment created',
retryable_for_secs=60)
.get_resources(
'deploy',
extra_args=[deployment_name, '--namespace', self.TEST_NAMESPACE])
.EXPECT(self.mp.deployment_configmap_mounted_predicate(configmap_name)))
return st.OperationContract(
self.new_post_operation(
title='Deploy manifest', data=payload,
path='pipelines/' + self.TEST_APP + '/' + pipeline_name),
contract=builder.build())
def deploy_deployment_with_docker_artifact(self, image):
"""Creates OperationContract for deploying and substituting one image into
a Deployment object
To verify the operation, we just check that the deployment was created with
the correct image.
"""
bindings = self.bindings
name = self.TEST_APP + '-deployment'
image_name = 'placeholder'
docker_artifact = self.__docker_image_artifact(image_name, image)
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'kubernetes',
'moniker': {
'app': self.TEST_APP
},
'account': bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'source': 'text',
'type': 'deployManifest',
'user': '[anonymous]',
'manifests': [self.mf.deployment(name, image_name)],
'artifacts': [docker_artifact]
}],
description='Deploy manifest',
application=self.TEST_APP)
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('Deployment created',
retryable_for_secs=15)
.get_resources(
'deploy',
extra_args=[name, '--namespace', self.TEST_NAMESPACE])
.EXPECT(self.mp.deployment_image_predicate(image)))
return st.OperationContract(
self.new_post_operation(
title='deploy_manifest', data=payload, path='tasks'),
contract=builder.build())
def delete_kind(self, kind, version=None):
"""Creates OperationContract for deleteManifest
To verify the operation, we just check that the Kubernetes deployment
is no longer visible (or is in the process of terminating).
"""
bindings = self.bindings
name = self.TEST_APP + '-' + kind
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'kubernetes',
'type': 'deleteManifest',
'account': bindings['SPINNAKER_KUBERNETES_V2_ACCOUNT'],
'user': '[anonymous]',
'kinds': [ kind ],
'location': self.TEST_NAMESPACE,
'options': { },
'labelSelectors': {
'selectors': [{
'kind': 'EQUALS',
'key': 'app',
'values': [ self.TEST_APP ]
}]
}
}],
application=self.TEST_APP,
description='Destroy Manifest')
if version is not None:
name = name + '-' + version
builder = kube.KubeContractBuilder(self.kube_v2_observer)
(builder.new_clause_builder('Manifest Removed')
.get_resources(
kind,
extra_args=[name, '--namespace', self.TEST_NAMESPACE])
.EXPECT(self.mp.not_found_observation_predicate()))
return st.OperationContract(
self.new_post_operation(
title='delete_kind', data=payload, path='tasks'),
contract=builder.build())
class KubeV2ArtifactTest(st.AgentTestCase):
"""The test fixture for the KubeV2ArtifactTest.
This is implemented using citest OperationContract instances that are
created by the KubeV2ArtifactTestScenario.
"""
# pylint: disable=missing-docstring
@property
def scenario(self):
return citest.base.TestRunner.global_runner().get_shared_data(
KubeV2ArtifactTestScenario)
def test_a_create_app(self):
self.run_test_case(self.scenario.create_app())
def test_b1_deploy_deployment_with_docker_artifact(self):
self.run_test_case(self.scenario.deploy_deployment_with_docker_artifact('library/nginx'))
def test_b2_update_deployment_with_docker_artifact(self):
self.run_test_case(self.scenario.deploy_deployment_with_docker_artifact('library/redis'))
def test_b3_delete_deployment(self):
self.run_test_case(self.scenario.delete_kind('deployment'), max_retries=2)
def test_c1_create_config_map(self):
self.run_test_case(self.scenario.deploy_config_map('v000'))
def test_c2_noop_update_config_map(self):
self.run_test_case(self.scenario.deploy_config_map('v000'))
def test_c3_update_config_map(self):
self.run_test_case(self.scenario.deploy_config_map('v001'))
def test_c4_delete_configmap(self):
self.run_test_case(self.scenario.delete_kind('configmap', version='v001'), max_retries=2)
def test_d1_create_unversioned_configmap(self):
self.run_test_case(self.scenario.deploy_unversioned_config_map('1'))
def test_d2_update_unversioned_configmap(self):
self.run_test_case(self.scenario.deploy_unversioned_config_map('2'))
def test_d3_delete_unversioned_configmap(self):
self.run_test_case(self.scenario.delete_kind('configmap'), max_retries=2)
def test_e1_create_deployment_with_versioned_configmap(self):
self.run_test_case(self.scenario.deploy_deployment_with_config_map(True))
def test_e2_delete_deployment(self):
self.run_test_case(self.scenario.delete_kind('deployment'), max_retries=2)
def test_e3_delete_configmap(self):
self.run_test_case(self.scenario.delete_kind('configmap', version='v000'), max_retries=2)
def test_f1_create_configmap_deployment_pipeline(self):
self.run_test_case(self.scenario.save_configmap_deployment_pipeline('deploy-configmap-deployment'))
def test_f2_execute_configmap_deployment_pipeline(self):
self.run_test_case(self.scenario.execute_deploy_manifest_pipeline('deploy-configmap-deployment'))
def test_f3_delete_deployment(self):
self.run_test_case(self.scenario.delete_kind('deployment'), max_retries=2)
def test_f4_delete_configmap(self):
self.run_test_case(self.scenario.delete_kind('configmap', version='v000'), max_retries=2)
def test_z_delete_app(self):
# Give a total of a minute because it might also need
# an internal cache update
self.run_test_case(self.scenario.delete_app(),
retry_interval_secs=8, max_retries=8)
def main():
"""Implements the main method running this artifact test."""
defaults = {
'TEST_STACK': 'tst',
'TEST_APP': 'kubv2arti' + KubeV2ArtifactTestScenario.DEFAULT_TEST_ID
}
return citest.base.TestRunner.main(
parser_inits=[KubeV2ArtifactTestScenario.initArgumentParser],
default_binding_overrides=defaults,
test_case_list=[KubeV2ArtifactTest])
if __name__ == '__main__':
sys.exit(main())
|
|
import os
import unittest
import psycopg2
from vital.security import randhex
from cargo import db, Model as _Model, fields
from cargo.fields import *
from cargo.expressions import *
from cargo.statements import Insert
from cargo.builder import *
db.open()
def run_tests(*tests, **opts):
suite = unittest.TestSuite()
for test_class in tests:
tests = unittest.defaultTestLoader.loadTestsFromTestCase(test_class)
suite.addTests(tests)
ut = unittest.TextTestRunner(**opts)
return ut.run(suite)
def run_discovered(path=None):
path = path or os.path.dirname(os.path.realpath(__file__))
ut = unittest.TextTestRunner(verbosity=2, failfast=True)
tests = []
suite = unittest.TestSuite()
for test in unittest.defaultTestLoader.discover(
path, pattern='*.py', top_level_dir=None):
        # De-duplicate while preserving order: ``tests.append`` returns None,
        # so ``not tests.append(t)`` both records t and stays truthy.
        suite.addTests((t for t in test
                        if t not in tests and not tests.append(t)))
return ut.run(suite)
def setup():
drop_schema(db, 'cargo_tests', cascade=True, if_exists=True)
create_schema(db, 'cargo_tests')
def cleanup():
drop_schema(db, 'cargo_tests', cascade=True, if_exists=True)
def new_field(type='text', value=None, name=None, table=None):
field = getattr(fields, type.title())(value=value)
field.field_name = name or randhex(24)
field.table = table or randhex(24)
return field
def new_expression(cast=int):
if cast == bytes:
cast = lambda x: psycopg2.Binary(str(x).encode())
return Expression(new_field(), '=', cast(12345))
def new_function(cast=int, alias=None):
if cast == bytes:
cast = lambda x: psycopg2.Binary(str(x).encode())
return Function('some_func', cast(12345), alias=alias)
def new_clause(name='FROM', *vals, **kwargs):
vals = vals or ['foobar']
return Clause(name, *vals, **kwargs)
class Model(_Model):
schema = 'cargo_tests'
uid = UID()
class Foo(_Model):
ORDINAL = ('uid', 'textfield')
schema = 'cargo_tests'
uid = Int(index=True, unique=True)
textfield = Text()
class FooB(Foo):
pass
class BaseTestCase(unittest.TestCase):
@classmethod
    def tearDownClass(cls):
cleanup()
def setUp(self):
self.orm.clear()
class StatementTestCase(BaseTestCase):
orm = Foo(schema='cargo_tests', naked=True)
orm_b = FooB(schema='cargo_tests', naked=True)
@classmethod
def setUpClass(cls):
from cargo.builder.extras import UIDFunction
setup()
Plan(Foo()).execute()
Plan(FooB()).execute()
UIDFunction(cls.orm).execute()
def setUp(self):
for field in self.orm.fields:
field.clear()
self._populate()
self.orm.reset()
self.orm_b.reset()
def tearDown(self):
for orm in (self.orm, self.orm_b):
orm.reset()
orm.where(True).delete()
def _populate(self):
self.orm.state.add(new_clause('INTO', safe('foo')))
self.orm_b.state.add(new_clause('INTO', safe('foo_b')))
def values(orm, start):
orm.fields[0](start)
orm.fields[1](randhex(10))
yield orm.fields[0]
yield orm.fields[1]
for orm in (self.orm, self.orm_b):
start = 1234
orm.values(*values(orm, start))
start += 1
orm.values(*values(orm, start))
start += 1
orm.values(*values(orm, start))
Insert(self.orm).execute()
Insert(self.orm_b).execute()
class LogicTestCase(unittest.TestCase):
def validate_expression(self, expression, left, operator, right,
params=None, values=None):
self.assertIsInstance(expression, Expression)
self.assertIs(expression.left, left)
self.assertEqual(expression.operator, operator)
self.assertEqual(expression.right, right)
if params is not None:
self.assertDictEqual(expression.params, params)
elif values:
for value in values:
self.assertIn(value, list(expression.params.values()))
def validate_function(self, function, func, args, alias=None, values=None):
self.assertIsInstance(function, Function)
self.assertEqual(function.func, func)
self.assertTupleEqual(function.args, tuple(args))
self.assertEqual(function.alias, alias)
if values:
for value in values:
self.assertIn(value, list(function.params.values()))
#: Builder setup
class BuilderTestCase(BaseTestCase):
orm = Foo(schema='cargo_tests', naked=True)
@classmethod
def setUpClass(cls):
setup()
Plan(Foo()).execute()
def setUp(self):
self.orm.clear()
def tearDown(self):
self.orm.clear()
#: Geometry setup
class GeoModel(Model):
path = Path()
lseg = LSeg()
poly = Polygon()
point = Point()
line = Line()
box = Box()
circle = Circle()
array_path = Array(Path())
array_lseg = Array(LSeg())
array_poly = Array(Polygon())
array_point = Array(Point())
array_line = Array(Line())
array_box = Array(Box())
array_circle = Array(Circle())
class GeoPlan(Plan):
model = GeoModel()
class GeoTestCase(BaseTestCase):
orm = GeoModel()
@classmethod
def setUpClass(cls):
setup()
GeoPlan().execute()
#: Integer setup
class IntModel(Model):
integer = Int()
bigint = BigInt()
smallint = SmallInt()
array_integer = Array(Int())
array_bigint = Array(BigInt())
array_smallint = Array(SmallInt())
enc_integer = Encrypted(Encrypted.generate_secret(), Int())
enc_bigint = Encrypted(Encrypted.generate_secret(), BigInt())
enc_smallint = Encrypted(Encrypted.generate_secret(), SmallInt())
array_enc_integer = Array(enc_integer)
array_enc_bigint = Array(enc_bigint)
array_enc_smallint = Array(enc_smallint)
class IntPlan(Plan):
model = IntModel()
class IntTestCase(BaseTestCase):
orm = IntModel()
@classmethod
def setUpClass(cls):
setup()
IntPlan().execute()
#: Character setup
class CharModel(Model):
char = Char(maxlen=200)
varchar = Varchar(maxlen=200)
text = Text()
array_char = Array(Char(maxlen=200))
array_varchar = Array(Varchar(maxlen=200))
array_text = Array(Text())
enc_char = Encrypted(Encrypted.generate_secret(), Char(maxlen=200))
enc_varchar = Encrypted(Encrypted.generate_secret(), Varchar(maxlen=200))
enc_text = Encrypted(Encrypted.generate_secret(), Text())
array_enc_char = Array(enc_char)
array_enc_varchar = Array(enc_varchar)
array_enc_text = Array(enc_text)
class CharPlan(Plan):
model = CharModel()
class CharTestCase(BaseTestCase):
orm = CharModel()
@classmethod
def setUpClass(cls):
setup()
CharPlan().execute()
#: Networking setup
class NetModel(Model):
ip = IP()
cidr = Cidr()
mac = MacAddress()
array_ip = Array(IP())
array_cidr = Array(Cidr())
array_mac = Array(MacAddress())
enc_ip = Encrypted(Encrypted.generate_secret(), IP())
enc_cidr = Encrypted(Encrypted.generate_secret(), Cidr())
enc_mac = Encrypted(Encrypted.generate_secret(), MacAddress())
array_enc_ip = Array(enc_ip)
array_enc_cidr = Array(enc_cidr)
array_enc_mac = Array(enc_mac)
class NetPlan(Plan):
model = NetModel()
class NetTestCase(BaseTestCase):
orm = NetModel()
@classmethod
def setUpClass(cls):
setup()
NetPlan().execute()
@classmethod
    def tearDownClass(cls):
cleanup()
def setUp(self):
self.orm.clear()
#: Numeric setup
class NumModel(Model):
dec = Decimal()
float4 = Float()
float8 = Double()
currency = Currency()
money = Money()
array_dec = Array(Decimal())
array_float4 = Array(Float())
array_float8 = Array(Double())
array_currency = Array(Currency())
array_money = Array(Money())
enc_dec = Encrypted(Encrypted.generate_secret(), Decimal())
enc_float4 = Encrypted(Encrypted.generate_secret(), Float())
enc_float8 = Encrypted(Encrypted.generate_secret(), Double())
enc_currency = Encrypted(Encrypted.generate_secret(), Currency())
enc_money = Encrypted(Encrypted.generate_secret(), Money())
array_enc_dec = Array(enc_dec)
array_enc_float4 = Array(enc_float4)
array_enc_float8 = Array(enc_float8)
array_enc_currency = Array(enc_currency)
array_enc_money = Array(enc_money)
class NumPlan(Plan):
model = NumModel()
class NumTestCase(BaseTestCase):
orm = NumModel()
@classmethod
def setUpClass(cls):
setup()
NumPlan().execute()
#: Extras setup
class ExtrasModel(Model):
username = Username()
email = Email()
password = Password()
slug = Slug()
key = Key()
phone = PhoneNumber()
duration = Duration()
array_username = Array(Username())
array_email = Array(Email())
array_password = Array(Password())
array_slug = Array(Slug())
array_key = Array(Key())
array_phone = Array(PhoneNumber())
array_duration = Array(Duration())
enc_username = Encrypted(Encrypted.generate_secret(),
Username(not_null=False))
enc_email = Encrypted(Encrypted.generate_secret(), Email())
enc_password = Encrypted(Encrypted.generate_secret(), Password())
enc_slug = Encrypted(Encrypted.generate_secret(), Slug())
enc_key = Encrypted(Encrypted.generate_secret(), Key())
enc_phone = Encrypted(Encrypted.generate_secret(), PhoneNumber())
enc_duration = Encrypted(Encrypted.generate_secret(), Duration())
array_enc_username = Array(enc_username)
array_enc_email = Array(enc_email)
array_enc_password = Array(enc_password)
array_enc_slug = Array(enc_slug)
array_enc_key = Array(enc_key)
array_enc_phone = Array(enc_phone)
array_enc_duration = Array(enc_duration)
class ExtrasPlan(Plan):
model = ExtrasModel()
class ExtrasTestCase(BaseTestCase):
orm = ExtrasModel()
@classmethod
def setUpClass(cls):
setup()
ExtrasPlan().execute()
ExtrasModel.username.register_type(cls.orm.db)
#: Binary setup
class BinaryModel(Model):
binary_field = Binary()
array_binary_field = Array(Binary())
enc_binary_field = Encrypted(Encrypted.generate_secret(), Binary())
array_enc_binary_field = Array(enc_binary_field)
class BinaryPlan(Plan):
model = BinaryModel()
class BinaryTestCase(BaseTestCase):
orm = BinaryModel()
@classmethod
def setUpClass(cls):
setup()
BinaryPlan().execute()
@classmethod
    def tearDownClass(cls):
cleanup()
def setUp(self):
self.orm.clear()
#: Binary setup
class BitModel(Model):
bit_field = Bit(4)
varbit_field = Varbit(4)
array_bit_field = Array(Bit(4))
array_varbit_field = Array(Varbit(4))
class BitPlan(Plan):
model = BitModel()
class BitTestCase(BaseTestCase):
orm = BitModel()
@classmethod
def setUpClass(cls):
setup()
BitPlan().execute()
@classmethod
    def tearDownClass(cls):
cleanup()
def setUp(self):
self.orm.clear()
#: Boolean setup
class BooleanModel(Model):
boolean = Bool()
array_boolean = Array(Bool())
class BooleanPlan(Plan):
model = BooleanModel()
class BooleanTestCase(BaseTestCase):
orm = BooleanModel()
@classmethod
def setUpClass(cls):
setup()
BooleanPlan().execute()
#: DateTime setup
class DateTimeModel(Model):
time = Time()
timetz = TimeTZ()
ts = Timestamp()
tstz = TimestampTZ()
date = Date()
array_time = Array(Time())
array_timetz = Array(TimeTZ())
array_ts = Array(Timestamp())
array_tstz = Array(TimestampTZ())
array_date = Array(Date())
enc_time = Encrypted(Encrypted.generate_secret(), Time())
enc_date = Encrypted(Encrypted.generate_secret(), Date())
enc_ts = Encrypted(Encrypted.generate_secret(), Timestamp())
array_enc_time = Array(enc_time)
array_enc_date = Array(enc_date)
array_enc_ts = Array(enc_ts)
class DateTimePlan(Plan):
model = DateTimeModel()
class DateTimeTestCase(BaseTestCase):
orm = DateTimeModel()
@classmethod
def setUpClass(cls):
setup()
DateTimePlan().execute()
@classmethod
    def tearDownClass(cls):
cleanup()
def setUp(self):
self.orm.clear()
#: Identifier setup
class UIDModel(Model):
pass
class StrUIDModel(Model):
uid = StrUID()
class SerialModel(_Model):
schema = 'cargo_tests'
uid = None
serial = Serial()
class SmallSerialModel(SerialModel):
serial = SmallSerial()
class BigSerialModel(SerialModel):
serial = BigSerial()
class UUIDModel(Model):
uid = UID(primary=False)
uuid = UUID()
array_uuid = Array(UUID())
class IdentifierTestCase(BaseTestCase):
@classmethod
def setUpClass(cls):
setup()
for m in (UIDModel(), StrUIDModel(), SerialModel(), SmallSerialModel(),
BigSerialModel(), UUIDModel()):
Plan(model=m).execute()
#: KeyValue setup
class KeyValueModel(Model):
json_field = Json()
jsonb_field = JsonB()
hstore_field = HStore()
array_json_field = Array(Json())
array_jsonb_field = Array(JsonB())
array_hstore_field = Array(HStore())
enc_json = Encrypted(Encrypted.generate_secret(), Json())
enc_jsonb = Encrypted(Encrypted.generate_secret(), JsonB())
enc_hstore = Encrypted(Encrypted.generate_secret(), HStore())
array_enc_json = Array(enc_json)
array_enc_jsonb = Array(enc_jsonb)
array_enc_hstore = Array(enc_hstore)
class KeyValuePlan(Plan):
model = KeyValueModel()
class KeyValueTestCase(BaseTestCase):
orm = KeyValueModel()
@classmethod
def setUpClass(cls):
setup()
KeyValuePlan().execute()
@classmethod
def tearDownClass(cls):
cleanup()
def setUp(self):
KeyValueModel.hstore_field.register_type(self.orm.db)
self.orm.clear()
#: Range setup
class RangeModel(Model):
integer = IntRange()
bigint = BigIntRange()
date = DateRange()
numeric = NumericRange()
timestamp = TimestampRange()
timestamptz = TimestampTZRange()
array_integer = Array(IntRange())
array_bigint = Array(BigIntRange())
array_date = Array(DateRange())
array_numeric = Array(NumericRange())
array_timestamp = Array(TimestampRange())
array_timestamptz = Array(TimestampTZRange())
class RangePlan(Plan):
model = RangeModel()
class RangeTestCase(BaseTestCase):
orm = RangeModel()
@classmethod
def setUpClass(cls):
setup()
RangePlan().execute()
#: Sequence setup
class SequenceModel(Model):
enum = Enum('red', 'white', 'blue')
array_enum = Array(Enum('red', 'white', 'blue'))
class SequencePlan(Plan):
model = SequenceModel()
def after(self):
self.model.array_enum.register_type(db.client)
self.model.enum.register_type(db.client)
class SequenceTestCase(BaseTestCase):
orm = SequenceModel()
@classmethod
def setUpClass(cls):
setup()
SequencePlan().execute()
@classmethod
    def tearDownClass(cls):
cleanup()
def setUp(self):
self.orm.clear()
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import sqlalchemy
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from keystone.openstack.common import context as request_context
from keystone.openstack.common.db.sqlalchemy import models
from keystone.openstack.common.gettextutils import _, _LI, _LW
from keystone.openstack.common import timeutils
LOG = logging.getLogger(__name__)
_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")
def sanitize_db_url(url):
match = _DBURL_REGEX.match(url)
if match:
return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):])
return url
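# For example: sanitize_db_url('mysql://scott:tiger@localhost/mydb')
# returns 'mysql://****:****@localhost/mydb'; URLs that do not match the
# credential pattern are returned unchanged.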
class InvalidSortKey(Exception):
message = _("Sort key supplied was not valid.")
# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
sort_dir=None, sort_dirs=None):
"""Returns a query with sorting / pagination criteria added.
Pagination works by requiring a unique sort_key, specified by sort_keys.
(If sort_keys is not unique, then we risk looping through values.)
We use the last row in the previous page as the 'marker' for pagination.
So we must return values that follow the passed marker in the order.
With a single-valued sort_key, this would be easy: sort_key > X.
With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
the lexicographical ordering:
(k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
We also have to cope with different sort_directions.
Typically, the id of the last row is used as the client-facing pagination
marker, then the actual marker object must be fetched from the db and
passed in to us as marker.
:param query: the query object to which we should add paging/sorting
:param model: the ORM model class
:param limit: maximum number of items to return
:param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
results after this value.
:param sort_dir: direction in which results should be sorted (asc, desc)
:param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
:rtype: sqlalchemy.orm.query.Query
:return: The query with sorting/pagination added.
"""
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming it is 'id'
LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
# Default the sort direction to ascending
if sort_dirs is None and sort_dir is None:
sort_dir = 'asc'
# Ensure a per-column sort direction
if sort_dirs is None:
sort_dirs = [sort_dir for _sort_key in sort_keys]
assert(len(sort_dirs) == len(sort_keys))
# Add sorting
for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
try:
sort_dir_func = {
'asc': sqlalchemy.asc,
'desc': sqlalchemy.desc,
}[current_sort_dir]
except KeyError:
raise ValueError(_("Unknown sort direction, "
"must be 'desc' or 'asc'"))
try:
sort_key_attr = getattr(model, current_sort_key)
except AttributeError:
raise InvalidSortKey()
query = query.order_by(sort_dir_func(sort_key_attr))
# Add pagination
if marker is not None:
marker_values = []
for sort_key in sort_keys:
v = getattr(marker, sort_key)
marker_values.append(v)
# Build up an array of sort criteria as in the docstring
criteria_list = []
for i in range(len(sort_keys)):
crit_attrs = []
for j in range(i):
model_attr = getattr(model, sort_keys[j])
crit_attrs.append((model_attr == marker_values[j]))
model_attr = getattr(model, sort_keys[i])
if sort_dirs[i] == 'desc':
crit_attrs.append((model_attr < marker_values[i]))
else:
crit_attrs.append((model_attr > marker_values[i]))
criteria = sqlalchemy.sql.and_(*crit_attrs)
criteria_list.append(criteria)
f = sqlalchemy.sql.or_(*criteria_list)
query = query.filter(f)
if limit is not None:
query = query.limit(limit)
return query
def _read_deleted_filter(query, db_model, read_deleted):
if 'deleted' not in db_model.__table__.columns:
raise ValueError(_("There is no `deleted` column in `%s` table. "
"Project doesn't use soft-deleted feature.")
% db_model.__name__)
default_deleted_value = db_model.__table__.c.deleted.default.arg
if read_deleted == 'no':
query = query.filter(db_model.deleted == default_deleted_value)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter(db_model.deleted != default_deleted_value)
else:
raise ValueError(_("Unrecognized read_deleted value '%s'")
% read_deleted)
return query
def _project_filter(query, db_model, context, project_only):
if project_only and 'project_id' not in db_model.__table__.columns:
raise ValueError(_("There is no `project_id` column in `%s` table.")
% db_model.__name__)
if request_context.is_user_context(context) and project_only:
if project_only == 'allow_none':
is_none = None
query = query.filter(or_(db_model.project_id == context.project_id,
db_model.project_id == is_none))
else:
query = query.filter(db_model.project_id == context.project_id)
return query
def model_query(context, model, session, args=None, project_only=False,
read_deleted=None):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param model: Model to query. Must be a subclass of ModelBase.
:type model: models.ModelBase
:param session: The session to use.
:type session: sqlalchemy.orm.session.Session
:param args: Arguments to query. If None - model is used.
:type args: tuple
:param project_only: If present and context is user-type, then restrict
query to match the context's project_id. If set to
'allow_none', restriction includes project_id = None.
:type project_only: bool
:param read_deleted: If present, overrides context's read_deleted field.
    :type read_deleted: str, one of 'no', 'yes' or 'only'
Usage:
    .. code:: python
result = (utils.model_query(context, models.Instance, session=session)
.filter_by(uuid=instance_uuid)
.all())
query = utils.model_query(
context, Node,
session=session,
args=(func.count(Node.id), func.sum(Node.ram))
).filter_by(project_id=project_id)
"""
if not read_deleted:
if hasattr(context, 'read_deleted'):
# NOTE(viktors): some projects use `read_deleted` attribute in
# their contexts instead of `show_deleted`.
read_deleted = context.read_deleted
else:
read_deleted = context.show_deleted
if not issubclass(model, models.ModelBase):
raise TypeError(_("model should be a subclass of ModelBase"))
query = session.query(model) if not args else session.query(*args)
query = _read_deleted_filter(query, model, read_deleted)
query = _project_filter(query, model, context, project_only)
return query
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data.
"""
metadata = MetaData()
metadata.bind = engine
return Table(name, metadata, autoload=True)
class InsertFromSelect(UpdateBase):
"""Form the base for `INSERT INTO table (SELECT ... )` statement."""
def __init__(self, table, select):
self.table = table
self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
"""Form the `INSERT INTO table (SELECT ... )` statement."""
return "INSERT INTO %s %s" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.select))
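# Usage sketch: copy every row of ``some_table`` into ``new_table``
# (this mirrors how InsertFromSelect is used further below):
#
#     ins = InsertFromSelect(new_table, some_table.select())
#     migrate_engine.execute(ins)
#
# which compiles to "INSERT INTO new_table SELECT ... FROM some_table".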
class ColumnError(Exception):
"""Error raised when no column or an invalid column is found."""
def _get_not_supported_column(col_name_col_instance, column_name):
try:
column = col_name_col_instance[column_name]
except KeyError:
msg = _("Please specify column %s in col_name_col_instance "
"param. It is required because column has unsupported "
"type by sqlite).")
raise ColumnError(msg % column_name)
if not isinstance(column, Column):
msg = _("col_name_col_instance param has wrong type of "
"column instance for column %s It should be instance "
"of sqlalchemy.Column.")
raise ColumnError(msg % column_name)
return column
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
**col_name_col_instance):
"""Drop unique constraint from table.
    DEPRECATED: this function will be removed from keystone.db in a few
    releases. Please use the UniqueConstraint.drop() method directly in
    sqlalchemy-migrate migration scripts.
    This method drops the unique constraint from the table and works for
    mysql, postgresql and sqlite.
In mysql and postgresql we are able to use "alter table" construction.
Sqlalchemy doesn't support some sqlite column types and replaces their
type with NullType in metadata. We process these columns and replace
NullType with the correct column type.
:param migrate_engine: sqlalchemy engine
    :param table_name: name of the table that contains the unique constraint.
    :param uc_name: name of the unique constraint that will be dropped.
    :param columns: columns that are in the unique constraint.
    :param col_name_col_instance: contains pairs column_name=column_instance.
                column_instance is an instance of Column. These params
                are required only for columns whose types are not
                supported by sqlite, for example BigInteger.
"""
from migrate.changeset import UniqueConstraint
meta = MetaData()
meta.bind = migrate_engine
t = Table(table_name, meta, autoload=True)
if migrate_engine.name == "sqlite":
override_cols = [
_get_not_supported_column(col_name_col_instance, col.name)
for col in t.columns
if isinstance(col.type, NullType)
]
for col in override_cols:
t.columns.replace(col)
uc = UniqueConstraint(*columns, table=t, name=uc_name)
uc.drop()
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
use_soft_delete, *uc_column_names):
"""Drop all old rows having the same values for columns in uc_columns.
    This method drops (or marks as `deleted` if use_soft_delete is True) old
    duplicate rows from the table with name `table_name`.
:param migrate_engine: Sqlalchemy engine
:param table_name: Table with duplicates
:param use_soft_delete: If True - values will be marked as `deleted`,
if False - values will be removed from table
:param uc_column_names: Unique constraint columns
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
columns_for_group_by = [table.c[name] for name in uc_column_names]
columns_for_select = [func.max(table.c.id)]
columns_for_select.extend(columns_for_group_by)
duplicated_rows_select = sqlalchemy.sql.select(
columns_for_select, group_by=columns_for_group_by,
having=func.count(table.c.id) > 1)
for row in migrate_engine.execute(duplicated_rows_select):
# NOTE(boris-42): Do not remove row that has the biggest ID.
delete_condition = table.c.id != row[0]
is_none = None # workaround for pyflakes
delete_condition &= table.c.deleted_at == is_none
for name in uc_column_names:
delete_condition &= table.c[name] == row[name]
rows_to_delete_select = sqlalchemy.sql.select(
[table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete:
delete_statement = table.update().\
where(delete_condition).\
values({
'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()
})
else:
delete_statement = table.delete().where(delete_condition)
migrate_engine.execute(delete_statement)
def _get_default_deleted_value(table):
if isinstance(table.c.id.type, Integer):
return 0
if isinstance(table.c.id.type, String):
return ""
raise ColumnError(_("Unsupported id columns type"))
def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
table = get_table(migrate_engine, table_name)
insp = reflection.Inspector.from_engine(migrate_engine)
real_indexes = insp.get_indexes(table_name)
existing_index_names = dict(
[(index['name'], index['column_names']) for index in real_indexes])
# NOTE(boris-42): Restore indexes on `deleted` column
for index in indexes:
if 'deleted' not in index['column_names']:
continue
name = index['name']
if name in existing_index_names:
column_names = [table.c[c] for c in existing_index_names[name]]
old_index = Index(name, *column_names, unique=index["unique"])
old_index.drop(migrate_engine)
column_names = [table.c[c] for c in index['column_names']]
new_index = Index(index["name"], *column_names, unique=index["unique"])
new_index.create(migrate_engine)
def change_deleted_column_type_to_boolean(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_boolean_sqlite(
migrate_engine, table_name, **col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
old_deleted = Column('old_deleted', Boolean, default=False)
old_deleted.create(table, populate_default=False)
table.update().\
where(table.c.deleted == table.c.id).\
values(old_deleted=True).\
execute()
table.c.deleted.drop()
table.c.old_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
**col_name_col_instance):
insp = reflection.Inspector.from_engine(migrate_engine)
table = get_table(migrate_engine, table_name)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', Boolean, default=0)
columns.append(column_copy)
constraints = [constraint.copy() for constraint in table.constraints]
meta = table.metadata
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
c_select = []
for c in table.c:
if c.name != "deleted":
c_select.append(c)
else:
c_select.append(table.c.deleted == table.c.id)
ins = InsertFromSelect(new_table, sqlalchemy.sql.select(c_select))
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
new_table.update().\
where(new_table.c.deleted == new_table.c.id).\
values(deleted=True).\
execute()
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_id_type_sqlite(
migrate_engine, table_name, **col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
new_deleted = Column('new_deleted', table.c.id.type,
default=_get_default_deleted_value(table))
new_deleted.create(table, populate_default=True)
deleted = True # workaround for pyflakes
table.update().\
where(table.c.deleted == deleted).\
values(new_deleted=table.c.id).\
execute()
table.c.deleted.drop()
table.c.new_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
**col_name_col_instance):
    # NOTE(boris-42): sqlalchemy-migrate can't drop a column with check
# constraints in sqlite DB and our `deleted` column has
# 2 check constraints. So there is only one way to remove
# these constraints:
# 1) Create new table with the same columns, constraints
# and indexes. (except deleted column).
# 2) Copy all data from old to new table.
# 3) Drop old table.
# 4) Rename new table to old table name.
insp = reflection.Inspector.from_engine(migrate_engine)
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', table.c.id.type,
default=default_deleted_value)
columns.append(column_copy)
def is_deleted_column_constraint(constraint):
        # NOTE(boris-42): There is no other way to check whether a
        #                 CheckConstraint is associated with the deleted column.
if not isinstance(constraint, CheckConstraint):
return False
sqltext = str(constraint.sqltext)
return (sqltext.endswith("deleted in (0, 1)") or
sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))
constraints = []
for constraint in table.constraints:
if not is_deleted_column_constraint(constraint):
constraints.append(constraint.copy())
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
deleted = True # workaround for pyflakes
new_table.update().\
where(new_table.c.deleted == deleted).\
values(deleted=new_table.c.id).\
execute()
# NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
deleted = False # workaround for pyflakes
new_table.update().\
where(new_table.c.deleted == deleted).\
values(deleted=default_deleted_value).\
execute()
def get_connect_string(backend, database, user=None, passwd=None):
"""Get database connection
Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
args = {'backend': backend,
'user': user,
'passwd': passwd,
'database': database}
if backend == 'sqlite':
template = '%(backend)s:///%(database)s'
else:
template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
return template % args
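# For example: get_connect_string('postgresql', 'openstack_citest',
# user='citest', passwd='secret') returns
# 'postgresql://citest:secret@localhost/openstack_citest', while
# get_connect_string('sqlite', 'test.db') returns 'sqlite:///test.db'.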
def is_backend_avail(backend, database, user=None, passwd=None):
try:
connect_uri = get_connect_string(backend=backend,
database=database,
user=user,
passwd=passwd)
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
# intentionally catch all to handle exceptions even if we don't
# have any backend code loaded.
return False
else:
connection.close()
engine.dispose()
return True
def get_db_connection_info(conn_pieces):
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
password = auth_pieces[1].strip()
return (user, password, database, host)
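# For example: given conn_pieces parsed with urlparse from
# 'mysql://scott:tiger@127.0.0.1/citest', this returns
# ('scott', 'tiger', 'citest', '127.0.0.1').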
|
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from cassandra.cqlengine.statements import AssignmentClause, SetUpdateClause, ListUpdateClause, MapUpdateClause, MapDeleteClause, FieldDeleteClause, CounterUpdateClause
class AssignmentClauseTests(unittest.TestCase):
def test_rendering(self):
pass
def test_insert_tuple(self):
ac = AssignmentClause('a', 'b')
ac.set_context_id(10)
self.assertEqual(ac.insert_tuple(), ('a', 10))
class SetUpdateClauseTests(unittest.TestCase):
def test_update_from_none(self):
c = SetUpdateClause('s', set((1, 2)), previous=None)
c._analyze()
c.set_context_id(0)
self.assertEqual(c._assignments, set((1, 2)))
self.assertIsNone(c._additions)
self.assertIsNone(c._removals)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': set((1, 2))})
def test_null_update(self):
""" tests setting a set to None creates an empty update statement """
c = SetUpdateClause('s', None, previous=set((1, 2)))
c._analyze()
c.set_context_id(0)
self.assertIsNone(c._assignments)
self.assertIsNone(c._additions)
self.assertIsNone(c._removals)
self.assertEqual(c.get_context_size(), 0)
self.assertEqual(str(c), '')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {})
def test_no_update(self):
""" tests an unchanged value creates an empty update statement """
c = SetUpdateClause('s', set((1, 2)), previous=set((1, 2)))
c._analyze()
c.set_context_id(0)
self.assertIsNone(c._assignments)
self.assertIsNone(c._additions)
self.assertIsNone(c._removals)
self.assertEqual(c.get_context_size(), 0)
self.assertEqual(str(c), '')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {})
def test_update_empty_set(self):
"""tests assigning a set to an empty set creates a nonempty
update statement and nonzero context size."""
c = SetUpdateClause(field='s', value=set())
c._analyze()
c.set_context_id(0)
self.assertEqual(c._assignments, set())
self.assertIsNone(c._additions)
self.assertIsNone(c._removals)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': set()})
def test_additions(self):
c = SetUpdateClause('s', set((1, 2, 3)), previous=set((1, 2)))
c._analyze()
c.set_context_id(0)
self.assertIsNone(c._assignments)
self.assertEqual(c._additions, set((3,)))
self.assertIsNone(c._removals)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = "s" + %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': set((3,))})
def test_removals(self):
c = SetUpdateClause('s', set((1, 2)), previous=set((1, 2, 3)))
c._analyze()
c.set_context_id(0)
self.assertIsNone(c._assignments)
self.assertIsNone(c._additions)
self.assertEqual(c._removals, set((3,)))
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = "s" - %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': set((3,))})
def test_additions_and_removals(self):
c = SetUpdateClause('s', set((2, 3)), previous=set((1, 2)))
c._analyze()
c.set_context_id(0)
self.assertIsNone(c._assignments)
self.assertEqual(c._additions, set((3,)))
self.assertEqual(c._removals, set((1,)))
self.assertEqual(c.get_context_size(), 2)
self.assertEqual(str(c), '"s" = "s" + %(0)s, "s" = "s" - %(1)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': set((3,)), '1': set((1,))})
class ListUpdateClauseTests(unittest.TestCase):
def test_update_from_none(self):
c = ListUpdateClause('s', [1, 2, 3])
c._analyze()
c.set_context_id(0)
self.assertEqual(c._assignments, [1, 2, 3])
self.assertIsNone(c._append)
self.assertIsNone(c._prepend)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': [1, 2, 3]})
def test_update_from_empty(self):
c = ListUpdateClause('s', [1, 2, 3], previous=[])
c._analyze()
c.set_context_id(0)
self.assertEqual(c._assignments, [1, 2, 3])
self.assertIsNone(c._append)
self.assertIsNone(c._prepend)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': [1, 2, 3]})
def test_update_from_different_list(self):
c = ListUpdateClause('s', [1, 2, 3], previous=[3, 2, 1])
c._analyze()
c.set_context_id(0)
self.assertEqual(c._assignments, [1, 2, 3])
self.assertIsNone(c._append)
self.assertIsNone(c._prepend)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': [1, 2, 3]})
def test_append(self):
c = ListUpdateClause('s', [1, 2, 3, 4], previous=[1, 2])
c._analyze()
c.set_context_id(0)
self.assertIsNone(c._assignments)
self.assertEqual(c._append, [3, 4])
self.assertIsNone(c._prepend)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = "s" + %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': [3, 4]})
def test_prepend(self):
c = ListUpdateClause('s', [1, 2, 3, 4], previous=[3, 4])
c._analyze()
c.set_context_id(0)
self.assertIsNone(c._assignments)
self.assertIsNone(c._append)
self.assertEqual(c._prepend, [1, 2])
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = %(0)s + "s"')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': [1, 2]})
def test_append_and_prepend(self):
c = ListUpdateClause('s', [1, 2, 3, 4, 5, 6], previous=[3, 4])
c._analyze()
c.set_context_id(0)
self.assertIsNone(c._assignments)
self.assertEqual(c._append, [5, 6])
self.assertEqual(c._prepend, [1, 2])
self.assertEqual(c.get_context_size(), 2)
self.assertEqual(str(c), '"s" = %(0)s + "s", "s" = "s" + %(1)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': [1, 2], '1': [5, 6]})
def test_shrinking_list_update(self):
""" tests that updating to a smaller list results in an insert statement """
c = ListUpdateClause('s', [1, 2, 3], previous=[1, 2, 3, 4])
c._analyze()
c.set_context_id(0)
self.assertEqual(c._assignments, [1, 2, 3])
self.assertIsNone(c._append)
self.assertIsNone(c._prepend)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': [1, 2, 3]})
class MapUpdateTests(unittest.TestCase):
def test_update(self):
c = MapUpdateClause('s', {3: 0, 5: 6}, previous={5: 0, 3: 4})
c._analyze()
c.set_context_id(0)
self.assertEqual(c._updates, [3, 5])
self.assertEqual(c.get_context_size(), 4)
self.assertEqual(str(c), '"s"[%(0)s] = %(1)s, "s"[%(2)s] = %(3)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': 3, "1": 0, '2': 5, '3': 6})
def test_update_from_null(self):
c = MapUpdateClause('s', {3: 0, 5: 6})
c._analyze()
c.set_context_id(0)
self.assertEqual(c._updates, [3, 5])
self.assertEqual(c.get_context_size(), 4)
self.assertEqual(str(c), '"s"[%(0)s] = %(1)s, "s"[%(2)s] = %(3)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': 3, "1": 0, '2': 5, '3': 6})
def test_nulled_columns_arent_included(self):
c = MapUpdateClause('s', {3: 0}, {1: 2, 3: 4})
c._analyze()
c.set_context_id(0)
self.assertNotIn(1, c._updates)
class CounterUpdateTests(unittest.TestCase):
def test_positive_update(self):
c = CounterUpdateClause('a', 5, 3)
c.set_context_id(5)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"a" = "a" + %(5)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'5': 2})
def test_negative_update(self):
c = CounterUpdateClause('a', 4, 7)
c.set_context_id(3)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"a" = "a" - %(3)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'3': 3})
def test_noop_update(self):
c = CounterUpdateClause('a', 5, 5)
c.set_context_id(5)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"a" = "a" + %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'5': 0})
class MapDeleteTests(unittest.TestCase):
def test_update(self):
c = MapDeleteClause('s', {3: 0}, {1: 2, 3: 4, 5: 6})
c._analyze()
c.set_context_id(0)
self.assertEqual(c._removals, [1, 5])
self.assertEqual(c.get_context_size(), 2)
self.assertEqual(str(c), '"s"[%(0)s], "s"[%(1)s]')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': 1, '1': 5})
class FieldDeleteTests(unittest.TestCase):
def test_str(self):
f = FieldDeleteClause("blake")
assert str(f) == '"blake"'
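# Illustrative sketch (not part of the original suite): the same clause API the
# tests above exercise, shown end-to-end for a set update. It assumes SetUpdateClause
# is importable exactly as in the test cases above; the column name is arbitrary.
def _example_set_update_clause():
    c = SetUpdateClause('tags', set((1, 2, 3)), previous=set((2, 3)))
    c._analyze()            # splits the change into additions/removals
    c.set_context_id(0)     # placeholders are numbered starting from this id
    cql = str(c)            # e.g. '"tags" = "tags" + %(0)s'
    ctx = {}
    c.update_context(ctx)   # fills the context, e.g. {'0': set((1,))}
    return cql, ctx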
|
|
from unittest.mock import call, patch, Mock
import asyncio
import pytest
from . import FakeSleeper
from . import deadman_app
from . import mock_plugin
def fake_best_replicas(replicas):
l = sorted(replicas, key=lambda x: x[1]['willing'])
if l:
winner = l[0][1]['willing']
l = [i for i in l if winner == i[1]['willing']]
for id, state in l:
yield id, state
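# Illustrative check (not from the original module): fake_best_replicas sorts by
# the 'willing' value and yields only the replicas tied for the lowest one.
def test_fake_best_replicas_yields_lowest_willing_only():
    replicas = [('a', {'willing': 2.0}), ('b', {'willing': 1.0}), ('c', {'willing': 1.0})]
    assert list(fake_best_replicas(replicas)) == [
        ('b', {'willing': 1.0}), ('c', {'willing': 1.0})]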
def mock_state(replica=False, **kw):
if replica:
defaults = dict(
health_problems={},
replica=replica,
pg_last_xlog_replay_location='68A/16E1DA8',
pg_last_xlog_receive_location='68A/16E1DA8')
else:
defaults = dict(
health_problems={},
replica=replica,
pg_current_xlog_location='68A/16E1DA8')
defaults.update(kw)
return defaults
@pytest.fixture
def app(deadman_app):
return deadman_app(dict(deadman=dict(tick_time=1)))
NO_SUBSCRIBER = object()
def state_getter(app, *extra_states):
def dcs_list_state():
# generate some mock state
for id, state in [(app.my_id, app._state)] + list(extra_states):
yield id, state
return dcs_list_state
def setup_plugins(app, **kw):
plugin = mock_plugin(app._pm)
plugin.best_replicas.side_effect = fake_best_replicas
get_my_id = kw.get('get_my_id', '42')
pg_replication_role = kw.get('pg_replication_role', 'replica')
defaults = {
'pg_replication_role': pg_replication_role,
'pg_get_timeline': 1,
'dcs_get_timeline': 1,
'get_conn_info': dict(host='127.0.0.1'),
'get_my_id': get_my_id,
'dcs_set_database_identifier': True,
'dcs_get_database_identifier': '12345',
'pg_get_database_identifier': '12345',
}
if pg_replication_role == 'master':
defaults['dcs_lock'] = True
defaults.update(kw)
for k, v in defaults.items():
func = getattr(plugin, k)
func.return_value = v
return plugin
def test_master_bootstrap(app):
plugins = setup_plugins(app,
dcs_get_database_identifier=None,
dcs_lock=True,
pg_get_database_identifier='42')
timeout = app.initialize()
assert plugins.mock_calls == [
call.initialize(),
call.get_my_id(),
# check if we have a db identifier set
call.dcs_get_database_identifier(),
# no, ok, init our db
call.pg_initdb(),
# make sure it starts
call.pg_start(),
call.pg_get_database_identifier(),
# lock the database identifier so no-one else gets here
call.dcs_lock('database_identifier'),
# while locked, make sure no id was set in the DCS before we acquired the lock
call.dcs_get_database_identifier(),
# Make the first backup while locked with no DCS
call.pg_backup(),
# set the database identifier AFTER
call.dcs_set_database_identifier('42')
]
# shut down cleanly and immediately
assert timeout == 0
def test_master_bootstrap_fails_to_lock_db_id(app):
plugins = setup_plugins(app,
dcs_get_database_identifier=None,
dcs_lock=False,
pg_get_database_identifier='42')
timeout = app.initialize()
assert plugins.mock_calls == [
call.initialize(),
call.get_my_id(),
# check if we have a db identifier set
call.dcs_get_database_identifier(),
# no, ok, init our db
call.pg_initdb(),
# make sure it starts
call.pg_start(),
call.pg_get_database_identifier(),
# lock the database identifier so no-one else gets here
call.dcs_lock('database_identifier')
]
# shut down cleanly
assert timeout == 5
def test_replica_bootstrap(app):
plugins = setup_plugins(app,
dcs_get_database_identifier='1234')
plugins.pg_get_database_identifier.side_effect = ['42', '1234']
timeout = app.initialize()
assert plugins.mock_calls == [
call.initialize(),
call.get_my_id(),
# compare our id with the id in the DCS
call.dcs_get_database_identifier(),
call.pg_get_database_identifier(),
# make sure postgresql is stopped
call.pg_stop(),
# postgresql restore
call.pg_initdb(),
call.pg_restore(),
call.pg_setup_replication(None),
call.pg_get_database_identifier(),
call.pg_replication_role()
]
# shut down cleanly and immediately
assert timeout == 0
def test_replica_bootstrap_fails_sanity_test(app):
plugins = setup_plugins(app,
pg_replication_role='master',
dcs_get_database_identifier='1234',
pg_get_database_identifier='42')
timeout = app.initialize()
assert plugins.mock_calls == [
call.initialize(),
call.get_my_id(),
# compare our id with the id in the DCS
call.dcs_get_database_identifier(),
call.pg_get_database_identifier(),
# make sure postgresql is stopped
call.pg_stop(),
# postgresql restore
call.pg_initdb(),
call.pg_restore(),
call.pg_setup_replication(None),
call.pg_get_database_identifier(),
call.pg_replication_role(),
call.pg_reset(),
]
# shut down after 5 seconds to try again
assert timeout == 5
@pytest.mark.asyncio
async def test_master_start(app):
plugins = setup_plugins(app,
dcs_get_database_identifier='1234',
dcs_lock=True,
pg_replication_role='master',
pg_get_database_identifier='1234')
def start_monitoring():
app.unhealthy('test_monitor', 'Waiting for first check')
plugins.start_monitoring.side_effect = start_monitoring
# sync startup
timeout = app.initialize()
assert plugins.mock_calls == [
call.initialize(),
call.get_my_id(),
# compare our id with the id in the DCS
call.dcs_get_database_identifier(),
call.pg_get_database_identifier(),
# check if I am a replica
call.pg_replication_role(),
# no, so check if there is a master
call.dcs_lock('master'),
# no master, so make sure the DB is running
call.pg_start(),
# start monitoring
call.start_monitoring(),
call.dcs_watch(
app.master_lock_changed,
app._notify_state,
app._notify_conn_info,
),
call.get_conn_info(),
# set our first state
call.dcs_set_state({
'host': '127.0.0.1',
'replication_role': 'master',
'health_problems': {'test_monitor':
{'can_be_replica': False, 'reason': 'Waiting for first check'}}})
]
# Carry on running afterwards
assert timeout == None
assert app.health_problems == {'test_monitor': {'can_be_replica': False, 'reason': 'Waiting for first check'}}
# Our test monitor becomes healthy
plugins.reset_mock()
app.healthy('test_monitor')
assert plugins.mock_calls == [
call.dcs_set_state({
'host': '127.0.0.1',
'replication_role': 'master',
'health_problems': {}}),
call.pg_replication_role(),
call.dcs_lock('master'),
call.dcs_set_conn_info({'host': '127.0.0.1'}),
]
def test_failed_over_master_start(app):
# A master has failed over and restarted; another master has successfully advanced
plugins = setup_plugins(app,
dcs_lock=False,
dcs_get_timeline=2,
pg_get_timeline=1,
pg_replication_role='master')
# sync startup
timeout = app.initialize()
assert plugins.mock_calls == [
call.initialize(),
call.get_my_id(),
# compare our id with the id in the DCS
call.dcs_get_database_identifier(),
call.pg_get_database_identifier(),
# check if I am a replica
call.pg_replication_role(),
# no, so check if there is a master
call.dcs_lock('master'),
call.dcs_get_lock_owner('master'),
call.pg_stop(),
# compare our timeline to what's in the DCS
call.pg_get_timeline(),
call.dcs_get_timeline(),
# we're on an older timeline, so reset
call.pg_reset(),
]
# Carry on running afterwards
assert timeout == 5
def test_replica_start(app):
plugins = setup_plugins(app,
dcs_get_database_identifier='1234',
dcs_lock=True,
pg_replication_role='replica',
pg_get_database_identifier='1234')
app._conn_info['a'] = 'b'
def start_monitoring():
app.unhealthy('test_monitor', 'Waiting for first check')
plugins.start_monitoring.side_effect = start_monitoring
# sync startup
timeout = app.initialize()
assert plugins.mock_calls == [
call.initialize(),
call.get_my_id(),
# compare our id with the id in the DCS
call.dcs_get_database_identifier(),
call.pg_get_database_identifier(),
# check if I am a replica
call.pg_replication_role(),
# not master, so make sure the DB is running
call.pg_start(),
# start monitoring
call.start_monitoring(),
call.dcs_watch(
app.master_lock_changed,
app._notify_state,
app._notify_conn_info,
),
# setup our connection info
call.get_conn_info(),
# set our first state
call.dcs_set_state({
'a': 'b',
'host': '127.0.0.1',
'replication_role': 'replica',
'health_problems': {'test_monitor':
{'can_be_replica': False, 'reason': 'Waiting for first check'}},
})
]
# Carry on running afterwards
assert timeout == None
assert app.health_problems == {'test_monitor': {'can_be_replica': False, 'reason': 'Waiting for first check'}}
# Our test monitor becomes healthy
plugins.reset_mock()
with patch('time.time') as mock_time:
app.healthy('test_monitor')
assert plugins.mock_calls == [
call.veto_takeover({'health_problems': {},
'a': 'b',
'replication_role': 'replica',
'host': '127.0.0.1'}),
call.dcs_set_state({'health_problems': {},
'a': 'b',
'replication_role': 'replica',
'host': '127.0.0.1',
'willing': mock_time(),
}),
call.pg_replication_role(),
call.dcs_set_conn_info({'a': 'b', 'host': '127.0.0.1'}),
]
def test_plugin_subscribe_to_state(app):
plugins = setup_plugins(app)
app.initialize()
assert plugins.dcs_watch.mock_calls == [
call.dcs_watch(
app.master_lock_changed,
app._notify_state,
app._notify_conn_info,
)]
def test_plugin_tells_app_to_follow_new_leader(app):
plugins = setup_plugins(app)
app.initialize()
plugins.reset_mock()
app.follow(primary_conninfo=dict(host='127.0.0.9', port=5432))
assert plugins.mock_calls == [
call.pg_replication_role(),
call.pg_setup_replication({'port': 5432, 'host': '127.0.0.9'}),
call.pg_restart()] # must restart for new recovery.conf to take effect
def test_restart_master(app, event_loop):
plugins = setup_plugins(app,
pg_replication_role='master')
app.initialize()
plugins.reset_mock()
with patch('time.sleep') as sleep:
app.restart(10)
assert sleep.called_once_with(10)
event_loop.run_forever() # must be stopped by restart()
assert plugins.mock_calls == [
call.pg_replication_role(),
call.pg_stop(),
call.dcs_disconnect()
]
def test_restart_replica(app, event_loop):
plugins = setup_plugins(app,
pg_replication_role='replica')
app.initialize()
plugins.reset_mock()
with patch('time.sleep') as sleep:
app.restart(10)
assert sleep.called_once_with(10)
event_loop.run_forever() # must be stopped by restart()
assert plugins.mock_calls == [
call.pg_replication_role(),
call.dcs_disconnect()
]
@pytest.mark.asyncio
async def test_master_lock_broken(app):
plugins = setup_plugins(app,
pg_replication_role='master')
assert app.initialize() == None
plugins.reset_mock()
# if the lock is broken, shut down postgresql and exit
with patch('time.sleep') as sleep:
with patch('sys.exit') as exit:
app.master_lock_changed(None)
assert exit.called_once_with(0)
assert sleep.called_once_with(10)
assert plugins.mock_calls == [
call.pg_replication_role(),
call.pg_replication_role(),
call.pg_stop(),
call.dcs_disconnect(),
call.master_lock_changed(None)
]
assert app._master_lock_owner == None
@pytest.mark.asyncio
async def test_master_lock_changes_owner(app):
# if the lock changes owner to someone else, shut down postgresql and exit
plugins = setup_plugins(app,
pg_replication_role='master')
assert app.initialize() == None
plugins.reset_mock()
with patch('time.sleep') as sleep:
with patch('sys.exit') as exit:
app.master_lock_changed('someone else')
assert exit.called_once_with(0)
assert sleep.called_once_with(10)
assert plugins.mock_calls == [
call.pg_replication_role(),
call.pg_replication_role(),
call.pg_stop(),
call.dcs_disconnect(),
call.master_lock_changed('someone else')
]
assert app._master_lock_owner == 'someone else'
# if the lock is owned by us, carry on trucking
plugins.reset_mock()
with patch('time.sleep') as sleep:
with patch('sys.exit') as exit:
app.master_lock_changed(app.my_id)
assert exit.called_once_with(0)
assert sleep.called_once_with(10)
assert plugins.mock_calls == [
call.pg_replication_role(),
call.master_lock_changed('42')
]
assert app._master_lock_owner == app.my_id
@pytest.mark.asyncio
async def test_plugin_subscribes_to_master_lock_change(app):
plugins = setup_plugins(app,
pg_get_timeline=42,
master_lock_changed=[('pluginA', None)],
pg_replication_role='replica')
assert app.initialize() == None
plugins.reset_mock()
app.master_lock_changed('someone else')
assert plugins.mock_calls == [
call.pg_replication_role(),
call.master_lock_changed('someone else'),
]
@pytest.mark.asyncio
async def test_replica_reaction_to_master_lock_change(app):
plugins = setup_plugins(app,
pg_get_timeline=42,
pg_replication_role='replica')
assert app.initialize() == None
plugins.reset_mock()
# if the lock changes owner to someone else, carry on trucking
plugins.reset_mock()
app.master_lock_changed('someone else')
assert plugins.mock_calls == [
call.pg_replication_role(),
call.master_lock_changed('someone else')
]
assert app._master_lock_owner == 'someone else'
# if the lock is owned by us, we stop replication and become the master
plugins.reset_mock()
plugins.pg_replication_role.side_effect = ['replica', 'master']
app.master_lock_changed(app.my_id)
assert plugins.mock_calls == [
call.pg_replication_role(),
call.dcs_set_state({
'replication_role': 'taking-over',
'willing': None,
'health_problems': {},
'host': '127.0.0.1'}),
call.pg_stop_replication(),
call.pg_replication_role(),
call.pg_get_timeline(),
call.dcs_set_timeline(42),
call.dcs_set_state({
'health_problems': {},
'replication_role': 'master',
'willing': None,
'host': '127.0.0.1'}),
call.master_lock_changed('42')
]
assert app._master_lock_owner == app.my_id
@pytest.mark.asyncio
async def test_replica_tries_to_take_over(app):
plugins = setup_plugins(app,
pg_replication_role='replica')
assert app.initialize() == None
plugins.reset_mock()
# if there is no lock owner, we start looping trying to become master
app.master_lock_changed(None)
assert plugins.mock_calls == [call.pg_replication_role(), call.master_lock_changed(None)]
plugins.reset_mock()
from asyncio import sleep as real_sleep
with patch('asyncio.sleep') as sleep:
sleeper = FakeSleeper()
sleep.side_effect = sleeper
# the first thing is to sleep a bit
await sleeper.next()
assert sleeper.log == [3]
assert plugins.mock_calls == []
# takeover attempted
states = [(app.my_id, {'willing': 99.0}), (app.my_id, {'willing': 100.0})]
plugins.dcs_list_state.return_value = states
await sleeper.next()
assert sleeper.log == [3, 3]
assert plugins.mock_calls == [
call.dcs_list_state(),
call.best_replicas([('42', {'willing': 99.0}), ('42', {'willing': 100.0})]),
call.dcs_lock('master')]
def test_replica_unhealthy(app):
plugins = setup_plugins(app,
pg_replication_role='replica')
app.initialize()
plugins.reset_mock()
app.unhealthy('boom', 'It went Boom')
assert plugins.mock_calls == [
call.dcs_set_state({
'host': '127.0.0.1',
'replication_role': 'replica',
'willing': None, # I am not going to participate in master elections
'health_problems': {'boom': {'reason': 'It went Boom', 'can_be_replica': False}}}),
call.pg_replication_role(),
call.dcs_delete_conn_info(),
]
def test_replica_slightly_sick(app):
plugins = setup_plugins(app,
pg_replication_role='replica')
app.initialize()
plugins.reset_mock()
app.unhealthy('boom', 'It went Boom', can_be_replica=True)
assert plugins.mock_calls == [
call.dcs_set_state({
'host': '127.0.0.1',
'replication_role': 'replica',
'willing': None, # I am not going to participate in master elections
'health_problems': {'boom': {'reason': 'It went Boom', 'can_be_replica': True}}}),
call.pg_replication_role(),
]
@pytest.mark.asyncio
async def test_master_unhealthy(app):
plugins = setup_plugins(app,
pg_replication_role='master')
app.initialize()
plugins.reset_mock()
app.unhealthy('boom', 'It went Boom', can_be_replica=True)
assert plugins.mock_calls == [
call.dcs_set_state({
'host': '127.0.0.1',
'replication_role': 'master',
'health_problems': {'boom': {'reason': 'It went Boom', 'can_be_replica': True}}}),
call.pg_replication_role(),
call.dcs_delete_conn_info(),
]
plugins.reset_mock()
# now we should have _handle_unhealthy_master running
with patch('asyncio.sleep') as sleep, patch('zgres.deadman.App._stop') as exit, patch('time.sleep') as blocking_sleep:
sleeper = FakeSleeper()
sleep.side_effect = sleeper
exit.side_effect = lambda : sleeper.finish()
# there is no replica, so we just sleep and ping the
# DCS to find a willing replica
states = [iter([])]
plugins.dcs_list_state.side_effect = states
await sleeper.next()
assert plugins.mock_calls == [call.dcs_list_state()]
# we add a willing replica
states = [iter([('other', {'willing': 1})])]
plugins.dcs_list_state.side_effect = states
plugins.reset_mock()
await sleeper.next()
assert plugins.mock_calls == [
call.dcs_list_state(),
call.pg_replication_role(),
call.pg_stop(),
call.dcs_disconnect()
]
|
|
import logging
from django.contrib.auth import get_user_model
from django.http import HttpResponseForbidden, HttpResponseRedirect
from django.urls import reverse
from django.views.generic import CreateView, DetailView, TemplateView, UpdateView, View
from django_filters.rest_framework import DjangoFilterBackend
from ipware.ip import get_ip
from rest_framework.generics import ListAPIView
from waffle import get_waffle_flag_model
from common.mixins import NeverCacheMixin, PrivateMixin
from .filters import AccessLogFilter
from .forms import DataTypeForm
from .models import (
AWSDataFileAccessLog,
DataFile,
DataFileKey,
DataType,
NewDataFileAccessLog,
)
from .permissions import LogAPIAccessAllowed
from data_import.serializers import (
AWSDataFileAccessLogSerializer,
NewDataFileAccessLogSerializer,
serialize_datafile_to_dict,
)
from private_sharing.api_authentication import CustomOAuth2Authentication
from private_sharing.api_permissions import HasValidProjectToken
UserModel = get_user_model()
FlagModel = get_waffle_flag_model()
logger = logging.getLogger(__name__)
class DataFileDownloadView(View):
"""
Log a download and redirect the requestor to its actual location.
"""
def get_and_log(self, request, key_object=None):
"""
Redirect to S3 URL for file. Log access if this feature is active.
Feature activity for specific users is based on whether the datafile
belongs to that user, not based on the user doing the download.
"""
aws_url = self.data_file.file_url_as_attachment
url = "{0}&x-oh-key={1}".format(aws_url, key_object.key)
# Check feature flag based on file user (subject), not request user.
flag = FlagModel.get("datafile-access-logging")
if not flag.is_active(request=request, subject=self.data_file.user):
return HttpResponseRedirect(url)
user = request.user if request.user.is_authenticated else None
access_log = NewDataFileAccessLog(
user=user, ip_address=get_ip(request), data_file=self.data_file
)
access_log.data_file_key = {
"id": key_object.id,
"created": key_object.created.isoformat(),
"key": key_object.key,
"datafile_id": key_object.datafile_id,
"key_creation_ip_address": key_object.ip_address,
"access_token": key_object.access_token,
"project_id": key_object.project_id,
}
access_log.aws_url = url
access_log.serialized_data_file = serialize_datafile_to_dict(self.data_file)
access_log.save()
return HttpResponseRedirect(url)
# pylint: disable=attribute-defined-outside-init
def get(self, request, *args, **kwargs):
data_file_qs = DataFile.objects.filter(pk=self.kwargs.get("pk"))
if data_file_qs.exists():
self.data_file = data_file_qs.get()
unavailable = (
hasattr(self.data_file, "parent_project_data_file")
and self.data_file.parent_project_data_file.completed is False
)
else:
unavailable = True
if unavailable:
return HttpResponseForbidden("<h1>This file is unavailable.</h1>")
query_key = request.GET.get("key", None)
if query_key:
key_qs = DataFileKey.objects.filter(datafile_id=self.data_file.id)
key_qs = key_qs.filter(key=query_key)
if key_qs.exists():
# exists() is only a method for querysets
key_object = key_qs.get()
# Now we need the actual object
if not key_object.expired:
return self.get_and_log(request, key_object=key_object)
return HttpResponseForbidden(
"<h1>You are not authorized to view this file.</h1>"
)
class NewDataFileAccessLogView(NeverCacheMixin, ListAPIView):
"""
Custom API endpoint returning logs of file access requests for OHLOG_PROJECT_ID
"""
authentication_classes = (CustomOAuth2Authentication,)
filter_backends = (AccessLogFilter, DjangoFilterBackend)
filterset_fields = ("date",)
permission_classes = (HasValidProjectToken, LogAPIAccessAllowed)
serializer_class = NewDataFileAccessLogSerializer
def get_queryset(self):
queryset = NewDataFileAccessLog.objects.filter(
serialized_data_file__user_id=self.request.user.id
)
return queryset
class AWSDataFileAccessLogView(NeverCacheMixin, ListAPIView):
"""
Custom API endpoint returning logs of AWS file access events for OHLOG_PROJECT_ID
"""
authentication_classes = (CustomOAuth2Authentication,)
filter_backends = (AccessLogFilter, DjangoFilterBackend)
filterset_fields = ("time",)
permission_classes = (HasValidProjectToken, LogAPIAccessAllowed)
serializer_class = AWSDataFileAccessLogSerializer
def get_queryset(self):
queryset = AWSDataFileAccessLog.objects.filter(
serialized_data_file__user_id=self.request.user.id
)
return queryset
class DataTypesSortedMixin(object):
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
datatypes_sorted = DataType.sorted_by_ancestors()
try:
max_depth = max([i["depth"] for i in datatypes_sorted])
except ValueError:
max_depth = 0
context.update({"datatypes_sorted": datatypes_sorted, "max_depth": max_depth})
return context
class DataTypesListView(NeverCacheMixin, TemplateView):
"""
List all DataTypes.
"""
template_name = "data_import/datatypes-list.html"
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
datatypes_sorted = DataType.sorted_by_ancestors()
try:
max_depth = max([i["depth"] for i in datatypes_sorted])
except ValueError:
max_depth = 0
context.update({"datatypes_sorted": datatypes_sorted, "max_depth": max_depth})
return context
class DataTypesDetailView(NeverCacheMixin, DetailView):
"""
Information about a DataType.
"""
model = DataType
template_name = "data_import/datatypes-detail.html"
class FormEditorMixin(object):
"""
Override get_form_kwargs to pass request user as 'editor' kwarg to a form.
"""
def get_form_kwargs(self, *args, **kwargs):
kwargs = super().get_form_kwargs(*args, **kwargs)
kwargs["editor"] = self.request.user.member
return kwargs
class DataTypesCreateView(
PrivateMixin, DataTypesSortedMixin, FormEditorMixin, CreateView
):
"""
Create a new DataType.
"""
form_class = DataTypeForm
template_name = "data_import/datatypes-create.html"
def get_success_url(self):
return reverse("data-management:datatypes-list")
class DataTypesUpdateView(
PrivateMixin, DataTypesSortedMixin, FormEditorMixin, UpdateView
):
"""
Edit a DataType.
"""
model = DataType
form_class = DataTypeForm
template_name = "data_import/datatypes-update.html"
def get_success_url(self):
return reverse(
"data-management:datatypes-detail", kwargs={"pk": self.object.id}
)
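# Hypothetical URLconf sketch (the route and name below are assumptions, not part
# of this module); it shows how the `pk` kwarg read by DataFileDownloadView.get()
# would be supplied, with the expiring key passed as the `?key=` query parameter.
def _example_urlpatterns():
    from django.urls import path

    return [
        path(
            "datafile-download/<int:pk>/",
            DataFileDownloadView.as_view(),
            name="datafile-download",
        )
    ]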
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUFirewallRule(NURESTObject):
""" Represents a FirewallRule in the VSD
Notes:
None
"""
__rest_name__ = "firewallrule"
__resource_name__ = "firewallrules"
## Constants
CONST_ACTION_FORWARDING_PATH_LIST = "FORWARDING_PATH_LIST"
CONST_NETWORK_TYPE_NETWORK_MACRO_GROUP = "NETWORK_MACRO_GROUP"
CONST_ACTION_DROP = "DROP"
CONST_LOCATION_TYPE_ZONE = "ZONE"
CONST_ACTION_REDIRECT = "REDIRECT"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_NETWORK_TYPE_PUBLIC_NETWORK = "PUBLIC_NETWORK"
CONST_ACTION_FORWARD = "FORWARD"
CONST_NETWORK_TYPE_POLICYGROUP = "POLICYGROUP"
CONST_LOCATION_TYPE_ANY = "ANY"
CONST_NETWORK_TYPE_ENDPOINT_DOMAIN = "ENDPOINT_DOMAIN"
CONST_NETWORK_TYPE_ENTERPRISE_NETWORK = "ENTERPRISE_NETWORK"
CONST_NETWORK_TYPE_ANY = "ANY"
CONST_LOCATION_TYPE_POLICYGROUP = "POLICYGROUP"
CONST_NETWORK_TYPE_SUBNET = "SUBNET"
CONST_NETWORK_TYPE_ZONE = "ZONE"
CONST_ASSOCIATED_TRAFFIC_TYPE_L4_SERVICE_GROUP = "L4_SERVICE_GROUP"
CONST_NETWORK_TYPE_ENDPOINT_SUBNET = "ENDPOINT_SUBNET"
CONST_LOCATION_TYPE_VPORTTAG = "VPORTTAG"
CONST_LOCATION_TYPE_SUBNET = "SUBNET"
CONST_NETWORK_TYPE_NETWORK = "NETWORK"
CONST_ASSOCIATED_TRAFFIC_TYPE_L4_SERVICE = "L4_SERVICE"
CONST_WEB_FILTER_TYPE_WEB_DOMAIN_NAME = "WEB_DOMAIN_NAME"
CONST_LOCATION_TYPE_REDIRECTIONTARGET = "REDIRECTIONTARGET"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_WEB_FILTER_TYPE_WEB_CATEGORY = "WEB_CATEGORY"
CONST_NETWORK_TYPE_INTERNET_POLICYGROUP = "INTERNET_POLICYGROUP"
CONST_NETWORK_TYPE_ENDPOINT_ZONE = "ENDPOINT_ZONE"
def __init__(self, **kwargs):
""" Initializes a FirewallRule instance
Notes:
You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> firewallrule = NUFirewallRule(id=u'xxxx-xxx-xxx-xxx', name=u'FirewallRule')
>>> firewallrule = NUFirewallRule(data=my_dict)
"""
super(NUFirewallRule, self).__init__()
# Read/Write Attributes
self._acl_template_name = None
self._icmp_code = None
self._icmp_type = None
self._ipv6_address_override = None
self._dscp = None
self._last_updated_by = None
self._action = None
self._address_override = None
self._web_filter_id = None
self._web_filter_type = None
self._description = None
self._destination_port = None
self._network_id = None
self._network_type = None
self._mirror_destination_id = None
self._flow_logging_enabled = None
self._enterprise_name = None
self._entity_scope = None
self._location_id = None
self._location_type = None
self._domain_name = None
self._source_port = None
self._priority = None
self._protocol = None
self._associated_live_template_id = None
self._associated_traffic_type = None
self._associated_traffic_type_id = None
self._associatedfirewall_aclid = None
self._stateful = None
self._stats_id = None
self._stats_logging_enabled = None
self._ether_type = None
self._external_id = None
self.expose_attribute(local_name="acl_template_name", remote_name="ACLTemplateName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="icmp_code", remote_name="ICMPCode", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="icmp_type", remote_name="ICMPType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ipv6_address_override", remote_name="IPv6AddressOverride", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="dscp", remote_name="DSCP", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="action", remote_name="action", attribute_type=str, is_required=False, is_unique=False, choices=[u'DROP', u'FORWARD', u'FORWARDING_PATH_LIST', u'REDIRECT'])
self.expose_attribute(local_name="address_override", remote_name="addressOverride", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="web_filter_id", remote_name="webFilterID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="web_filter_type", remote_name="webFilterType", attribute_type=str, is_required=False, is_unique=False, choices=[u'WEB_CATEGORY', u'WEB_DOMAIN_NAME'])
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="destination_port", remote_name="destinationPort", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="network_id", remote_name="networkID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="network_type", remote_name="networkType", attribute_type=str, is_required=False, is_unique=False, choices=[u'ANY', u'ENDPOINT_DOMAIN', u'ENDPOINT_SUBNET', u'ENDPOINT_ZONE', u'ENTERPRISE_NETWORK', u'INTERNET_POLICYGROUP', u'NETWORK', u'NETWORK_MACRO_GROUP', u'POLICYGROUP', u'PUBLIC_NETWORK', u'SUBNET', u'ZONE'])
self.expose_attribute(local_name="mirror_destination_id", remote_name="mirrorDestinationID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="flow_logging_enabled", remote_name="flowLoggingEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="enterprise_name", remote_name="enterpriseName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="location_id", remote_name="locationID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="location_type", remote_name="locationType", attribute_type=str, is_required=False, is_unique=False, choices=[u'ANY', u'POLICYGROUP', u'REDIRECTIONTARGET', u'SUBNET', u'VPORTTAG', u'ZONE'])
self.expose_attribute(local_name="domain_name", remote_name="domainName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="source_port", remote_name="sourcePort", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="priority", remote_name="priority", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="protocol", remote_name="protocol", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_live_template_id", remote_name="associatedLiveTemplateID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_traffic_type", remote_name="associatedTrafficType", attribute_type=str, is_required=False, is_unique=False, choices=[u'L4_SERVICE', u'L4_SERVICE_GROUP'])
self.expose_attribute(local_name="associated_traffic_type_id", remote_name="associatedTrafficTypeID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associatedfirewall_aclid", remote_name="associatedfirewallACLID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="stateful", remote_name="stateful", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="stats_id", remote_name="statsID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="stats_logging_enabled", remote_name="statsLoggingEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="ether_type", remote_name="etherType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def acl_template_name(self):
""" Get acl_template_name value.
Notes:
The name of the parent Template for this acl entry
This attribute is named `ACLTemplateName` in VSD API.
"""
return self._acl_template_name
@acl_template_name.setter
def acl_template_name(self, value):
""" Set acl_template_name value.
Notes:
The name of the parent Template for this acl entry
This attribute is named `ACLTemplateName` in VSD API.
"""
self._acl_template_name = value
@property
def icmp_code(self):
""" Get icmp_code value.
Notes:
The ICMP Code when protocol selected is ICMP
This attribute is named `ICMPCode` in VSD API.
"""
return self._icmp_code
@icmp_code.setter
def icmp_code(self, value):
""" Set icmp_code value.
Notes:
The ICMP Code when protocol selected is ICMP
This attribute is named `ICMPCode` in VSD API.
"""
self._icmp_code = value
@property
def icmp_type(self):
""" Get icmp_type value.
Notes:
The ICMP Type when protocol selected is ICMP
This attribute is named `ICMPType` in VSD API.
"""
return self._icmp_type
@icmp_type.setter
def icmp_type(self, value):
""" Set icmp_type value.
Notes:
The ICMP Type when protocol selected is ICMP
This attribute is named `ICMPType` in VSD API.
"""
self._icmp_type = value
@property
def ipv6_address_override(self):
""" Get ipv6_address_override value.
Notes:
Overrides the source IPV6 for Ingress and destination IPV6 for Egress, MAC entries will use this address as the match criteria.
This attribute is named `IPv6AddressOverride` in VSD API.
"""
return self._ipv6_address_override
@ipv6_address_override.setter
def ipv6_address_override(self, value):
""" Set ipv6_address_override value.
Notes:
Overrides the source IPV6 for Ingress and destination IPV6 for Egress, MAC entries will use this address as the match criteria.
This attribute is named `IPv6AddressOverride` in VSD API.
"""
self._ipv6_address_override = value
@property
def dscp(self):
""" Get dscp value.
Notes:
DSCP match condition to be set in the rule. It is either * or from 0-63
This attribute is named `DSCP` in VSD API.
"""
return self._dscp
@dscp.setter
def dscp(self, value):
""" Set dscp value.
Notes:
DSCP match condition to be set in the rule. It is either * or from 0-63
This attribute is named `DSCP` in VSD API.
"""
self._dscp = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def action(self):
""" Get action value.
Notes:
The action of the ACL entry DROP or FORWARD or REDIRECT.
"""
return self._action
@action.setter
def action(self, value):
""" Set action value.
Notes:
The action of the ACL entry DROP or FORWARD or REDIRECT.
"""
self._action = value
@property
def address_override(self):
""" Get address_override value.
Notes:
Overrides the source IP for Ingress and destination IP for Egress, MAC entries will use this address as the match criteria.
This attribute is named `addressOverride` in VSD API.
"""
return self._address_override
@address_override.setter
def address_override(self, value):
""" Set address_override value.
Notes:
Overrides the source IP for Ingress and destination IP for Egress, MAC entries will use this address as the match criteria.
This attribute is named `addressOverride` in VSD API.
"""
self._address_override = value
@property
def web_filter_id(self):
""" Get web_filter_id value.
Notes:
ID of web filter
This attribute is named `webFilterID` in VSD API.
"""
return self._web_filter_id
@web_filter_id.setter
def web_filter_id(self, value):
""" Set web_filter_id value.
Notes:
ID of web filter
This attribute is named `webFilterID` in VSD API.
"""
self._web_filter_id = value
@property
def web_filter_type(self):
""" Get web_filter_type value.
Notes:
Indicates type of web filter being set
This attribute is named `webFilterType` in VSD API.
"""
return self._web_filter_type
@web_filter_type.setter
def web_filter_type(self, value):
""" Set web_filter_type value.
Notes:
Indicates type of web filter being set
This attribute is named `webFilterType` in VSD API.
"""
self._web_filter_type = value
@property
def description(self):
""" Get description value.
Notes:
Description of the ACL entry
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
Description of the ACL entry
"""
self._description = value
@property
def destination_port(self):
""" Get destination_port value.
Notes:
The destination port to be matched if protocol is UDP or TCP. Value should be either * or single port number or a port range
This attribute is named `destinationPort` in VSD API.
"""
return self._destination_port
@destination_port.setter
def destination_port(self, value):
""" Set destination_port value.
Notes:
The destination port to be matched if protocol is UDP or TCP. Value should be either * or single port number or a port range
This attribute is named `destinationPort` in VSD API.
"""
self._destination_port = value
@property
def network_id(self):
""" Get network_id value.
Notes:
The destination network entity that is referenced (subnet/zone/macro)
This attribute is named `networkID` in VSD API.
"""
return self._network_id
@network_id.setter
def network_id(self, value):
""" Set network_id value.
Notes:
The destination network entity that is referenced (subnet/zone/macro)
This attribute is named `networkID` in VSD API.
"""
self._network_id = value
@property
def network_type(self):
""" Get network_type value.
Notes:
Type of the source network - VM_SUBNET or VM_ZONE or VM_DOMAIN or SUBNET or ZONE or ENTERPRISE_NETWORK or PUBLIC_NETWORK or ANY
This attribute is named `networkType` in VSD API.
"""
return self._network_type
@network_type.setter
def network_type(self, value):
""" Set network_type value.
Notes:
Type of the source network - VM_SUBNET or VM_ZONE or VM_DOMAIN or SUBNET or ZONE or ENTERPRISE_NETWORK or PUBLIC_NETWORK or ANY
This attribute is named `networkType` in VSD API.
"""
self._network_type = value
@property
def mirror_destination_id(self):
""" Get mirror_destination_id value.
Notes:
This is the ID of the mirrorDestination entity associated with this entity
This attribute is named `mirrorDestinationID` in VSD API.
"""
return self._mirror_destination_id
@mirror_destination_id.setter
def mirror_destination_id(self, value):
""" Set mirror_destination_id value.
Notes:
This is the ID of the mirrorDestination entity associated with this entity
This attribute is named `mirrorDestinationID` in VSD API.
"""
self._mirror_destination_id = value
@property
def flow_logging_enabled(self):
""" Get flow_logging_enabled value.
Notes:
Is flow logging enabled for this particular template
This attribute is named `flowLoggingEnabled` in VSD API.
"""
return self._flow_logging_enabled
@flow_logging_enabled.setter
def flow_logging_enabled(self, value):
""" Set flow_logging_enabled value.
Notes:
Is flow logging enabled for this particular template
This attribute is named `flowLoggingEnabled` in VSD API.
"""
self._flow_logging_enabled = value
@property
def enterprise_name(self):
""" Get enterprise_name value.
Notes:
The name of the enterprise for the domains parent
This attribute is named `enterpriseName` in VSD API.
"""
return self._enterprise_name
@enterprise_name.setter
def enterprise_name(self, value):
""" Set enterprise_name value.
Notes:
The name of the enterprise for the domains parent
This attribute is named `enterpriseName` in VSD API.
"""
self._enterprise_name = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def location_id(self):
""" Get location_id value.
Notes:
The ID of the location entity (Subnet/Zone/VportTag)
This attribute is named `locationID` in VSD API.
"""
return self._location_id
@location_id.setter
def location_id(self, value):
""" Set location_id value.
Notes:
The ID of the location entity (Subnet/Zone/VportTag)
This attribute is named `locationID` in VSD API.
"""
self._location_id = value
@property
def location_type(self):
""" Get location_type value.
Notes:
Type of the location entity - ANY or SUBNET or ZONE or VPORTTAG
This attribute is named `locationType` in VSD API.
"""
return self._location_type
@location_type.setter
def location_type(self, value):
""" Set location_type value.
Notes:
Type of the location entity - ANY or SUBNET or ZONE or VPORTTAG
This attribute is named `locationType` in VSD API.
"""
self._location_type = value
@property
def domain_name(self):
""" Get domain_name value.
Notes:
The name of the domain/domain template for the aclTemplateNames parent
This attribute is named `domainName` in VSD API.
"""
return self._domain_name
@domain_name.setter
def domain_name(self, value):
""" Set domain_name value.
Notes:
The name of the domain/domain template for the aclTemplateNames parent
This attribute is named `domainName` in VSD API.
"""
self._domain_name = value
@property
def source_port(self):
""" Get source_port value.
Notes:
Source port to be matched if protocol is UDP or TCP. Value can be either * or single port number or a port range
This attribute is named `sourcePort` in VSD API.
"""
return self._source_port
@source_port.setter
def source_port(self, value):
""" Set source_port value.
Notes:
Source port to be matched if protocol is UDP or TCP. Value can be either * or single port number or a port range
This attribute is named `sourcePort` in VSD API.
"""
self._source_port = value
@property
def priority(self):
""" Get priority value.
Notes:
The priority of the ACL entry that determines the order of entries
"""
return self._priority
@priority.setter
def priority(self, value):
""" Set priority value.
Notes:
The priority of the ACL entry that determines the order of entries
"""
self._priority = value
@property
def protocol(self):
""" Get protocol value.
Notes:
Protocol number that must be matched
"""
return self._protocol
@protocol.setter
def protocol(self, value):
""" Set protocol value.
Notes:
Protocol number that must be matched
"""
self._protocol = value
@property
def associated_live_template_id(self):
""" Get associated_live_template_id value.
Notes:
In the draft mode, the ACL entity refers to this live entity parent. In non-drafted mode, this is null
This attribute is named `associatedLiveTemplateID` in VSD API.
"""
return self._associated_live_template_id
@associated_live_template_id.setter
def associated_live_template_id(self, value):
""" Set associated_live_template_id value.
Notes:
In the draft mode, the ACL entity refers to this live entity parent. In non-drafted mode, this is null
This attribute is named `associatedLiveTemplateID` in VSD API.
"""
self._associated_live_template_id = value
@property
def associated_traffic_type(self):
""" Get associated_traffic_type value.
Notes:
The associated Traffic type. L4 Service / L4 Service Group
This attribute is named `associatedTrafficType` in VSD API.
"""
return self._associated_traffic_type
@associated_traffic_type.setter
def associated_traffic_type(self, value):
""" Set associated_traffic_type value.
Notes:
The associated Traffic type. L4 Service / L4 Service Group
This attribute is named `associatedTrafficType` in VSD API.
"""
self._associated_traffic_type = value
@property
def associated_traffic_type_id(self):
""" Get associated_traffic_type_id value.
Notes:
The associated Traffic Type ID
This attribute is named `associatedTrafficTypeID` in VSD API.
"""
return self._associated_traffic_type_id
@associated_traffic_type_id.setter
def associated_traffic_type_id(self, value):
""" Set associated_traffic_type_id value.
Notes:
The associated Traffic Type ID
This attribute is named `associatedTrafficTypeID` in VSD API.
"""
self._associated_traffic_type_id = value
@property
def associatedfirewall_aclid(self):
""" Get associatedfirewall_aclid value.
Notes:
Associated Firewall Acl ID
This attribute is named `associatedfirewallACLID` in VSD API.
"""
return self._associatedfirewall_aclid
@associatedfirewall_aclid.setter
def associatedfirewall_aclid(self, value):
""" Set associatedfirewall_aclid value.
Notes:
Associated Firewall Acl ID
This attribute is named `associatedfirewallACLID` in VSD API.
"""
self._associatedfirewall_aclid = value
@property
def stateful(self):
""" Get stateful value.
Notes:
true means that this ACL entry is stateful, so there will be a corresponding rule that will be created by OVS in the network. false means that there is no corresponding rule created by OVS in the network
"""
return self._stateful
@stateful.setter
def stateful(self, value):
""" Set stateful value.
Notes:
true means that this ACL entry is stateful, so there will be a corresponding rule that will be created by OVS in the network. false means that there is no corresponding rule created by OVS in the network
"""
self._stateful = value
@property
def stats_id(self):
""" Get stats_id value.
Notes:
The statsID that is created in the VSD and identifies this ACL Template Entry. This is auto-generated by VSD.
This attribute is named `statsID` in VSD API.
"""
return self._stats_id
@stats_id.setter
def stats_id(self, value):
""" Set stats_id value.
Notes:
The statsID that is created in the VSD and identifies this ACL Template Entry. This is auto-generated by VSD.
This attribute is named `statsID` in VSD API.
"""
self._stats_id = value
@property
def stats_logging_enabled(self):
""" Get stats_logging_enabled value.
Notes:
Is stats logging enabled for this particular template
This attribute is named `statsLoggingEnabled` in VSD API.
"""
return self._stats_logging_enabled
@stats_logging_enabled.setter
def stats_logging_enabled(self, value):
""" Set stats_logging_enabled value.
Notes:
Is stats logging enabled for this particular template
This attribute is named `statsLoggingEnabled` in VSD API.
"""
self._stats_logging_enabled = value
@property
def ether_type(self):
""" Get ether_type value.
Notes:
Ether type of the packet to be matched. etherType can be * or a valid hexadecimal value
This attribute is named `etherType` in VSD API.
"""
return self._ether_type
@ether_type.setter
def ether_type(self, value):
""" Set ether_type value.
Notes:
Ether type of the packet to be matched. etherType can be * or a valid hexadecimal value
This attribute is named `etherType` in VSD API.
"""
self._ether_type = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
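# Minimal usage sketch (attribute values below are illustrative assumptions); it
# follows the constructor pattern from the class docstring and the exposed
# attributes above, setting a rule's fields before it would be pushed to the VSD.
def _example_firewall_rule():
    rule = NUFirewallRule()
    rule.description = 'allow HTTPS to the web subnet'
    rule.action = 'FORWARD'          # must be one of the CONST_ACTION_* choices
    rule.protocol = '6'              # protocol number as a string (6 = TCP)
    rule.destination_port = '443'    # '*', a single port, or a port range
    rule.stateful = True
    return rule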
|
|
from functools import partial
from collections import deque
from llvmlite import ir
from numba.core.datamodel.registry import register_default
from numba.core import types, cgutils
from numba.np import numpy_support
class DataModel(object):
"""
DataModel describe how a FE type is represented in the LLVM IR at
different contexts.
Contexts are:
- value: representation inside the function body. May be stored on the stack.
The representation here is flexible.
- data: representation used when storing into containers (e.g. arrays).
- argument: representation used for function arguments. All composite
types are flattened into multiple primitive types.
- return: representation used for the return argument.
Throughout the compiler pipeline, an LLVM value is usually passed around
in the "value" representation. All "as_"-prefixed functions convert from the
"value" representation. All "from_"-prefixed functions convert to the
"value" representation.
"""
def __init__(self, dmm, fe_type):
self._dmm = dmm
self._fe_type = fe_type
@property
def fe_type(self):
return self._fe_type
def get_value_type(self):
raise NotImplementedError(self)
def get_data_type(self):
return self.get_value_type()
def get_argument_type(self):
"""Return a LLVM type or nested tuple of LLVM type
"""
return self.get_value_type()
def get_return_type(self):
return self.get_value_type()
def as_data(self, builder, value):
raise NotImplementedError(self)
def as_argument(self, builder, value):
"""
Takes one LLVM value.
Returns an LLVM value or a nested tuple of LLVM values.
"""
raise NotImplementedError(self)
def as_return(self, builder, value):
raise NotImplementedError(self)
def from_data(self, builder, value):
raise NotImplementedError(self)
def from_argument(self, builder, value):
"""
Takes an LLVM value or a nested tuple of LLVM values.
Returns one LLVM value.
"""
raise NotImplementedError(self)
def from_return(self, builder, value):
raise NotImplementedError(self)
def load_from_data_pointer(self, builder, ptr, align=None):
"""
Load value from a pointer to data.
This is the default implementation, sufficient for most purposes.
"""
return self.from_data(builder, builder.load(ptr, align=align))
def traverse(self, builder):
"""
Traverse contained members.
Returns an iterable of contained (types, getters).
Each getter is a one-argument function accepting an LLVM value.
"""
return []
def traverse_models(self):
"""
Recursively list all models involved in this model.
"""
return [self._dmm[t] for t in self.traverse_types()]
def traverse_types(self):
"""
Recursively list all frontend types involved in this model.
"""
types = [self._fe_type]
queue = deque([self])
while len(queue) > 0:
dm = queue.popleft()
for i_dm in dm.inner_models():
if i_dm._fe_type not in types:
queue.append(i_dm)
types.append(i_dm._fe_type)
return types
def inner_models(self):
"""
List all *inner* models.
"""
return []
def get_nrt_meminfo(self, builder, value):
"""
Returns the MemInfo object or None if it is not tracked.
It is only defined for types.MemInfoPointer.
"""
return None
def has_nrt_meminfo(self):
return False
def contains_nrt_meminfo(self):
"""
Recursively check all contained types for need for NRT meminfo.
"""
return any(model.has_nrt_meminfo() for model in self.traverse_models())
def _compared_fields(self):
return (type(self), self._fe_type)
def __hash__(self):
return hash(tuple(self._compared_fields()))
def __eq__(self, other):
if type(self) is type(other):
return self._compared_fields() == other._compared_fields()
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
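# Minimal lookup sketch (an aside, not used by this module): `default_manager` is
# the registry that the `register_default` decorators below populate, so once the
# module is imported it resolves a frontend type to its concrete model.
def _example_model_lookup():
    from numba.core.datamodel.registry import default_manager

    model = default_manager.lookup(types.boolean)          # BooleanModel, registered below
    return model.get_value_type(), model.get_data_type()   # i1 as a value, i8 as data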
@register_default(types.Omitted)
class OmittedArgDataModel(DataModel):
"""
A data model for omitted arguments. Only the "argument" representation
is defined, other representations raise a NotImplementedError.
"""
# Omitted arguments don't produce any LLVM function argument.
def get_argument_type(self):
return ()
def as_argument(self, builder, val):
return ()
def from_argument(self, builder, val):
assert val == (), val
return None
@register_default(types.Boolean)
@register_default(types.BooleanLiteral)
class BooleanModel(DataModel):
_bit_type = ir.IntType(1)
_byte_type = ir.IntType(8)
def get_value_type(self):
return self._bit_type
def get_data_type(self):
return self._byte_type
def get_return_type(self):
return self.get_data_type()
def get_argument_type(self):
return self.get_data_type()
def as_data(self, builder, value):
return builder.zext(value, self.get_data_type())
def as_argument(self, builder, value):
return self.as_data(builder, value)
def as_return(self, builder, value):
return self.as_data(builder, value)
def from_data(self, builder, value):
ty = self.get_value_type()
resalloca = cgutils.alloca_once(builder, ty)
cond = builder.icmp_unsigned('==', value, value.type(0))
with builder.if_else(cond) as (then, otherwise):
with then:
builder.store(ty(0), resalloca)
with otherwise:
builder.store(ty(1), resalloca)
return builder.load(resalloca)
def from_argument(self, builder, value):
return self.from_data(builder, value)
def from_return(self, builder, value):
return self.from_data(builder, value)
class PrimitiveModel(DataModel):
"""A primitive type can be represented natively in the target in all
usage contexts.
"""
def __init__(self, dmm, fe_type, be_type):
super(PrimitiveModel, self).__init__(dmm, fe_type)
self.be_type = be_type
def get_value_type(self):
return self.be_type
def as_data(self, builder, value):
return value
def as_argument(self, builder, value):
return value
def as_return(self, builder, value):
return value
def from_data(self, builder, value):
return value
def from_argument(self, builder, value):
return value
def from_return(self, builder, value):
return value
class ProxyModel(DataModel):
"""
Helper class for models which delegate to another model.
"""
def get_value_type(self):
return self._proxied_model.get_value_type()
def get_data_type(self):
return self._proxied_model.get_data_type()
def get_return_type(self):
return self._proxied_model.get_return_type()
def get_argument_type(self):
return self._proxied_model.get_argument_type()
def as_data(self, builder, value):
return self._proxied_model.as_data(builder, value)
def as_argument(self, builder, value):
return self._proxied_model.as_argument(builder, value)
def as_return(self, builder, value):
return self._proxied_model.as_return(builder, value)
def from_data(self, builder, value):
return self._proxied_model.from_data(builder, value)
def from_argument(self, builder, value):
return self._proxied_model.from_argument(builder, value)
def from_return(self, builder, value):
return self._proxied_model.from_return(builder, value)
@register_default(types.EnumMember)
@register_default(types.IntEnumMember)
class EnumModel(ProxyModel):
"""
Enum members are represented exactly like their values.
"""
def __init__(self, dmm, fe_type):
super(EnumModel, self).__init__(dmm, fe_type)
self._proxied_model = dmm.lookup(fe_type.dtype)
@register_default(types.Opaque)
@register_default(types.PyObject)
@register_default(types.RawPointer)
@register_default(types.NoneType)
@register_default(types.StringLiteral)
@register_default(types.EllipsisType)
@register_default(types.Function)
@register_default(types.Type)
@register_default(types.Object)
@register_default(types.Module)
@register_default(types.Phantom)
@register_default(types.ContextManager)
@register_default(types.Dispatcher)
@register_default(types.ObjModeDispatcher)
@register_default(types.ExceptionClass)
@register_default(types.Dummy)
@register_default(types.ExceptionInstance)
@register_default(types.ExternalFunction)
@register_default(types.EnumClass)
@register_default(types.IntEnumClass)
@register_default(types.NumberClass)
@register_default(types.TypeRef)
@register_default(types.NamedTupleClass)
@register_default(types.DType)
@register_default(types.RecursiveCall)
@register_default(types.MakeFunctionLiteral)
@register_default(types.Poison)
class OpaqueModel(PrimitiveModel):
"""
Passed as opaque pointers
"""
_ptr_type = ir.IntType(8).as_pointer()
def __init__(self, dmm, fe_type):
be_type = self._ptr_type
super(OpaqueModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.MemInfoPointer)
class MemInfoModel(OpaqueModel):
def inner_models(self):
return [self._dmm.lookup(self._fe_type.dtype)]
def has_nrt_meminfo(self):
return True
def get_nrt_meminfo(self, builder, value):
return value
@register_default(types.Integer)
@register_default(types.IntegerLiteral)
class IntegerModel(PrimitiveModel):
def __init__(self, dmm, fe_type):
be_type = ir.IntType(fe_type.bitwidth)
super(IntegerModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.Float)
class FloatModel(PrimitiveModel):
def __init__(self, dmm, fe_type):
if fe_type == types.float32:
be_type = ir.FloatType()
elif fe_type == types.float64:
be_type = ir.DoubleType()
else:
raise NotImplementedError(fe_type)
super(FloatModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.CPointer)
class PointerModel(PrimitiveModel):
def __init__(self, dmm, fe_type):
self._pointee_model = dmm.lookup(fe_type.dtype)
self._pointee_be_type = self._pointee_model.get_data_type()
be_type = self._pointee_be_type.as_pointer()
super(PointerModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.EphemeralPointer)
class EphemeralPointerModel(PointerModel):
def get_data_type(self):
return self._pointee_be_type
def as_data(self, builder, value):
value = builder.load(value)
return self._pointee_model.as_data(builder, value)
def from_data(self, builder, value):
raise NotImplementedError("use load_from_data_pointer() instead")
def load_from_data_pointer(self, builder, ptr, align=None):
return builder.bitcast(ptr, self.get_value_type())
@register_default(types.EphemeralArray)
class EphemeralArrayModel(PointerModel):
def __init__(self, dmm, fe_type):
super(EphemeralArrayModel, self).__init__(dmm, fe_type)
self._data_type = ir.ArrayType(self._pointee_be_type,
self._fe_type.count)
def get_data_type(self):
return self._data_type
def as_data(self, builder, value):
values = [builder.load(cgutils.gep_inbounds(builder, value, i))
for i in range(self._fe_type.count)]
return cgutils.pack_array(builder, values)
def from_data(self, builder, value):
raise NotImplementedError("use load_from_data_pointer() instead")
def load_from_data_pointer(self, builder, ptr, align=None):
return builder.bitcast(ptr, self.get_value_type())
@register_default(types.ExternalFunctionPointer)
class ExternalFuncPointerModel(PrimitiveModel):
def __init__(self, dmm, fe_type):
sig = fe_type.sig
# Since the function is non-Numba, there is no adaptation
# of arguments and return value, hence get_value_type().
retty = dmm.lookup(sig.return_type).get_value_type()
args = [dmm.lookup(t).get_value_type() for t in sig.args]
be_type = ir.PointerType(ir.FunctionType(retty, args))
super(ExternalFuncPointerModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.UniTuple)
@register_default(types.NamedUniTuple)
@register_default(types.StarArgUniTuple)
class UniTupleModel(DataModel):
def __init__(self, dmm, fe_type):
super(UniTupleModel, self).__init__(dmm, fe_type)
self._elem_model = dmm.lookup(fe_type.dtype)
self._count = len(fe_type)
self._value_type = ir.ArrayType(self._elem_model.get_value_type(),
self._count)
self._data_type = ir.ArrayType(self._elem_model.get_data_type(),
self._count)
def get_value_type(self):
return self._value_type
def get_data_type(self):
return self._data_type
def get_return_type(self):
return self.get_value_type()
def get_argument_type(self):
return (self._elem_model.get_argument_type(),) * self._count
def as_argument(self, builder, value):
out = []
for i in range(self._count):
v = builder.extract_value(value, [i])
v = self._elem_model.as_argument(builder, v)
out.append(v)
return out
def from_argument(self, builder, value):
out = ir.Constant(self.get_value_type(), ir.Undefined)
for i, v in enumerate(value):
v = self._elem_model.from_argument(builder, v)
out = builder.insert_value(out, v, [i])
return out
def as_data(self, builder, value):
out = ir.Constant(self.get_data_type(), ir.Undefined)
for i in range(self._count):
val = builder.extract_value(value, [i])
dval = self._elem_model.as_data(builder, val)
out = builder.insert_value(out, dval, [i])
return out
def from_data(self, builder, value):
out = ir.Constant(self.get_value_type(), ir.Undefined)
for i in range(self._count):
val = builder.extract_value(value, [i])
dval = self._elem_model.from_data(builder, val)
out = builder.insert_value(out, dval, [i])
return out
def as_return(self, builder, value):
return value
def from_return(self, builder, value):
return value
def traverse(self, builder):
def getter(i, value):
return builder.extract_value(value, i)
return [(self._fe_type.dtype, partial(getter, i))
for i in range(self._count)]
def inner_models(self):
return [self._elem_model]
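# Hedged sketch (editor addition): UniTupleModel flattens a homogeneous tuple
# into one function argument per element, which is why get_argument_type()
# returns a tuple of per-element argument types rather than a single LLVM type.
# Assumes ``numba.core.datamodel.default_manager`` is importable.
def _example_unituple_argument_flattening():
    from numba.core import types as _types
    from numba.core.datamodel import default_manager as _default_manager
    model = _default_manager.lookup(_types.UniTuple(_types.int64, 3))
    args = model.get_argument_type()
    # One argument type per element, three in total for UniTuple(int64, 3).
    return len(args), args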
class CompositeModel(DataModel):
"""Any model that is composed of multiple other models should subclass from
this.
"""
pass
class StructModel(CompositeModel):
_value_type = None
_data_type = None
def __init__(self, dmm, fe_type, members):
super(StructModel, self).__init__(dmm, fe_type)
if members:
self._fields, self._members = zip(*members)
else:
self._fields = self._members = ()
self._models = tuple([self._dmm.lookup(t) for t in self._members])
def get_member_fe_type(self, name):
"""
StructModel-specific: get the Numba type of the field named *name*.
"""
pos = self.get_field_position(name)
return self._members[pos]
def get_value_type(self):
if self._value_type is None:
self._value_type = ir.LiteralStructType([t.get_value_type()
for t in self._models])
return self._value_type
def get_data_type(self):
if self._data_type is None:
self._data_type = ir.LiteralStructType([t.get_data_type()
for t in self._models])
return self._data_type
def get_argument_type(self):
return tuple([t.get_argument_type() for t in self._models])
def get_return_type(self):
return self.get_data_type()
def _as(self, methname, builder, value):
extracted = []
for i, dm in enumerate(self._models):
extracted.append(getattr(dm, methname)(builder,
self.get(builder, value, i)))
return tuple(extracted)
def _from(self, methname, builder, value):
struct = ir.Constant(self.get_value_type(), ir.Undefined)
for i, (dm, val) in enumerate(zip(self._models, value)):
v = getattr(dm, methname)(builder, val)
struct = self.set(builder, struct, v, i)
return struct
def as_data(self, builder, value):
"""
Converts the LLVM struct in `value` into a representation suited for
storing into arrays.
Note
----
The current implementation rarely changes how types are represented
between "value" and "data", so this is usually a pointless rebuild of
the immutable LLVM struct value. Luckily, LLVM optimization removes
the redundancy.
Sample use case: structures nested with pointers to other structures
that can be serialized into a flat representation when storing into an
array.
"""
elems = self._as("as_data", builder, value)
struct = ir.Constant(self.get_data_type(), ir.Undefined)
for i, el in enumerate(elems):
struct = builder.insert_value(struct, el, [i])
return struct
def from_data(self, builder, value):
"""
Convert from "data" representation back into "value" representation.
Usually invoked when loading from array.
See notes in `as_data()`
"""
vals = [builder.extract_value(value, [i])
for i in range(len(self._members))]
return self._from("from_data", builder, vals)
def load_from_data_pointer(self, builder, ptr, align=None):
values = []
for i, model in enumerate(self._models):
elem_ptr = cgutils.gep_inbounds(builder, ptr, 0, i)
val = model.load_from_data_pointer(builder, elem_ptr, align)
values.append(val)
struct = ir.Constant(self.get_value_type(), ir.Undefined)
for i, val in enumerate(values):
struct = self.set(builder, struct, val, i)
return struct
def as_argument(self, builder, value):
return self._as("as_argument", builder, value)
def from_argument(self, builder, value):
return self._from("from_argument", builder, value)
def as_return(self, builder, value):
elems = self._as("as_data", builder, value)
struct = ir.Constant(self.get_data_type(), ir.Undefined)
for i, el in enumerate(elems):
struct = builder.insert_value(struct, el, [i])
return struct
def from_return(self, builder, value):
vals = [builder.extract_value(value, [i])
for i in range(len(self._members))]
return self._from("from_data", builder, vals)
def get(self, builder, val, pos):
"""Get a field at the given position or the fieldname
Args
----
builder:
LLVM IRBuilder
val:
value to be inserted
pos: int or str
field index or field name
Returns
-------
Extracted value
"""
if isinstance(pos, str):
pos = self.get_field_position(pos)
return builder.extract_value(val, [pos],
name="extracted." + self._fields[pos])
def set(self, builder, stval, val, pos):
"""Set a field at the given position or the fieldname
Args
----
builder:
LLVM IRBuilder
stval:
LLVM struct value
val:
value to be inserted
pos: int or str
field index or field name
Returns
-------
A new LLVM struct with the value inserted
"""
if isinstance(pos, str):
pos = self.get_field_position(pos)
return builder.insert_value(stval, val, [pos],
name="inserted." + self._fields[pos])
def get_field_position(self, field):
try:
return self._fields.index(field)
except ValueError:
raise KeyError("%s does not have a field named %r"
% (self.__class__.__name__, field))
@property
def field_count(self):
return len(self._fields)
def get_type(self, pos):
"""Get the frontend type (numba type) of a field given the position
or the fieldname
Args
----
pos: int or str
field index or field name
"""
if isinstance(pos, str):
pos = self.get_field_position(pos)
return self._members[pos]
def get_model(self, pos):
"""
Get the datamodel of a field given the position or the fieldname.
Args
----
pos: int or str
field index or field name
"""
return self._models[pos]
def traverse(self, builder):
def getter(k, value):
if value.type != self.get_value_type():
args = self.get_value_type(), value.type
raise TypeError("expecting {0} but got {1}".format(*args))
return self.get(builder, value, k)
return [(self.get_type(k), partial(getter, k)) for k in self._fields]
def inner_models(self):
return self._models
@register_default(types.Complex)
class ComplexModel(StructModel):
_element_type = NotImplemented
def __init__(self, dmm, fe_type):
members = [
('real', fe_type.underlying_float),
('imag', fe_type.underlying_float),
]
super(ComplexModel, self).__init__(dmm, fe_type, members)
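# Hedged sketch (editor addition): ComplexModel is a StructModel with 'real' and
# 'imag' members, so a complex128 should be represented as a literal struct of
# two float64 values. Assumes ``numba.core.datamodel.default_manager`` is importable.
def _example_complex128_struct_layout():
    from numba.core import types as _types
    from numba.core.datamodel import default_manager as _default_manager
    model = _default_manager.lookup(_types.complex128)
    # Expected: a literal struct holding the 'real' and 'imag' float64 members.
    return model.get_value_type(), model.get_member_fe_type('real')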
@register_default(types.LiteralList)
@register_default(types.LiteralStrKeyDict)
@register_default(types.Tuple)
@register_default(types.NamedTuple)
@register_default(types.StarArgTuple)
class TupleModel(StructModel):
def __init__(self, dmm, fe_type):
members = [('f' + str(i), t) for i, t in enumerate(fe_type)]
super(TupleModel, self).__init__(dmm, fe_type, members)
@register_default(types.UnionType)
class UnionModel(StructModel):
def __init__(self, dmm, fe_type):
members = [
('tag', types.uintp),
# XXX: it should really be a MemInfoPointer(types.voidptr)
('payload', types.Tuple.from_types(fe_type.types)),
]
super(UnionModel, self).__init__(dmm, fe_type, members)
@register_default(types.Pair)
class PairModel(StructModel):
def __init__(self, dmm, fe_type):
members = [('first', fe_type.first_type),
('second', fe_type.second_type)]
super(PairModel, self).__init__(dmm, fe_type, members)
@register_default(types.ListPayload)
class ListPayloadModel(StructModel):
def __init__(self, dmm, fe_type):
# The fields are mutable but the payload is always manipulated
# by reference. This scheme allows mutations of an array to
# be seen by its iterators.
members = [
('size', types.intp),
('allocated', types.intp),
# This member is only used for reflected lists
('dirty', types.boolean),
# Actually an inlined var-sized array
('data', fe_type.container.dtype),
]
super(ListPayloadModel, self).__init__(dmm, fe_type, members)
@register_default(types.List)
class ListModel(StructModel):
def __init__(self, dmm, fe_type):
payload_type = types.ListPayload(fe_type)
members = [
# The meminfo data points to a ListPayload
('meminfo', types.MemInfoPointer(payload_type)),
# This member is only used for reflected lists
('parent', types.pyobject),
]
super(ListModel, self).__init__(dmm, fe_type, members)
@register_default(types.ListIter)
class ListIterModel(StructModel):
def __init__(self, dmm, fe_type):
payload_type = types.ListPayload(fe_type.container)
members = [
# The meminfo data points to a ListPayload (shared with the
# original list object)
('meminfo', types.MemInfoPointer(payload_type)),
('index', types.EphemeralPointer(types.intp)),
]
super(ListIterModel, self).__init__(dmm, fe_type, members)
@register_default(types.SetEntry)
class SetEntryModel(StructModel):
def __init__(self, dmm, fe_type):
dtype = fe_type.set_type.dtype
members = [
# -1 = empty, -2 = deleted
('hash', types.intp),
('key', dtype),
]
super(SetEntryModel, self).__init__(dmm, fe_type, members)
@register_default(types.SetPayload)
class SetPayloadModel(StructModel):
def __init__(self, dmm, fe_type):
entry_type = types.SetEntry(fe_type.container)
members = [
# Number of active + deleted entries
('fill', types.intp),
# Number of active entries
('used', types.intp),
# Allocated size - 1 (size being a power of 2)
('mask', types.intp),
# Search finger
('finger', types.intp),
# This member is only used for reflected sets
('dirty', types.boolean),
# Actually an inlined var-sized array
('entries', entry_type),
]
super(SetPayloadModel, self).__init__(dmm, fe_type, members)
@register_default(types.Set)
class SetModel(StructModel):
def __init__(self, dmm, fe_type):
payload_type = types.SetPayload(fe_type)
members = [
# The meminfo data points to a SetPayload
('meminfo', types.MemInfoPointer(payload_type)),
# This member is only used for reflected sets
('parent', types.pyobject),
]
super(SetModel, self).__init__(dmm, fe_type, members)
@register_default(types.SetIter)
class SetIterModel(StructModel):
def __init__(self, dmm, fe_type):
payload_type = types.SetPayload(fe_type.container)
members = [
# The meminfo data points to a SetPayload (shared with the
# original set object)
('meminfo', types.MemInfoPointer(payload_type)),
# The index into the entries table
('index', types.EphemeralPointer(types.intp)),
]
super(SetIterModel, self).__init__(dmm, fe_type, members)
@register_default(types.Array)
@register_default(types.Buffer)
@register_default(types.ByteArray)
@register_default(types.Bytes)
@register_default(types.MemoryView)
@register_default(types.PyArray)
class ArrayModel(StructModel):
def __init__(self, dmm, fe_type):
ndim = fe_type.ndim
members = [
('meminfo', types.MemInfoPointer(fe_type.dtype)),
('parent', types.pyobject),
('nitems', types.intp),
('itemsize', types.intp),
('data', types.CPointer(fe_type.dtype)),
('shape', types.UniTuple(types.intp, ndim)),
('strides', types.UniTuple(types.intp, ndim)),
]
super(ArrayModel, self).__init__(dmm, fe_type, members)
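# Hedged sketch (editor addition): ArrayModel gives every array the same struct
# layout (meminfo, parent, nitems, itemsize, data, shape, strides - seven fields
# per the members list above). Assumes ``numba.core.datamodel.default_manager``
# is importable.
def _example_array_model_fields():
    from numba.core import types as _types
    from numba.core.datamodel import default_manager as _default_manager
    model = _default_manager.lookup(_types.Array(_types.float64, 1, 'C'))
    # 'shape' and 'strides' are intp tuples whose length matches ndim (here 1).
    return model.field_count, model.get_member_fe_type('shape')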
@register_default(types.ArrayFlags)
class ArrayFlagsModel(StructModel):
def __init__(self, dmm, fe_type):
members = [
('parent', fe_type.array_type),
]
super(ArrayFlagsModel, self).__init__(dmm, fe_type, members)
@register_default(types.NestedArray)
class NestedArrayModel(ArrayModel):
def __init__(self, dmm, fe_type):
self._be_type = dmm.lookup(fe_type.dtype).get_data_type()
super(NestedArrayModel, self).__init__(dmm, fe_type)
@register_default(types.Optional)
class OptionalModel(StructModel):
def __init__(self, dmm, fe_type):
members = [
('data', fe_type.type),
('valid', types.boolean),
]
self._value_model = dmm.lookup(fe_type.type)
super(OptionalModel, self).__init__(dmm, fe_type, members)
def get_return_type(self):
return self._value_model.get_return_type()
def as_return(self, builder, value):
raise NotImplementedError
def from_return(self, builder, value):
return self._value_model.from_return(builder, value)
def traverse(self, builder):
def get_data(value):
valid = get_valid(value)
data = self.get(builder, value, "data")
return builder.select(valid, data, ir.Constant(data.type, None))
def get_valid(value):
return self.get(builder, value, "valid")
return [(self.get_type("data"), get_data),
(self.get_type("valid"), get_valid)]
@register_default(types.Record)
class RecordModel(CompositeModel):
def __init__(self, dmm, fe_type):
super(RecordModel, self).__init__(dmm, fe_type)
self._models = [self._dmm.lookup(t) for _, t in fe_type.members]
self._be_type = ir.ArrayType(ir.IntType(8), fe_type.size)
self._be_ptr_type = self._be_type.as_pointer()
def get_value_type(self):
"""Passed around as reference to underlying data
"""
return self._be_ptr_type
def get_argument_type(self):
return self._be_ptr_type
def get_return_type(self):
return self._be_ptr_type
def get_data_type(self):
return self._be_type
def as_data(self, builder, value):
return builder.load(value)
def from_data(self, builder, value):
raise NotImplementedError("use load_from_data_pointer() instead")
def as_argument(self, builder, value):
return value
def from_argument(self, builder, value):
return value
def as_return(self, builder, value):
return value
def from_return(self, builder, value):
return value
def load_from_data_pointer(self, builder, ptr, align=None):
return builder.bitcast(ptr, self.get_value_type())
@register_default(types.UnicodeCharSeq)
class UnicodeCharSeq(DataModel):
def __init__(self, dmm, fe_type):
super(UnicodeCharSeq, self).__init__(dmm, fe_type)
charty = ir.IntType(numpy_support.sizeof_unicode_char * 8)
self._be_type = ir.ArrayType(charty, fe_type.count)
def get_value_type(self):
return self._be_type
def get_data_type(self):
return self._be_type
def as_data(self, builder, value):
return value
def from_data(self, builder, value):
return value
def as_return(self, builder, value):
return value
def from_return(self, builder, value):
return value
def as_argument(self, builder, value):
return value
def from_argument(self, builder, value):
return value
@register_default(types.CharSeq)
class CharSeq(DataModel):
def __init__(self, dmm, fe_type):
super(CharSeq, self).__init__(dmm, fe_type)
charty = ir.IntType(8)
self._be_type = ir.ArrayType(charty, fe_type.count)
def get_value_type(self):
return self._be_type
def get_data_type(self):
return self._be_type
def as_data(self, builder, value):
return value
def from_data(self, builder, value):
return value
def as_return(self, builder, value):
return value
def from_return(self, builder, value):
return value
def as_argument(self, builder, value):
return value
def from_argument(self, builder, value):
return value
class CContiguousFlatIter(StructModel):
def __init__(self, dmm, fe_type, need_indices):
assert fe_type.array_type.layout == 'C'
array_type = fe_type.array_type
dtype = array_type.dtype
ndim = array_type.ndim
members = [('array', array_type),
('stride', types.intp),
('index', types.EphemeralPointer(types.intp)),
]
if need_indices:
# For ndenumerate()
members.append(('indices', types.EphemeralArray(types.intp, ndim)))
super(CContiguousFlatIter, self).__init__(dmm, fe_type, members)
class FlatIter(StructModel):
def __init__(self, dmm, fe_type):
array_type = fe_type.array_type
dtype = array_type.dtype
ndim = array_type.ndim
members = [('array', array_type),
('pointers', types.EphemeralArray(types.CPointer(dtype), ndim)),
('indices', types.EphemeralArray(types.intp, ndim)),
('exhausted', types.EphemeralPointer(types.boolean)),
]
super(FlatIter, self).__init__(dmm, fe_type, members)
@register_default(types.UniTupleIter)
class UniTupleIter(StructModel):
def __init__(self, dmm, fe_type):
members = [('index', types.EphemeralPointer(types.intp)),
('tuple', fe_type.container,)]
super(UniTupleIter, self).__init__(dmm, fe_type, members)
@register_default(types.misc.SliceLiteral)
@register_default(types.SliceType)
class SliceModel(StructModel):
def __init__(self, dmm, fe_type):
members = [('start', types.intp),
('stop', types.intp),
('step', types.intp),
]
super(SliceModel, self).__init__(dmm, fe_type, members)
@register_default(types.NPDatetime)
@register_default(types.NPTimedelta)
class NPDatetimeModel(PrimitiveModel):
def __init__(self, dmm, fe_type):
be_type = ir.IntType(64)
super(NPDatetimeModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.ArrayIterator)
class ArrayIterator(StructModel):
def __init__(self, dmm, fe_type):
# We use an unsigned index to avoid the cost of negative index tests.
members = [('index', types.EphemeralPointer(types.uintp)),
('array', fe_type.array_type)]
super(ArrayIterator, self).__init__(dmm, fe_type, members)
@register_default(types.EnumerateType)
class EnumerateType(StructModel):
def __init__(self, dmm, fe_type):
members = [('count', types.EphemeralPointer(types.intp)),
('iter', fe_type.source_type)]
super(EnumerateType, self).__init__(dmm, fe_type, members)
@register_default(types.ZipType)
class ZipType(StructModel):
def __init__(self, dmm, fe_type):
members = [('iter%d' % i, source_type.iterator_type)
for i, source_type in enumerate(fe_type.source_types)]
super(ZipType, self).__init__(dmm, fe_type, members)
@register_default(types.RangeIteratorType)
class RangeIteratorType(StructModel):
def __init__(self, dmm, fe_type):
int_type = fe_type.yield_type
members = [('iter', types.EphemeralPointer(int_type)),
('stop', int_type),
('step', int_type),
('count', types.EphemeralPointer(int_type))]
super(RangeIteratorType, self).__init__(dmm, fe_type, members)
@register_default(types.Generator)
class GeneratorModel(CompositeModel):
def __init__(self, dmm, fe_type):
super(GeneratorModel, self).__init__(dmm, fe_type)
# XXX Fold this in DataPacker?
self._arg_models = [self._dmm.lookup(t) for t in fe_type.arg_types
if not isinstance(t, types.Omitted)]
self._state_models = [self._dmm.lookup(t) for t in fe_type.state_types]
self._args_be_type = ir.LiteralStructType(
[t.get_data_type() for t in self._arg_models])
self._state_be_type = ir.LiteralStructType(
[t.get_data_type() for t in self._state_models])
# The whole generator closure
self._be_type = ir.LiteralStructType(
[self._dmm.lookup(types.int32).get_value_type(),
self._args_be_type, self._state_be_type])
self._be_ptr_type = self._be_type.as_pointer()
def get_value_type(self):
"""
The generator closure is passed around as a reference.
"""
return self._be_ptr_type
def get_argument_type(self):
return self._be_ptr_type
def get_return_type(self):
return self._be_type
def get_data_type(self):
return self._be_type
def as_argument(self, builder, value):
return value
def from_argument(self, builder, value):
return value
def as_return(self, builder, value):
return self.as_data(builder, value)
def from_return(self, builder, value):
return self.from_data(builder, value)
def as_data(self, builder, value):
return builder.load(value)
def from_data(self, builder, value):
stack = cgutils.alloca_once(builder, value.type)
builder.store(value, stack)
return stack
@register_default(types.ArrayCTypes)
class ArrayCTypesModel(StructModel):
def __init__(self, dmm, fe_type):
# ndim = fe_type.ndim
members = [('data', types.CPointer(fe_type.dtype)),
('meminfo', types.MemInfoPointer(fe_type.dtype))]
super(ArrayCTypesModel, self).__init__(dmm, fe_type, members)
@register_default(types.RangeType)
class RangeModel(StructModel):
def __init__(self, dmm, fe_type):
int_type = fe_type.iterator_type.yield_type
members = [('start', int_type),
('stop', int_type),
('step', int_type)]
super(RangeModel, self).__init__(dmm, fe_type, members)
# =============================================================================
@register_default(types.NumpyNdIndexType)
class NdIndexModel(StructModel):
def __init__(self, dmm, fe_type):
ndim = fe_type.ndim
members = [('shape', types.UniTuple(types.intp, ndim)),
('indices', types.EphemeralArray(types.intp, ndim)),
('exhausted', types.EphemeralPointer(types.boolean)),
]
super(NdIndexModel, self).__init__(dmm, fe_type, members)
@register_default(types.NumpyFlatType)
def handle_numpy_flat_type(dmm, ty):
if ty.array_type.layout == 'C':
return CContiguousFlatIter(dmm, ty, need_indices=False)
else:
return FlatIter(dmm, ty)
@register_default(types.NumpyNdEnumerateType)
def handle_numpy_ndenumerate_type(dmm, ty):
if ty.array_type.layout == 'C':
return CContiguousFlatIter(dmm, ty, need_indices=True)
else:
return FlatIter(dmm, ty)
@register_default(types.BoundFunction)
def handle_bound_function(dmm, ty):
# The same as the underlying type
return dmm[ty.this]
@register_default(types.NumpyNdIterType)
class NdIter(StructModel):
def __init__(self, dmm, fe_type):
array_types = fe_type.arrays
ndim = fe_type.ndim
shape_len = ndim if fe_type.need_shaped_indexing else 1
members = [('exhausted', types.EphemeralPointer(types.boolean)),
('arrays', types.Tuple(array_types)),
# The iterator's main shape and indices
('shape', types.UniTuple(types.intp, shape_len)),
('indices', types.EphemeralArray(types.intp, shape_len)),
]
# Indexing state for the various sub-iterators
# XXX use a tuple instead?
for i, sub in enumerate(fe_type.indexers):
kind, start_dim, end_dim, _ = sub
member_name = 'index%d' % i
if kind == 'flat':
# A single index into the flattened array
members.append((member_name, types.EphemeralPointer(types.intp)))
elif kind in ('scalar', 'indexed', '0d'):
# Nothing required
pass
else:
assert 0
# Slots holding values of the scalar args
# XXX use a tuple instead?
for i, ty in enumerate(fe_type.arrays):
if not isinstance(ty, types.Array):
member_name = 'scalar%d' % i
members.append((member_name, types.EphemeralPointer(ty)))
super(NdIter, self).__init__(dmm, fe_type, members)
@register_default(types.DeferredType)
class DeferredStructModel(CompositeModel):
def __init__(self, dmm, fe_type):
super(DeferredStructModel, self).__init__(dmm, fe_type)
self.typename = "deferred.{0}".format(id(fe_type))
self.actual_fe_type = fe_type.get()
def get_value_type(self):
return ir.global_context.get_identified_type(self.typename + '.value')
def get_data_type(self):
return ir.global_context.get_identified_type(self.typename + '.data')
def get_argument_type(self):
return self._actual_model.get_argument_type()
def as_argument(self, builder, value):
inner = self.get(builder, value)
return self._actual_model.as_argument(builder, inner)
def from_argument(self, builder, value):
res = self._actual_model.from_argument(builder, value)
return self.set(builder, self.make_uninitialized(), res)
def from_data(self, builder, value):
self._define()
elem = self.get(builder, value)
value = self._actual_model.from_data(builder, elem)
out = self.make_uninitialized()
return self.set(builder, out, value)
def as_data(self, builder, value):
self._define()
elem = self.get(builder, value)
value = self._actual_model.as_data(builder, elem)
out = self.make_uninitialized(kind='data')
return self.set(builder, out, value)
def from_return(self, builder, value):
return value
def as_return(self, builder, value):
return value
def get(self, builder, value):
return builder.extract_value(value, [0])
def set(self, builder, value, content):
return builder.insert_value(value, content, [0])
def make_uninitialized(self, kind='value'):
self._define()
if kind == 'value':
ty = self.get_value_type()
else:
ty = self.get_data_type()
return ir.Constant(ty, ir.Undefined)
def _define(self):
valty = self.get_value_type()
self._define_value_type(valty)
datty = self.get_data_type()
self._define_data_type(datty)
def _define_value_type(self, value_type):
if value_type.is_opaque:
value_type.set_body(self._actual_model.get_value_type())
def _define_data_type(self, data_type):
if data_type.is_opaque:
data_type.set_body(self._actual_model.get_data_type())
@property
def _actual_model(self):
return self._dmm.lookup(self.actual_fe_type)
def traverse(self, builder):
return [(self.actual_fe_type,
lambda value: builder.extract_value(value, [0]))]
@register_default(types.StructRefPayload)
class StructPayloadModel(StructModel):
"""Model for the payload of a mutable struct
"""
def __init__(self, dmm, fe_typ):
members = tuple(fe_typ.field_dict.items())
super().__init__(dmm, fe_typ, members)
class StructRefModel(StructModel):
"""Model for a mutable struct.
A reference to the payload
"""
def __init__(self, dmm, fe_typ):
dtype = fe_typ.get_data_type()
members = [
("meminfo", types.MemInfoPointer(dtype)),
]
super().__init__(dmm, fe_typ, members)
|
|
"""Calculation of polygonal Field of View (FOV)"""
import functools
import py2d.Math
class Vision:
"""Class for representing a polygonal field of vision (FOV).
It requires a list of obstructors, given as line strips made of lists of vectors (i.e. we have a list of lists of vectors).
The vision polygon will be cached as long as the eye position and obstructors don't change.
>>> obs = [[ py2d.Math.Vector(2,4), py2d.Math.Vector(4, 1), py2d.Math.Vector(7, -2) ],
... [ py2d.Math.Vector(1,-2), py2d.Math.Vector(6, -3) ],
... [ py2d.Math.Vector(2.5,5), py2d.Math.Vector(3, 4) ]]
>>> radius = 20
>>> eye = py2d.Math.Vector(0,0)
>>> boundary = py2d.Math.Polygon.regular(eye, radius, 4)
>>> v = Vision(obs)
>>> poly = v.get_vision(eye, radius, boundary)
>>> poly.points[0:6]
[Vector(4.000, 1.000), Vector(2.000, 4.000), Vector(2.000, 4.000), Vector(0.000, 20.000), Vector(0.000, 20.000), Vector(-20.000, 0.000)]
>>> poly.points[6:]
[Vector(-20.000, 0.000), Vector(-0.000, -20.000), Vector(-0.000, -20.000), Vector(1.000, -2.000), Vector(1.000, -2.000), Vector(6.000, -3.000), Vector(6.000, -3.000), Vector(7.000, -2.000), Vector(7.000, -2.000)]
"""
def __init__(self, obstructors, debug=False):
"""Create a new vision object.
@type obstructors: list
@param obstructors: A list of obstructors. Each obstructor is a list of vectors, so this should be a list of lists.
"""
self.set_obstructors(obstructors)
self.debug = debug
self.debug_points = []
self.debug_linesegs = []
def set_obstructors(self, obstructors):
"""Set new obstructor data for the Vision object.
This will also cause the vision polygon to become invalidated, resulting in a re-calculation the next time you access it.
@type obstructors: list
@param obstructors: A list of obstructors. Each obstructor is a list of vectors, so this should be a list of lists.
"""
def flatten_list(l):
return functools.reduce(lambda x,y: x+y, l)
# concatenate list of lists of vectors to a list of vectors
self.obs_points = flatten_list(obstructors)
# convert obstructor line strips to lists of line segments
self.obs_segs = flatten_list([ list(zip(strip, strip[1:])) for strip in obstructors ])
self.cached_vision = None
self.cached_position = None
self.cached_radius = None
def get_vision(self, eye, radius, boundary):
"""Get a vision polygon for a given eye position and boundary Polygon.
@type eye: Vector
@param eye: The position of the viewer (normally the center of the boundary polygon)
@type radius: float
@param radius: The maximum vision radius (normally the radius of the boundary polygon)
@type boundary: Polygon
@param boundary: The boundary polygon that describes the maximal field of vision
"""
if self.cached_vision is None or (self.cached_position - eye).get_length_squared() > 1:
self.calculate(eye, radius, boundary)
return self.cached_vision
def calculate(self, eye, radius, boundary):
"""Re-calculate the vision polygon.
WARNING: You should only call this if you want to re-calculate the vision polygon for some reason.
For normal usage, use L{get_vision} instead!
"""
self.cached_radius = radius
self.cached_position = eye
self.debug_points = []
self.debug_linesegs = []
radius_squared = radius * radius
closest_points = lambda points, reference: sorted(points, key=lambda p: (p - reference).get_length_squared())
def sub_segment(small, big):
return py2d.Math.distance_point_lineseg_squared(small[0], big[0], big[1]) < 0.0001 and py2d.Math.distance_point_lineseg_squared(small[1], big[0], big[1]) < 0.0001
def segment_in_obs(seg):
for line_segment in self.obs_segs:
if sub_segment(seg, line_segment):
return True
return False
def check_visibility(p):
bpoints = set(boundary.points)
if p not in bpoints:
if (eye - p).get_length_squared() > radius_squared: return False
if not boundary.contains_point(p): return False
for line_segment in obs_segs:
if py2d.Math.check_intersect_lineseg_lineseg(eye, p, line_segment[0], line_segment[1]):
if line_segment[0] != p and line_segment[1] != p:
return False
return True
def lineseg_in_radius(seg):
return py2d.Math.distance_point_lineseg_squared(eye, seg[0], seg[1]) <= radius_squared
# Materialize to a list: this sequence is iterated several times below,
# so a one-shot filter iterator would be exhausted after the first pass.
obs_segs = list(filter(lineseg_in_radius, self.obs_segs))
# add all obstruction points and boundary points directly visible from the eye
visible_points = list(filter(check_visibility, set(self.obs_points + boundary.points )))
# find all obstructors intersecting the vision polygon
boundary_intersection_points = py2d.Math.intersect_linesegs_linesegs(obs_segs, list(zip(boundary.points, boundary.points[1:])) + [(boundary.points[-1], boundary.points[0])])
if self.debug: self.debug_points.extend([(p, 0xFF0000) for p in visible_points])
if self.debug: self.debug_points.extend([(p, 0x00FFFF) for p in boundary_intersection_points])
# filter boundary_intersection_points to only include visible points
# - need extra code here to handle points on obstructors!
for line_segment in obs_segs:
i = 0
while i < len(boundary_intersection_points):
p = boundary_intersection_points[i]
if py2d.Math.distance_point_lineseg_squared(p, line_segment[0], line_segment[1]) > 0.0001 and py2d.Math.check_intersect_lineseg_lineseg(eye, p, line_segment[0], line_segment[1]):
boundary_intersection_points.remove(p)
else:
i+=1
visible_points += boundary_intersection_points
poly = py2d.Math.Polygon()
poly.add_points(visible_points)
poly.sort_around(eye)
i = 0
while i < len(poly.points):
p = poly.points[i-1]
c = poly.points[i]
n = poly.points[ (i+1) % len(poly.points) ]
# intersect visible point with obstructors and boundary polygon
intersections = set(
py2d.Math.intersect_linesegs_ray(obs_segs, eye, c) + py2d.Math.intersect_poly_ray(boundary.points, eye, c))
intersections = [ip for ip in intersections if ip != c and boundary.contains_point(ip)]
if self.debug: self.debug_points.extend([(pt, 0x00FF00) for pt in intersections])
if intersections:
intersection = min(intersections, key=lambda p: (p - eye).length_squared)
#if self.debug: self.debug_linesegs.append((0xFF00FF, [eye, intersection]))
#if self.debug: print "%d prev: %s current: %s next: %s" % (i, p, c, n)
sio_pc = segment_in_obs((p,c))
sio_cn = segment_in_obs((c,n))
if not sio_pc:
#if self.debug: print "insert %s at %d" % (closest_intersection, i)
poly.points.insert(i, intersection)
i+=1
# We might have wrongly inserted a point before because this insert was missing
# and the current-next check therefore (incorrectly) yielded false. Remove that point again.
if segment_in_obs((poly.points[i-3], poly.points[i-1])):
#if self.debug: print "Fixing erroneous insert at %d" % (i-2)
poly.points.remove(poly.points[i-2])
i-=1
elif sio_pc and not sio_cn:
#if self.debug: print "insert %s at %d (+)" % (closest_intersection, i+1)
poly.points.insert(i+1, intersection)
i+=1
#elif self.debug:
#print "no insert at %i" % i
i+=1
#if self.debug: print "%d %d" % (i, len(poly.points))
# Handle the border case where the polygon point at index 0 was wrongly inserted earlier because the polygon was not yet complete at index -1.
if segment_in_obs((poly.points[-1], poly.points[1])):
poly.points[0], poly.points[1] = poly.points[1], poly.points[0]
self.cached_vision = poly
return poly
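# Hedged sketch (editor addition, hypothetical helper): set_obstructors() above
# turns each obstructor line strip into consecutive line segments via
# zip(strip, strip[1:]); a strip [a, b, c] therefore yields [(a, b), (b, c)].
def _example_strip_to_segments():
    a = py2d.Math.Vector(0, 0)
    b = py2d.Math.Vector(1, 0)
    c = py2d.Math.Vector(1, 1)
    strip = [a, b, c]
    # Same conversion as performed in Vision.set_obstructors()
    return list(zip(strip, strip[1:]))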
|
|
"""
A custom manager for working with trees of objects.
"""
from __future__ import unicode_literals
import contextlib
import django
from django.db import models, transaction, connections, router
from django.db.models import F, Max, Q
from django.utils.translation import ugettext as _
from mptt.exceptions import CantDisableUpdates, InvalidMove
__all__ = ('TreeManager',)
COUNT_SUBQUERY = """(
SELECT COUNT(*)
FROM %(rel_table)s
WHERE %(mptt_fk)s = %(mptt_table)s.%(mptt_pk)s
)"""
CUMULATIVE_COUNT_SUBQUERY = """(
SELECT COUNT(*)
FROM %(rel_table)s
WHERE %(mptt_fk)s IN
(
SELECT m2.%(mptt_pk)s
FROM %(mptt_table)s m2
WHERE m2.%(tree_id)s = %(mptt_table)s.%(tree_id)s
AND m2.%(left)s BETWEEN %(mptt_table)s.%(left)s
AND %(mptt_table)s.%(right)s
)
)"""
class TreeManager(models.Manager):
"""
A manager for working with trees of objects.
"""
def init_from_model(self, model):
"""
Sets things up. This would normally be done in contribute_to_class(),
but Django calls that before we've created our extra tree fields on the
model (which we need). So it's done here instead, after field setup.
"""
# Avoid calling "get_field_by_name()", which populates the related
# models cache and can cause circular imports in complex projects.
# Instead, find the tree_id field using "get_fields_with_model()".
[tree_field] = [fld for fld in model._meta.get_fields_with_model() if fld[0].name == self.tree_id_attr]
if tree_field[1]:
# tree_model is the model that contains the tree fields.
# this is usually just the same as model, but not for derived models.
self.tree_model = tree_field[1]
else:
self.tree_model = model
self._base_manager = None
if self.tree_model is not model:
# _base_manager is the treemanager on tree_model
self._base_manager = self.tree_model._tree_manager
def get_queryset_descendants(self, queryset, include_self=False):
"""
Returns a queryset containing the descendants of all nodes in the
given queryset.
If ``include_self=True``, nodes in ``queryset`` will also
be included in the result.
"""
assert self.model is queryset.model
opts = queryset.model._mptt_meta
if not queryset:
return self.none()
filters = None
for node in queryset:
lft, rght = node.lft, node.rght
if include_self:
lft -= 1
rght += 1
q = Q(**{
opts.tree_id_attr: getattr(node, opts.tree_id_attr),
'%s__gt' % opts.left_attr: lft,
'%s__lt' % opts.right_attr: rght,
})
if filters is None:
filters = q
else:
filters |= q
return self.filter(filters)
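# Hedged usage sketch (editor addition; ``Category`` is a hypothetical MPTT model):
#
#     some_nodes = Category.objects.filter(name__startswith='A')
#     with_descendants = Category.objects.get_queryset_descendants(
#         some_nodes, include_self=True)
#
# The result is a single queryset covering every node in ``some_nodes`` plus
# all of their descendants.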
@contextlib.contextmanager
def disable_mptt_updates(self):
"""
Context manager. Disables mptt updates.
NOTE that this context manager causes inconsistencies! MPTT model methods are
not guaranteed to return the correct results.
When to use this method:
If used correctly, this method can be used to speed up bulk updates.
This doesn't do anything clever. It *will* mess up your tree.
You should follow this method with a call to TreeManager.rebuild() to ensure your
tree stays sane, and you should wrap both calls in a transaction.
This is best for updates that span a large part of the table.
If you are doing localised changes (1 tree, or a few trees) consider
using delay_mptt_updates.
If you are making only minor changes to your tree, just let the updates happen.
Transactions:
This doesn't enforce any transactional behavior.
You should wrap this in a transaction to ensure database consistency.
If updates are already disabled on the model, this is a noop.
Usage::
with transaction.atomic():
with MyNode.objects.disable_mptt_updates():
## bulk updates.
MyNode.objects.rebuild()
"""
# Error cases:
if self.model._meta.abstract:
# * an abstract model. Design decision needed - do we disable updates for
# all concrete models that derive from this model?
# I vote no - that's a bit implicit and it's a weird use-case anyway.
# Open to further discussion :)
raise CantDisableUpdates(
"You can't disable/delay mptt updates on %s, it's an abstract model" % self.model.__name__
)
elif self.model._meta.proxy:
# * a proxy model. disabling updates would implicitly affect other models
# using the db table. Caller should call this on the manager for the concrete
# model instead, to make the behavior explicit.
raise CantDisableUpdates(
"You can't disable/delay mptt updates on %s, it's a proxy model. Call the concrete model instead."
% self.model.__name__
)
elif self.tree_model is not self.model:
# * a multiple-inheritance child of an MPTTModel.
# Disabling updates may affect instances of other models in the tree.
raise CantDisableUpdates(
"You can't disable/delay mptt updates on %s, it doesn't contain the mptt fields."
% self.model.__name__
)
if not self.model._mptt_updates_enabled:
# already disabled, noop.
yield
else:
self.model._set_mptt_updates_enabled(False)
try:
yield
finally:
self.model._set_mptt_updates_enabled(True)
@contextlib.contextmanager
def delay_mptt_updates(self):
"""
Context manager. Delays mptt updates until the end of a block of bulk processing.
NOTE that this context manager causes inconsistencies! MPTT model methods are
not guaranteed to return the correct results until the end of the context block.
When to use this method:
If used correctly, this method can be used to speed up bulk updates.
This is best for updates in a localised area of the db table, especially if all
the updates happen in a single tree and the rest of the forest is left untouched.
No subsequent rebuild is necessary.
delay_mptt_updates does a partial rebuild of the modified trees (not the whole table).
If used indiscriminately, this can actually be much slower than just letting the updates
occur when they're required.
The worst case occurs when every tree in the table is modified just once.
That results in a full rebuild of the table, which can be *very* slow.
If your updates will modify most of the trees in the table (not a small number of trees),
you should consider using TreeManager.disable_mptt_updates, as it issues far fewer
queries.
Transactions:
This doesn't enforce any transactional behavior.
You should wrap this in a transaction to ensure database consistency.
Exceptions:
If an exception occurs before the end of the block, the delayed updates
will not be applied.
Usage::
with transaction.atomic():
with MyNode.objects.delay_mptt_updates():
## bulk updates.
"""
with self.disable_mptt_updates():
if self.model._mptt_is_tracking:
# already tracking, noop.
yield
else:
self.model._mptt_start_tracking()
try:
yield
except Exception:
# stop tracking, but discard results
self.model._mptt_stop_tracking()
raise
results = self.model._mptt_stop_tracking()
partial_rebuild = self.partial_rebuild
for tree_id in results:
partial_rebuild(tree_id)
@property
def parent_attr(self):
return self.model._mptt_meta.parent_attr
@property
def left_attr(self):
return self.model._mptt_meta.left_attr
@property
def right_attr(self):
return self.model._mptt_meta.right_attr
@property
def tree_id_attr(self):
return self.model._mptt_meta.tree_id_attr
@property
def level_attr(self):
return self.model._mptt_meta.level_attr
def _translate_lookups(self, **lookups):
new_lookups = {}
join_parts = '__'.join
for k, v in lookups.items():
parts = k.split('__')
new_parts = []
new_parts__append = new_parts.append
for part in parts:
new_parts__append(getattr(self, part + '_attr', part))
new_lookups[join_parts(new_parts)] = v
return new_lookups
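# Hedged example (editor addition): with the default MPTT attribute names
# (left_attr == 'lft', tree_id_attr == 'tree_id'), _translate_lookups turns
# name-agnostic filters into concrete field lookups, e.g.
#
#     self._translate_lookups(left__gte=10, tree_id=3)
#     # -> {'lft__gte': 10, 'tree_id': 3}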
def _mptt_filter(self, qs=None, **filters):
"""
Like self.filter(), but translates name-agnostic filters for MPTT fields.
"""
if self._base_manager:
return self._base_manager._mptt_filter(qs=qs, **filters)
if qs is None:
qs = self.get_queryset()
return qs.filter(**self._translate_lookups(**filters))
def _mptt_update(self, qs=None, **items):
"""
Like self.update(), but translates name-agnostic MPTT fields.
"""
if self._base_manager:
return self._base_manager._mptt_update(qs=qs, **items)
if qs is None:
qs = self.get_queryset()
return qs.update(**self._translate_lookups(**items))
def _get_connection(self, **hints):
return connections[router.db_for_write(self.model, **hints)]
def add_related_count(self, queryset, rel_model, rel_field, count_attr,
cumulative=False):
"""
Adds a related item count to a given ``QuerySet`` using its
``extra`` method, for a ``Model`` class which has a relation to
this ``Manager``'s ``Model`` class.
Arguments:
``rel_model``
A ``Model`` class which has a relation to this ``Manager``'s
``Model`` class.
``rel_field``
The name of the field in ``rel_model`` which holds the
relation.
``count_attr``
The name of an attribute which should be added to each item in
this ``QuerySet``, containing a count of how many instances
of ``rel_model`` are related to it through ``rel_field``.
``cumulative``
If ``True``, the count will be for each item and all of its
descendants, otherwise it will be for each item itself.
"""
connection = self._get_connection()
qn = connection.ops.quote_name
meta = self.model._meta
if cumulative:
subquery = CUMULATIVE_COUNT_SUBQUERY % {
'rel_table': qn(rel_model._meta.db_table),
'mptt_fk': qn(rel_model._meta.get_field(rel_field).column),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_pk': qn(meta.pk.column),
'tree_id': qn(meta.get_field(self.tree_id_attr).column),
'left': qn(meta.get_field(self.left_attr).column),
'right': qn(meta.get_field(self.right_attr).column),
}
else:
subquery = COUNT_SUBQUERY % {
'rel_table': qn(rel_model._meta.db_table),
'mptt_fk': qn(rel_model._meta.get_field(rel_field).column),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_pk': qn(meta.pk.column),
}
return queryset.extra(select={count_attr: subquery})
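# Hedged usage sketch (editor addition; ``Category`` and ``Product`` are
# hypothetical models, with Product.category a foreign key to Category):
#
#     categories = Category.objects.add_related_count(
#         Category.objects.all(), Product, 'category', 'product_count',
#         cumulative=True)
#
# Each returned category then carries a ``product_count`` attribute counting
# the products related to it and, because ``cumulative=True``, to all of its
# descendants.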
# rant: why oh why would you rename something so widely used?
def get_queryset(self):
"""
Returns a ``QuerySet`` which contains all tree items, ordered in
such a way that root nodes appear in tree id order and
their subtrees appear in depth-first order.
"""
super_ = super(TreeManager, self)
if django.VERSION < (1, 7):
qs = super_.get_query_set()
else:
qs = super_.get_queryset()
return qs.order_by(self.tree_id_attr, self.left_attr)
if django.VERSION < (1, 7):
# in 1.7+, get_query_set gets defined by the base manager and complains if it's called.
# otherwise, we have to define it ourselves.
get_query_set = get_queryset
def insert_node(self, node, target, position='last-child', save=False, allow_existing_pk=False):
"""
Sets up the tree state for ``node`` (which has not yet been
inserted into the database) so it will be positioned relative
to a given ``target`` node as specified by ``position`` (when
appropriate) once it is inserted, with any necessary space already
having been made for it.
A ``target`` of ``None`` indicates that ``node`` should be
the last root node.
If ``save`` is ``True``, ``node``'s ``save()`` method will be
called before it is returned.
NOTE: This is a low-level method; it does NOT respect ``MPTTMeta.order_insertion_by``.
In most cases you should just set the node's parent and let mptt call this during save.
"""
if self._base_manager:
return self._base_manager.insert_node(node, target, position=position, save=save)
if node.pk and not allow_existing_pk and self.filter(pk=node.pk).exists():
raise ValueError(_('Cannot insert a node which has already been saved.'))
if target is None:
tree_id = self._get_next_tree_id()
setattr(node, self.left_attr, 1)
setattr(node, self.right_attr, 2)
setattr(node, self.level_attr, 0)
setattr(node, self.tree_id_attr, tree_id)
setattr(node, self.parent_attr, None)
elif target.is_root_node() and position in ['left', 'right']:
target_tree_id = getattr(target, self.tree_id_attr)
if position == 'left':
tree_id = target_tree_id
space_target = target_tree_id - 1
else:
tree_id = target_tree_id + 1
space_target = target_tree_id
self._create_tree_space(space_target)
setattr(node, self.left_attr, 1)
setattr(node, self.right_attr, 2)
setattr(node, self.level_attr, 0)
setattr(node, self.tree_id_attr, tree_id)
setattr(node, self.parent_attr, None)
else:
setattr(node, self.left_attr, 0)
setattr(node, self.level_attr, 0)
space_target, level, left, parent, right_shift = \
self._calculate_inter_tree_move_values(node, target, position)
tree_id = getattr(parent, self.tree_id_attr)
self._create_space(2, space_target, tree_id)
setattr(node, self.left_attr, -left)
setattr(node, self.right_attr, -left + 1)
setattr(node, self.level_attr, -level)
setattr(node, self.tree_id_attr, tree_id)
setattr(node, self.parent_attr, parent)
if parent:
self._post_insert_update_cached_parent_right(parent, right_shift)
if save:
node.save()
return node
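# Hedged usage sketch (editor addition; ``MyNode`` is a hypothetical MPTT model).
# For normal code, prefer setting node.parent and saving; insert_node is the
# low-level path used by mptt itself:
#
#     node = MyNode(name='child')
#     MyNode.objects.insert_node(node, target=parent_node,
#                                position='last-child', save=True)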
def _move_node(self, node, target, position='last-child', save=True):
if self._base_manager:
return self._base_manager.move_node(node, target, position=position)
if self.tree_model._mptt_is_tracking:
# delegate to insert_node and clean up the gaps later.
return self.insert_node(node, target, position=position, save=save, allow_existing_pk=True)
else:
if target is None:
if node.is_child_node():
self._make_child_root_node(node)
elif target.is_root_node() and position in ('left', 'right'):
self._make_sibling_of_root_node(node, target, position)
else:
if node.is_root_node():
self._move_root_node(node, target, position)
else:
self._move_child_node(node, target, position)
def move_node(self, node, target, position='last-child'):
"""
Moves ``node`` relative to a given ``target`` node as specified
by ``position`` (when appropriate), by examining both nodes and
calling the appropriate method to perform the move.
A ``target`` of ``None`` indicates that ``node`` should be
turned into a root node.
Valid values for ``position`` are ``'first-child'``,
``'last-child'``, ``'left'`` or ``'right'``.
``node`` will be modified to reflect its new tree state in the
database.
This method explicitly checks for ``node`` being made a sibling
of a root node, as this is a special case due to our use of tree
ids to order root nodes.
NOTE: This is a low-level method; it does NOT respect ``MPTTMeta.order_insertion_by``.
In most cases you should just move the node yourself by setting node.parent.
"""
self._move_node(node, target, position=position)
def root_node(self, tree_id):
"""
Returns the root node of the tree with the given id.
"""
if self._base_manager:
return self._base_manager.root_node(tree_id)
return self._mptt_filter(tree_id=tree_id, parent=None).get()
def root_nodes(self):
"""
Creates a ``QuerySet`` containing root nodes.
"""
if self._base_manager:
return self._base_manager.root_nodes()
return self._mptt_filter(parent=None)
def rebuild(self):
"""
Rebuilds whole tree in database using `parent` link.
"""
if self._base_manager:
return self._base_manager.rebuild()
opts = self.model._mptt_meta
qs = self._mptt_filter(parent=None)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
pks = qs.values_list('pk', flat=True)
rebuild_helper = self._rebuild_helper
idx = 0
for pk in pks:
idx += 1
rebuild_helper(pk, 1, idx)
def partial_rebuild(self, tree_id):
if self._base_manager:
return self._base_manager.partial_rebuild(tree_id)
opts = self.model._mptt_meta
qs = self._mptt_filter(parent=None, tree_id=tree_id)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
pks = qs.values_list('pk', flat=True)
if not pks:
return
if len(pks) > 1:
raise RuntimeError("More than one root node with tree_id %d. That's invalid, do a full rebuild." % tree_id)
self._rebuild_helper(pks[0], 1, tree_id)
def _rebuild_helper(self, pk, left, tree_id, level=0):
opts = self.model._mptt_meta
right = left + 1
qs = self._mptt_filter(parent__pk=pk)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
child_ids = qs.values_list('pk', flat=True)
rebuild_helper = self._rebuild_helper
for child_id in child_ids:
right = rebuild_helper(child_id, right, tree_id, level + 1)
qs = self.model._default_manager.filter(pk=pk)
self._mptt_update(qs,
left=left,
right=right,
level=level,
tree_id=tree_id
)
return right + 1
def _post_insert_update_cached_parent_right(self, instance, right_shift, seen=None):
setattr(instance, self.right_attr, getattr(instance, self.right_attr) + right_shift)
attr = '_%s_cache' % self.parent_attr
if hasattr(instance, attr):
parent = getattr(instance, attr)
if parent:
if not seen:
seen = set()
seen.add(instance)
if parent in seen:
# detect infinite recursion and throw an error
raise InvalidMove
self._post_insert_update_cached_parent_right(parent, right_shift, seen=seen)
def _calculate_inter_tree_move_values(self, node, target, position):
"""
Calculates values required when moving ``node`` relative to
``target`` as specified by ``position``.
"""
left = getattr(node, self.left_attr)
level = getattr(node, self.level_attr)
target_left = getattr(target, self.left_attr)
target_right = getattr(target, self.right_attr)
target_level = getattr(target, self.level_attr)
if position == 'last-child' or position == 'first-child':
if position == 'last-child':
space_target = target_right - 1
else:
space_target = target_left
level_change = level - target_level - 1
parent = target
elif position == 'left' or position == 'right':
if position == 'left':
space_target = target_left - 1
else:
space_target = target_right
level_change = level - target_level
parent = getattr(target, self.parent_attr)
else:
raise ValueError(_('An invalid position was given: %s.') % position)
left_right_change = left - space_target - 1
right_shift = 0
if parent:
right_shift = 2 * (node.get_descendant_count() + 1)
return space_target, level_change, left_right_change, parent, right_shift
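# Worked example (editor addition, hypothetical values): moving a root node
# with lft=1, level=0 and 2 descendants to be the 'last-child' of a target
# root with lft=1, rght=10, level=0 gives:
#
#     space_target      = target_right - 1  = 9
#     level_change      = 0 - 0 - 1         = -1
#     left_right_change = 1 - 9 - 1         = -9
#     parent            = target
#     right_shift       = 2 * (2 + 1)       = 6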
def _close_gap(self, size, target, tree_id):
"""
Closes a gap of a certain ``size`` after the given ``target``
point in the tree identified by ``tree_id``.
"""
self._manage_space(-size, target, tree_id)
def _create_space(self, size, target, tree_id):
"""
Creates a space of a certain ``size`` after the given ``target``
point in the tree identified by ``tree_id``.
"""
self._manage_space(size, target, tree_id)
def _create_tree_space(self, target_tree_id, num_trees=1):
"""
Creates space for a new tree by incrementing all tree ids
greater than ``target_tree_id``.
"""
qs = self._mptt_filter(tree_id__gt=target_tree_id)
self._mptt_update(qs, tree_id=F(self.tree_id_attr) + num_trees)
self.tree_model._mptt_track_tree_insertions(target_tree_id + 1, num_trees)
def _get_next_tree_id(self):
"""
Determines the next largest unused tree id for the tree managed
by this manager.
"""
qs = self.get_queryset()
max_tree_id = list(qs.aggregate(Max(self.tree_id_attr)).values())[0]
max_tree_id = max_tree_id or 0
return max_tree_id + 1
def _inter_tree_move_and_close_gap(self, node, level_change,
left_right_change, new_tree_id, parent_pk=None):
"""
Removes ``node`` from its current tree, with the given set of
changes being applied to ``node`` and its descendants, closing
the gap left by moving ``node`` as it does so.
If ``parent_pk`` is ``None``, this indicates that ``node`` is
being moved to a brand new tree as its root node, and will thus
have its parent field set to ``NULL``. Otherwise, ``node`` will
have ``parent_pk`` set for its parent field.
"""
connection = self._get_connection(instance=node)
qn = connection.ops.quote_name
opts = self.model._meta
inter_tree_move_query = """
UPDATE %(table)s
SET %(level)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(level)s - %%s
ELSE %(level)s END,
%(tree_id)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %%s
ELSE %(tree_id)s END,
%(left)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(left)s - %%s
WHEN %(left)s > %%s
THEN %(left)s - %%s
ELSE %(left)s END,
%(right)s = CASE
WHEN %(right)s >= %%s AND %(right)s <= %%s
THEN %(right)s - %%s
WHEN %(right)s > %%s
THEN %(right)s - %%s
ELSE %(right)s END,
%(parent)s = CASE
WHEN %(pk)s = %%s
THEN %(new_parent)s
ELSE %(parent)s END
WHERE %(tree_id)s = %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'level': qn(opts.get_field(self.level_attr).column),
'left': qn(opts.get_field(self.left_attr).column),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
'right': qn(opts.get_field(self.right_attr).column),
'parent': qn(opts.get_field(self.parent_attr).column),
'pk': qn(opts.pk.column),
'new_parent': parent_pk is None and 'NULL' or '%s',
}
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
gap_size = right - left + 1
gap_target_left = left - 1
params = [
left, right, level_change,
left, right, new_tree_id,
left, right, left_right_change,
gap_target_left, gap_size,
left, right, left_right_change,
gap_target_left, gap_size,
node.pk,
getattr(node, self.tree_id_attr)
]
if parent_pk is not None:
params.insert(-1, parent_pk)
cursor = connection.cursor()
cursor.execute(inter_tree_move_query, params)
def _make_child_root_node(self, node, new_tree_id=None):
"""
Removes ``node`` from its tree, making it the root node of a new
tree.
If ``new_tree_id`` is not specified a new tree id will be
generated.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
if not new_tree_id:
new_tree_id = self._get_next_tree_id()
left_right_change = left - 1
self._inter_tree_move_and_close_gap(node, level, left_right_change, new_tree_id)
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, left - left_right_change)
setattr(node, self.right_attr, right - left_right_change)
setattr(node, self.level_attr, 0)
setattr(node, self.tree_id_attr, new_tree_id)
setattr(node, self.parent_attr, None)
node._mptt_cached_fields[self.parent_attr] = None
def _make_sibling_of_root_node(self, node, target, position):
"""
Moves ``node``, making it a sibling of the given ``target`` root
node as specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
Since we use tree ids to reduce the number of rows affected by
tree management during insertion and deletion, root nodes are not
true siblings; thus, making an item a sibling of a root node is
a special case which involves shuffling tree ids around.
"""
if node == target:
raise InvalidMove(_('A node may not be made a sibling of itself.'))
opts = self.model._meta
tree_id = getattr(node, self.tree_id_attr)
target_tree_id = getattr(target, self.tree_id_attr)
if node.is_child_node():
if position == 'left':
space_target = target_tree_id - 1
new_tree_id = target_tree_id
elif position == 'right':
space_target = target_tree_id
new_tree_id = target_tree_id + 1
else:
raise ValueError(_('An invalid position was given: %s.') % position)
self._create_tree_space(space_target)
if tree_id > space_target:
# The node's tree id has been incremented in the
# database - this change must be reflected in the node
# object for the method call below to operate on the
# correct tree.
setattr(node, self.tree_id_attr, tree_id + 1)
self._make_child_root_node(node, new_tree_id)
else:
if position == 'left':
if target_tree_id > tree_id:
left_sibling = target.get_previous_sibling()
if node == left_sibling:
return
new_tree_id = getattr(left_sibling, self.tree_id_attr)
lower_bound, upper_bound = tree_id, new_tree_id
shift = -1
else:
new_tree_id = target_tree_id
lower_bound, upper_bound = new_tree_id, tree_id
shift = 1
elif position == 'right':
if target_tree_id > tree_id:
new_tree_id = target_tree_id
lower_bound, upper_bound = tree_id, target_tree_id
shift = -1
else:
right_sibling = target.get_next_sibling()
if node == right_sibling:
return
new_tree_id = getattr(right_sibling, self.tree_id_attr)
lower_bound, upper_bound = new_tree_id, tree_id
shift = 1
else:
raise ValueError(_('An invalid position was given: %s.') % position)
connection = self._get_connection(instance=node)
qn = connection.ops.quote_name
root_sibling_query = """
UPDATE %(table)s
SET %(tree_id)s = CASE
WHEN %(tree_id)s = %%s
THEN %%s
ELSE %(tree_id)s + %%s END
WHERE %(tree_id)s >= %%s AND %(tree_id)s <= %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
}
cursor = connection.cursor()
cursor.execute(root_sibling_query, [tree_id, new_tree_id, shift,
lower_bound, upper_bound])
setattr(node, self.tree_id_attr, new_tree_id)
def _manage_space(self, size, target, tree_id):
"""
Manages spaces in the tree identified by ``tree_id`` by changing
the values of the left and right columns by ``size`` after the
given ``target`` point.
"""
if self.tree_model._mptt_is_tracking:
self.tree_model._mptt_track_tree_modified(tree_id)
else:
connection = self._get_connection()
qn = connection.ops.quote_name
opts = self.model._meta
space_query = """
UPDATE %(table)s
SET %(left)s = CASE
WHEN %(left)s > %%s
THEN %(left)s + %%s
ELSE %(left)s END,
%(right)s = CASE
WHEN %(right)s > %%s
THEN %(right)s + %%s
ELSE %(right)s END
WHERE %(tree_id)s = %%s
AND (%(left)s > %%s OR %(right)s > %%s)""" % {
'table': qn(self.tree_model._meta.db_table),
'left': qn(opts.get_field(self.left_attr).column),
'right': qn(opts.get_field(self.right_attr).column),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
}
cursor = connection.cursor()
cursor.execute(space_query, [target, size, target, size, tree_id,
target, target])
def _move_child_node(self, node, target, position):
"""
Calls the appropriate method to move child node ``node``
relative to the given ``target`` node as specified by
``position``.
"""
tree_id = getattr(node, self.tree_id_attr)
target_tree_id = getattr(target, self.tree_id_attr)
if tree_id == target_tree_id:
self._move_child_within_tree(node, target, position)
else:
self._move_child_to_new_tree(node, target, position)
def _move_child_to_new_tree(self, node, target, position):
"""
Moves child node ``node`` to a different tree, inserting it
relative to the given ``target`` node in the new tree as
specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
new_tree_id = getattr(target, self.tree_id_attr)
space_target, level_change, left_right_change, parent, new_parent_right = \
self._calculate_inter_tree_move_values(node, target, position)
tree_width = right - left + 1
# Make space for the subtree which will be moved
self._create_space(tree_width, space_target, new_tree_id)
# Move the subtree
self._inter_tree_move_and_close_gap(node, level_change,
left_right_change, new_tree_id, parent.pk)
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, left - left_right_change)
setattr(node, self.right_attr, right - left_right_change)
setattr(node, self.level_attr, level - level_change)
setattr(node, self.tree_id_attr, new_tree_id)
setattr(node, self.parent_attr, parent)
node._mptt_cached_fields[self.parent_attr] = parent.pk
def _move_child_within_tree(self, node, target, position):
"""
Moves child node ``node`` within its current tree relative to
the given ``target`` node as specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
width = right - left + 1
tree_id = getattr(node, self.tree_id_attr)
target_left = getattr(target, self.left_attr)
target_right = getattr(target, self.right_attr)
target_level = getattr(target, self.level_attr)
if position == 'last-child' or position == 'first-child':
if node == target:
raise InvalidMove(_('A node may not be made a child of itself.'))
elif left < target_left < right:
raise InvalidMove(_('A node may not be made a child of any of its descendants.'))
if position == 'last-child':
if target_right > right:
new_left = target_right - width
new_right = target_right - 1
else:
new_left = target_right
new_right = target_right + width - 1
else:
if target_left > left:
new_left = target_left - width + 1
new_right = target_left
else:
new_left = target_left + 1
new_right = target_left + width
level_change = level - target_level - 1
parent = target
elif position == 'left' or position == 'right':
if node == target:
raise InvalidMove(_('A node may not be made a sibling of itself.'))
elif left < target_left < right:
raise InvalidMove(_('A node may not be made a sibling of any of its descendants.'))
if position == 'left':
if target_left > left:
new_left = target_left - width
new_right = target_left - 1
else:
new_left = target_left
new_right = target_left + width - 1
else:
if target_right > right:
new_left = target_right - width + 1
new_right = target_right
else:
new_left = target_right + 1
new_right = target_right + width
level_change = level - target_level
parent = getattr(target, self.parent_attr)
else:
raise ValueError(_('An invalid position was given: %s.') % position)
left_boundary = min(left, new_left)
right_boundary = max(right, new_right)
left_right_change = new_left - left
gap_size = width
if left_right_change > 0:
gap_size = -gap_size
connection = self._get_connection(instance=node)
qn = connection.ops.quote_name
opts = self.model._meta
# The level update must come before the left update to keep
# MySQL happy - left seems to refer to the updated value
# immediately after its update has been specified in the query
# with MySQL, but not with SQLite or Postgres.
move_subtree_query = """
UPDATE %(table)s
SET %(level)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(level)s - %%s
ELSE %(level)s END,
%(left)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(left)s + %%s
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(left)s + %%s
ELSE %(left)s END,
%(right)s = CASE
WHEN %(right)s >= %%s AND %(right)s <= %%s
THEN %(right)s + %%s
WHEN %(right)s >= %%s AND %(right)s <= %%s
THEN %(right)s + %%s
ELSE %(right)s END,
%(parent)s = CASE
WHEN %(pk)s = %%s
THEN %%s
ELSE %(parent)s END
WHERE %(tree_id)s = %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'level': qn(opts.get_field(self.level_attr).column),
'left': qn(opts.get_field(self.left_attr).column),
'right': qn(opts.get_field(self.right_attr).column),
'parent': qn(opts.get_field(self.parent_attr).column),
'pk': qn(opts.pk.column),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
}
cursor = connection.cursor()
cursor.execute(move_subtree_query, [
left, right, level_change,
left, right, left_right_change,
left_boundary, right_boundary, gap_size,
left, right, left_right_change,
left_boundary, right_boundary, gap_size,
node.pk, parent.pk,
tree_id])
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, new_left)
setattr(node, self.right_attr, new_right)
setattr(node, self.level_attr, level - level_change)
setattr(node, self.parent_attr, parent)
node._mptt_cached_fields[self.parent_attr] = parent.pk
def _move_root_node(self, node, target, position):
"""
Moves root node ``node`` to a different tree, inserting it
relative to the given ``target`` node as specified by
``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
tree_id = getattr(node, self.tree_id_attr)
new_tree_id = getattr(target, self.tree_id_attr)
width = right - left + 1
if node == target:
raise InvalidMove(_('A node may not be made a child of itself.'))
elif tree_id == new_tree_id:
raise InvalidMove(_('A node may not be made a child of any of its descendants.'))
space_target, level_change, left_right_change, parent, right_shift = \
self._calculate_inter_tree_move_values(node, target, position)
# Create space for the tree which will be inserted
self._create_space(width, space_target, new_tree_id)
# Move the root node, making it a child node
connection = self._get_connection(instance=node)
qn = connection.ops.quote_name
opts = self.model._meta
move_tree_query = """
UPDATE %(table)s
SET %(level)s = %(level)s - %%s,
%(left)s = %(left)s - %%s,
%(right)s = %(right)s - %%s,
%(tree_id)s = %%s,
%(parent)s = CASE
WHEN %(pk)s = %%s
THEN %%s
ELSE %(parent)s END
WHERE %(left)s >= %%s AND %(left)s <= %%s
AND %(tree_id)s = %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'level': qn(opts.get_field(self.level_attr).column),
'left': qn(opts.get_field(self.left_attr).column),
'right': qn(opts.get_field(self.right_attr).column),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
'parent': qn(opts.get_field(self.parent_attr).column),
'pk': qn(opts.pk.column),
}
cursor = connection.cursor()
cursor.execute(move_tree_query, [level_change, left_right_change,
left_right_change, new_tree_id, node.pk, parent.pk, left, right,
tree_id])
# Update the former root node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, left - left_right_change)
setattr(node, self.right_attr, right - left_right_change)
setattr(node, self.level_attr, level - level_change)
setattr(node, self.tree_id_attr, new_tree_id)
setattr(node, self.parent_attr, parent)
node._mptt_cached_fields[self.parent_attr] = parent.pk
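# Illustrative sketch (not part of django-mptt): the space management above is
# plain interval arithmetic on the nested-set columns. The helper below mimics
# what ``_manage_space`` does to an in-memory list of (left, right) pairs, so
# the SQL CASE expressions can be checked by hand; the function name is made
# up for this example.
def _sketch_manage_space(nodes, size, target):
    """Shift left/right values greater than ``target`` by ``size``.

    ``nodes`` is a list of (left, right) tuples for a single tree; a positive
    ``size`` opens a gap after ``target`` and a negative one closes it.
    """
    shifted = []
    for left, right in nodes:
        new_left = left + size if left > target else left
        new_right = right + size if right > target else right
        shifted.append((new_left, new_right))
    return shifted
# Example: opening a gap of width 2 after position 4 in a tree spanning 1..6
# turns [(1, 6), (2, 3), (4, 5)] into [(1, 8), (2, 3), (4, 7)].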
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def predict_rating(gender=None,
occupation=None,
zipcode=None,
title=None,
genres=None,
timestamp=None):
""" Predictor for rating from model/5ba500639252736dee002427
Created using BigMLer
"""
import re
tm_tokens = 'tokens_only'
tm_full_term = 'full_terms_only'
tm_all = 'all'
def term_matches(text, field_name, term):
""" Counts the number of occurences of term and its variants in text
"""
forms_list = term_forms[field_name].get(term, [term])
options = term_analysis[field_name]
token_mode = options.get('token_mode', tm_tokens)
case_sensitive = options.get('case_sensitive', False)
first_term = forms_list[0]
if token_mode == tm_full_term:
return full_term_match(text, first_term, case_sensitive)
else:
# In token_mode='all' we will match full terms using equals and
# tokens using contains
if token_mode == tm_all and len(forms_list) == 1:
pattern = re.compile(r'^.+\b.+$', re.U)
if re.match(pattern, first_term):
return full_term_match(text, first_term, case_sensitive)
return term_matches_tokens(text, forms_list, case_sensitive)
def full_term_match(text, full_term, case_sensitive):
"""Counts the match for full terms according to the case_sensitive
option
"""
if not case_sensitive:
text = text.lower()
full_term = full_term.lower()
return 1 if text == full_term else 0
def get_tokens_flags(case_sensitive):
"""Returns flags for regular expression matching depending on text
analysis options
"""
flags = re.U
if not case_sensitive:
flags = (re.I | flags)
return flags
def term_matches_tokens(text, forms_list, case_sensitive):
""" Counts the number of occurrences of the words in forms_list in
the text
"""
flags = get_tokens_flags(case_sensitive)
expression = r'(\b|_)%s(\b|_)' % '(\\b|_)|(\\b|_)'.join(forms_list)
pattern = re.compile(expression, flags=flags)
matches = re.findall(pattern, text)
return len(matches)
term_analysis = {
"title": {
"case_sensitive": False,
"token_mode": u'all',
},
}
term_forms = {
"title": {
u"beauty": [u'beauty', u'beautiful'],
u"day": [u'day', u'days'],
},
}
def item_matches(text, field_name, item):
""" Counts the number of occurrences of item in text
"""
options = item_analysis[field_name]
separator = options.get('separator', ' ')
regexp = options.get('separator_regexp')
if regexp is None:
regexp = r"%s" % re.escape(separator)
return count_items_matches(text, item, regexp)
def count_items_matches(text, item, regexp):
""" Counts the number of occurrences of the item in the text
"""
expression = r'(^|%s)%s($|%s)' % (regexp, item, regexp)
pattern = re.compile(expression, flags=re.U)
matches = re.findall(pattern, text)
return len(matches)
item_analysis = {
"genres": {
"separator": u'$',
},
}
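# Hypothetical worked examples (comments only, not produced by BigMLer): with
# the settings above, genres are split on '$', so
#     item_matches(u"Comedy$Romance", "genres", u"Comedy")  ->  1
# and title terms are matched token-wise and case-insensitively, so
#     term_matches(u"Life Is Beautiful (1997)", "title", u"beauty")  ->  1
# because u"beautiful" is registered as a form of u"beauty" in term_forms.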
if (genres is None):
return {"prediction": 3.7, "error": 1.28278}
if (item_matches(genres, "genres", u"Comedy") > 0):
if (title is None):
return {"prediction": 3.39535, "error": 1.57231}
if (term_matches(title, "title", u"life") > 0):
return {"prediction":5, "error":0.90777}
if (term_matches(title, "title", u"life") <= 0):
if (term_matches(title, "title", u"forrest gump (1994)") > 0):
return {"prediction":5, "error":1.09624}
if (term_matches(title, "title", u"forrest gump (1994)") <= 0):
if (term_matches(title, "title", u"1983") > 0):
return {"prediction":5, "error":1.08292}
if (term_matches(title, "title", u"1983") <= 0):
if (zipcode is None):
return {"prediction": 3.25316, "error": 1.5086}
if (zipcode > 7753):
if (item_matches(genres, "genres", u"Horror") > 0):
if (timestamp is None):
return {"prediction": 2, "error": 5.08228}
if (timestamp > 978258115):
return {"prediction":1.5, "error":5.26764}
if (timestamp <= 978258115):
return {"prediction":3, "error":5.08228}
if (item_matches(genres, "genres", u"Horror") <= 0):
if (timestamp is None):
return {"prediction": 3.3913, "error": 1.43342}
if (timestamp > 978218758):
if (item_matches(genres, "genres", u"Thriller") > 0):
return {"prediction":2, "error":10.53528}
if (item_matches(genres, "genres", u"Thriller") <= 0):
if (item_matches(genres, "genres", u"Crime") > 0):
return {"prediction":5, "error":0.9578}
if (item_matches(genres, "genres", u"Crime") <= 0):
if (term_matches(title, "title", u"breakfast club, the (1985)") > 0):
return {"prediction":2, "error":1.31722}
if (term_matches(title, "title", u"breakfast club, the (1985)") <= 0):
if (term_matches(title, "title", u"monty") > 0):
return {"prediction":2, "error":1.28344}
if (term_matches(title, "title", u"monty") <= 0):
if (term_matches(title, "title", u"stand by me (1986)") > 0):
return {"prediction":5, "error":1.24322}
if (term_matches(title, "title", u"stand by me (1986)") <= 0):
if (timestamp > 978228710):
if (item_matches(genres, "genres", u"Musical") > 0):
return {"prediction":4.5, "error":5.26764}
if (item_matches(genres, "genres", u"Musical") <= 0):
if (item_matches(genres, "genres", u"Romance") > 0):
if (term_matches(title, "title", u"day") > 0):
return {"prediction":2, "error":1.38964}
if (term_matches(title, "title", u"day") <= 0):
if (timestamp > 978428301):
return {"prediction":4, "error":1.13085}
if (timestamp <= 978428301):
if (term_matches(title, "title", u"shakespeare in love (1998)") > 0):
return {"prediction":4, "error":0.958}
if (term_matches(title, "title", u"shakespeare in love (1998)") <= 0):
return {"prediction":3, "error":0.36209}
if (item_matches(genres, "genres", u"Romance") <= 0):
if (occupation is None):
return {"prediction": 3.65385, "error": 1.31541}
if (occupation == "writer"):
return {"prediction":5, "error":1.31541}
if (occupation != "writer"):
if (item_matches(genres, "genres", u"Drama") > 0):
if (term_matches(title, "title", u"1997") > 0):
return {"prediction":5, "error":1.56826}
if (term_matches(title, "title", u"1997") <= 0):
return {"prediction":4, "error":0.78413}
if (item_matches(genres, "genres", u"Drama") <= 0):
if (timestamp > 978298248):
if (timestamp > 978298391):
if (gender is None):
return {"prediction": 3.6, "error": 1.92072}
if (gender == "Female"):
return {"prediction":4, "error":1.35815}
if (gender == "Male"):
if (term_matches(title, "title", u"1996") > 0):
return {"prediction":4, "error":2.93426}
if (term_matches(title, "title", u"1996") <= 0):
return {"prediction":3, "error":2.07483}
if (timestamp <= 978298391):
return {"prediction":5, "error":2.36951}
if (timestamp <= 978298248):
if (term_matches(title, "title", u"1980") > 0):
return {"prediction":2, "error":1.31017}
if (term_matches(title, "title", u"1980") <= 0):
if (timestamp > 978297750):
if (term_matches(title, "title", u"1999") > 0):
return {"prediction":3, "error":1.14938}
if (term_matches(title, "title", u"1999") <= 0):
return {"prediction":4, "error":0.81274}
if (timestamp <= 978297750):
if (term_matches(title, "title", u"1994") > 0):
return {"prediction":4, "error":1.09476}
if (term_matches(title, "title", u"1994") <= 0):
return {"prediction":3, "error":0.44694}
if (timestamp <= 978228710):
if (timestamp > 978226820):
return {"prediction":5, "error":2.93426}
if (timestamp <= 978226820):
return {"prediction":4, "error":2.07483}
if (timestamp <= 978218758):
if (term_matches(title, "title", u"1994") > 0):
return {"prediction":1, "error":1.96692}
if (term_matches(title, "title", u"1994") <= 0):
if (timestamp > 978174603):
if (term_matches(title, "title", u"1999") > 0):
if (timestamp > 978200667):
return {"prediction":1, "error":3.89486}
if (timestamp <= 978200667):
if (timestamp > 978196617):
return {"prediction":3, "error":2.07483}
if (timestamp <= 978196617):
return {"prediction":2, "error":2.93426}
if (term_matches(title, "title", u"1999") <= 0):
if (occupation is None):
return {"prediction": 3.09091, "error": 1.95519}
if (occupation == "executive/managerial"):
return {"prediction":4, "error":1.38253}
if (occupation != "executive/managerial"):
if (timestamp > 978200651):
if (term_matches(title, "title", u"bride") > 0):
return {"prediction":2, "error":2.36951}
if (term_matches(title, "title", u"bride") <= 0):
if (timestamp > 978202404):
return {"prediction":3, "error":1.35815}
if (timestamp <= 978202404):
if (term_matches(title, "title", u"batman") > 0):
return {"prediction":3, "error":2.93426}
if (term_matches(title, "title", u"batman") <= 0):
return {"prediction":4, "error":2.07483}
if (timestamp <= 978200651):
if (item_matches(genres, "genres", u"Romance") > 0):
return {"prediction":3, "error":2.93426}
if (item_matches(genres, "genres", u"Romance") <= 0):
return {"prediction":2, "error":2.07483}
if (timestamp <= 978174603):
if (term_matches(title, "title", u"1985") > 0):
return {"prediction":5, "error":2.93395}
if (term_matches(title, "title", u"1985") <= 0):
if (occupation is None):
return {"prediction": 3.5, "error": 2.34869}
if (occupation == "sales/marketing"):
return {"prediction":4, "error":2.34869}
if (occupation != "sales/marketing"):
if (timestamp > 978174551):
return {"prediction":4, "error":2.93426}
if (timestamp <= 978174551):
return {"prediction":3, "error":2.07483}
if (zipcode <= 7753):
if (item_matches(genres, "genres", u"Drama") > 0):
return {"prediction":4, "error":2.60606}
if (item_matches(genres, "genres", u"Drama") <= 0):
if (timestamp is None):
return {"prediction": 1.8, "error": 2.93395}
if (timestamp > 978904214):
if (term_matches(title, "title", u"1997") > 0):
return {"prediction":3, "error":2.93426}
if (term_matches(title, "title", u"1997") <= 0):
return {"prediction":2, "error":2.07483}
if (timestamp <= 978904214):
return {"prediction":1, "error":2.07461}
if (item_matches(genres, "genres", u"Comedy") <= 0):
if (title is None):
return {"prediction": 3.82843, "error": 1.25974}
if (term_matches(title, "title", u"1995") > 0):
if (occupation is None):
return {"prediction": 2.66667, "error": 3.25095}
if (occupation == "clerical/admin"):
return {"prediction":1, "error":3.25095}
if (occupation != "clerical/admin"):
if (item_matches(genres, "genres", u"Romance") > 0):
return {"prediction":4, "error":2.47964}
if (item_matches(genres, "genres", u"Romance") <= 0):
if (occupation == "writer"):
return {"prediction":2, "error":2.03402}
if (occupation != "writer"):
return {"prediction":3, "error":1.17434}
if (term_matches(title, "title", u"1995") <= 0):
if (item_matches(genres, "genres", u"Horror") > 0):
if (timestamp is None):
return {"prediction": 3.35, "error": 2.2498}
if (timestamp > 978200824):
if (timestamp > 978876267):
return {"prediction":2, "error":1.97983}
if (timestamp <= 978876267):
if (item_matches(genres, "genres", u"Thriller") > 0):
if (term_matches(title, "title", u"alien") > 0):
return {"prediction":4, "error":2.93426}
if (term_matches(title, "title", u"alien") <= 0):
return {"prediction":3, "error":2.07483}
if (item_matches(genres, "genres", u"Thriller") <= 0):
if (timestamp > 978268588):
if (term_matches(title, "title", u"king") > 0):
return {"prediction":4, "error":2.03402}
if (term_matches(title, "title", u"king") <= 0):
return {"prediction":5, "error":1.17434}
if (timestamp <= 978268588):
if (term_matches(title, "title", u"alien") > 0):
return {"prediction":3, "error":1.56826}
if (term_matches(title, "title", u"alien") <= 0):
return {"prediction":4, "error":0.78413}
if (timestamp <= 978200824):
if (occupation is None):
return {"prediction": 2.42857, "error": 3.28429}
if (occupation == "academic/educator"):
if (term_matches(title, "title", u"1960") > 0):
return {"prediction":4, "error":2.93426}
if (term_matches(title, "title", u"1960") <= 0):
return {"prediction":3, "error":2.07483}
if (occupation != "academic/educator"):
if (term_matches(title, "title", u"bringing") > 0):
return {"prediction":3, "error":3.89486}
if (term_matches(title, "title", u"bringing") <= 0):
if (timestamp > 978200492):
return {"prediction":2, "error":2.93426}
if (timestamp <= 978200492):
return {"prediction":1, "error":2.07483}
if (item_matches(genres, "genres", u"Horror") <= 0):
if (gender is None):
return {"prediction": 3.92135, "error": 1.21004}
if (gender == "Male"):
if (term_matches(title, "title", u"dick tracy (1990)") > 0):
return {"prediction":1, "error":1.29316}
if (term_matches(title, "title", u"dick tracy (1990)") <= 0):
if (occupation is None):
return {"prediction": 3.84892, "error": 1.26101}
if (occupation == "writer"):
if (timestamp is None):
return {"prediction": 3.2, "error": 2.52836}
if (timestamp > 978243869):
if (item_matches(genres, "genres", u"Romance") > 0):
return {"prediction":4, "error":2.5701}
if (item_matches(genres, "genres", u"Romance") <= 0):
if (timestamp > 978246320):
if (timestamp > 978246556):
return {"prediction":2, "error":2.93426}
if (timestamp <= 978246556):
return {"prediction":3, "error":2.07483}
if (timestamp <= 978246320):
return {"prediction":2, "error":1.35815}
if (timestamp <= 978243869):
if (term_matches(title, "title", u"1994") > 0):
return {"prediction":3, "error":3.32155}
if (term_matches(title, "title", u"1994") <= 0):
if (item_matches(genres, "genres", u"Film-Noir") > 0):
return {"prediction":4, "error":2.93426}
if (item_matches(genres, "genres", u"Film-Noir") <= 0):
return {"prediction":4.5, "error":5.26764}
if (occupation != "writer"):
if (term_matches(title, "title", u"2000") > 0):
if (term_matches(title, "title", u"mission") > 0):
return {"prediction":2.5, "error":5.26764}
if (term_matches(title, "title", u"mission") <= 0):
if (term_matches(title, "title", u"cell") > 0):
return {"prediction":3, "error":1.09476}
if (term_matches(title, "title", u"cell") <= 0):
if (timestamp is None):
return {"prediction": 3.6, "error": 1.92072}
if (timestamp > 978217782):
return {"prediction":3, "error":1.35815}
if (timestamp <= 978217782):
return {"prediction":4, "error":1.10893}
if (term_matches(title, "title", u"2000") <= 0):
if (timestamp is None):
return {"prediction": 3.95, "error": 1.26219}
if (timestamp > 978298955):
if (timestamp > 1009669148):
if (term_matches(title, "title", u"1997") > 0):
return {"prediction":5, "error":2.93426}
if (term_matches(title, "title", u"1997") <= 0):
return {"prediction":4, "error":2.07483}
if (timestamp <= 1009669148):
if (term_matches(title, "title", u"1989") > 0):
return {"prediction":5, "error":1.59717}
if (term_matches(title, "title", u"1989") <= 0):
if (term_matches(title, "title", u"1990") > 0):
return {"prediction":2, "error":1.16977}
if (term_matches(title, "title", u"1990") <= 0):
if (item_matches(genres, "genres", u"Film-Noir") > 0):
return {"prediction":4, "error":0.95152}
if (item_matches(genres, "genres", u"Film-Noir") <= 0):
if (term_matches(title, "title", u"1980") > 0):
return {"prediction":4, "error":0.77415}
if (term_matches(title, "title", u"1980") <= 0):
return {"prediction":3, "error":0.25805}
if (timestamp <= 978298955):
if (term_matches(title, "title", u"1987") > 0):
return {"prediction":1, "error":1.28682}
if (term_matches(title, "title", u"1987") <= 0):
if (term_matches(title, "title", u"fight") > 0):
return {"prediction":2, "error":1.23008}
if (term_matches(title, "title", u"fight") <= 0):
if (term_matches(title, "title", u"1993") > 0):
if (timestamp > 978234034):
return {"prediction":4, "error":0.89387}
if (timestamp <= 978234034):
return {"prediction":3, "error":0.77411}
if (term_matches(title, "title", u"1993") <= 0):
if (term_matches(title, "title", u"1996") > 0):
if (occupation == "other"):
return {"prediction":1, "error":2.43201}
if (occupation != "other"):
if (item_matches(genres, "genres", u"Drama") > 0):
return {"prediction":5, "error":1.38965}
if (item_matches(genres, "genres", u"Drama") <= 0):
if (zipcode is None):
return {"prediction": 3.75, "error": 1.96736}
if (zipcode > 94327):
return {"prediction":5, "error":1.96736}
if (zipcode <= 94327):
if (item_matches(genres, "genres", u"Thriller") > 0):
return {"prediction":5, "error":1.90304}
if (item_matches(genres, "genres", u"Thriller") <= 0):
if (occupation == "executive/managerial"):
return {"prediction":3, "error":10.53528}
if (occupation != "executive/managerial"):
if (zipcode > 58365):
return {"prediction":3, "error":0.99163}
if (zipcode <= 58365):
if (timestamp > 978297836):
return {"prediction":3, "error":1.28505}
if (timestamp <= 978297836):
return {"prediction":4, "error":0.57469}
if (term_matches(title, "title", u"1996") <= 0):
if (term_matches(title, "title", u"negotiator") > 0):
return {"prediction":3, "error":0.82118}
if (term_matches(title, "title", u"negotiator") <= 0):
if (item_matches(genres, "genres", u"War") > 0):
if (timestamp > 978201771):
if (timestamp > 978294214):
if (timestamp > 978295884):
return {"prediction":4, "error":2.07483}
if (timestamp <= 978295884):
return {"prediction":5, "error":2.93426}
if (timestamp <= 978294214):
if (timestamp > 978211160):
if (timestamp > 978294061):
return {"prediction":2, "error":2.03402}
if (timestamp <= 978294061):
return {"prediction":3, "error":1.17434}
if (timestamp <= 978211160):
return {"prediction":4, "error":2.47964}
if (timestamp <= 978201771):
return {"prediction":5, "error":2.56453}
if (item_matches(genres, "genres", u"War") <= 0):
if (occupation == "K-12 student"):
if (timestamp > 978146981):
return {"prediction":4, "error":2.93426}
if (timestamp <= 978146981):
return {"prediction":3, "error":2.07483}
if (occupation != "K-12 student"):
if (timestamp > 978201899):
if (timestamp > 978215603):
if (item_matches(genres, "genres", u"Adventure") > 0):
if (zipcode is None):
return {"prediction": 4.72727, "error": 1.09872}
if (zipcode > 22103):
if (term_matches(title, "title", u"1994") > 0):
return {"prediction":4, "error":1.72408}
if (term_matches(title, "title", u"1994") <= 0):
if (term_matches(title, "title", u"king") > 0):
return {"prediction":4, "error":1.92072}
if (term_matches(title, "title", u"king") <= 0):
if (term_matches(title, "title", u"jones") > 0):
return {"prediction":4, "error":2.03402}
if (term_matches(title, "title", u"jones") <= 0):
return {"prediction":5, "error":1.17434}
if (zipcode <= 22103):
return {"prediction":5, "error":0.49136}
if (item_matches(genres, "genres", u"Adventure") <= 0):
if (timestamp > 978294097):
if (term_matches(title, "title", u"1960") > 0):
return {"prediction":3, "error":1.25106}
if (term_matches(title, "title", u"1960") <= 0):
if (timestamp > 978294245):
if (timestamp > 978298584):
return {"prediction":5, "error":0.80826}
if (timestamp <= 978298584):
if (term_matches(title, "title", u"terminator") > 0):
return {"prediction":5, "error":1.18675}
if (term_matches(title, "title", u"terminator") <= 0):
if (term_matches(title, "title", u"1994") > 0):
return {"prediction":5, "error":1.18253}
if (term_matches(title, "title", u"1994") <= 0):
if (occupation == "scientist"):
return {"prediction":5, "error":1.13085}
if (occupation != "scientist"):
if (term_matches(title, "title", u"1976") > 0):
return {"prediction":5, "error":0.958}
if (term_matches(title, "title", u"1976") <= 0):
return {"prediction":4, "error":0.36209}
if (timestamp <= 978294245):
return {"prediction":5, "error":0.60498}
if (timestamp <= 978294097):
if (timestamp > 978230086):
if (timestamp > 978234842):
return {"prediction":4, "error":1.09476}
if (timestamp <= 978234842):
if (term_matches(title, "title", u"truman") > 0):
return {"prediction":4, "error":1.56826}
if (term_matches(title, "title", u"truman") <= 0):
return {"prediction":3, "error":0.78413}
if (timestamp <= 978230086):
if (term_matches(title, "title", u"graduate, the (1967)") > 0):
return {"prediction":3, "error":1.65457}
if (term_matches(title, "title", u"graduate, the (1967)") <= 0):
if (term_matches(title, "title", u"edge") > 0):
return {"prediction":3, "error":1.51877}
if (term_matches(title, "title", u"edge") <= 0):
if (item_matches(genres, "genres", u"Drama") > 0):
if (zipcode is None):
return {"prediction": 4.83333, "error": 1.28505}
if (zipcode > 22103):
return {"prediction":5, "error":0.57469}
if (zipcode <= 22103):
return {"prediction":4, "error":1.28505}
if (item_matches(genres, "genres", u"Drama") <= 0):
if (timestamp > 978227687):
return {"prediction":5, "error":1.56826}
if (timestamp <= 978227687):
return {"prediction":4, "error":0.78413}
if (timestamp <= 978215603):
return {"prediction":3, "error":0.58872}
if (timestamp <= 978201899):
if (term_matches(title, "title", u"lola") > 0):
return {"prediction":4, "error":0.91271}
if (term_matches(title, "title", u"lola") <= 0):
if (term_matches(title, "title", u"1984") > 0):
return {"prediction":4, "error":0.82728}
if (term_matches(title, "title", u"1984") <= 0):
if (term_matches(title, "title", u"terminator") > 0):
return {"prediction":4.5, "error":5.26764}
if (term_matches(title, "title", u"terminator") <= 0):
return {"prediction":5, "error":0.20738}
if (gender == "Female"):
if (timestamp is None):
return {"prediction": 4.26316, "error": 1.16276}
if (timestamp > 978226722):
if (timestamp > 978237189):
if (timestamp > 978238243):
if (term_matches(title, "title", u"1964") > 0):
return {"prediction":3, "error":1.14678}
if (term_matches(title, "title", u"1964") <= 0):
if (term_matches(title, "title", u"1996") > 0):
return {"prediction":3.5, "error":5.26764}
if (term_matches(title, "title", u"1996") <= 0):
if (term_matches(title, "title", u"1975") > 0):
return {"prediction":5, "error":0.95727}
if (term_matches(title, "title", u"1975") <= 0):
if (term_matches(title, "title", u"beauty") > 0):
return {"prediction":5, "error":0.91271}
if (term_matches(title, "title", u"beauty") <= 0):
if (timestamp > 978301752):
if (timestamp > 978302153):
return {"prediction":4, "error":0.93847}
if (timestamp <= 978302153):
if (term_matches(title, "title", u"1982") > 0):
return {"prediction":4, "error":2.93426}
if (term_matches(title, "title", u"1982") <= 0):
return {"prediction":5, "error":2.07483}
if (timestamp <= 978301752):
return {"prediction":4, "error":0.31268}
if (timestamp <= 978238243):
return {"prediction":5, "error":0.82845}
if (timestamp <= 978237189):
if (item_matches(genres, "genres", u"Thriller") > 0):
return {"prediction":4, "error":2.03402}
if (item_matches(genres, "genres", u"Thriller") <= 0):
return {"prediction":3, "error":1.17434}
if (timestamp <= 978226722):
if (term_matches(title, "title", u"1997") > 0):
return {"prediction":3, "error":1.35749}
if (term_matches(title, "title", u"1997") <= 0):
if (item_matches(genres, "genres", u"Adventure") > 0):
if (timestamp > 978153877):
return {"prediction":4, "error":2.07483}
if (timestamp <= 978153877):
return {"prediction":5, "error":2.93426}
if (item_matches(genres, "genres", u"Adventure") <= 0):
if (timestamp > 978152601):
return {"prediction":5, "error":0.25805}
if (timestamp <= 978152601):
return {"prediction":4, "error":0.77415}
def predict(gender=None,
occupation=None,
zipcode=None,
title=None,
genres=None,
timestamp=None):
prediction = predict_rating(gender=gender, occupation=occupation, zipcode=zipcode, title=title, genres=genres, timestamp=timestamp)
return prediction
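# Hypothetical usage sketch (not part of the BigMLer output): the predictor can
# be called directly with raw field values; fields left as None make the tree
# fall back to population-level estimates at the corresponding split.
if __name__ == "__main__":
    example = predict(gender="Female",
                      occupation="writer",
                      zipcode=10001,
                      title=u"Shakespeare in Love (1998)",
                      genres=u"Comedy$Romance",
                      timestamp=978300000)
    # For these inputs the tree above reaches the leaf
    # {"prediction": 4, "error": 0.958}.
    print(example)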
|
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import hashlib
import os
import zipfile
try:
from StringIO import StringIO as IOStream
except ImportError: # 3+
from io import BytesIO as IOStream
import base64
from .command import Command
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import InvalidSelectorException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
try:
str = basestring
except NameError:
pass
class WebElement(object):
"""Represents a DOM element.
Generally, all interesting operations that interact with a document will be
performed through this interface.
All method calls will do a freshness check to ensure that the element
reference is still valid. This essentially determines whether or not the
element is still attached to the DOM. If this test fails, then a
``StaleElementReferenceException`` is thrown, and all future calls to this
instance will fail."""
def __init__(self, parent, id_, w3c=False):
self._parent = parent
self._id = id_
self._w3c = w3c
def __repr__(self):
return '<{0.__module__}.{0.__name__} (session="{1}", element="{2}")>'.format(
type(self), self._parent.session_id, self._id)
@property
def tag_name(self):
"""This element's ``tagName`` property."""
return self._execute(Command.GET_ELEMENT_TAG_NAME)['value']
@property
def text(self):
"""The text of the element."""
return self._execute(Command.GET_ELEMENT_TEXT)['value']
def click(self):
"""Clicks the element."""
self._execute(Command.CLICK_ELEMENT)
def submit(self):
"""Submits a form."""
self._execute(Command.SUBMIT_ELEMENT)
def clear(self):
"""Clears the text if it's a text entry element."""
self._execute(Command.CLEAR_ELEMENT)
def get_attribute(self, name):
"""Gets the given attribute or property of the element.
This method will first try to return the value of a property with the
given name. If a property with that name doesn't exist, it returns the
value of the attribute with the same name. If there's no attribute with
that name, ``None`` is returned.
Values which are equal to "true" or "false" (compared case-insensitively)
are normalized to lowercase. All other non-``None`` values are returned
as strings. For attributes or properties which do not exist, ``None``
is returned.
:Args:
- name - Name of the attribute/property to retrieve.
Example::
# Check if the "active" CSS class is applied to an element.
is_active = "active" in target_element.get_attribute("class")
"""
resp = self._execute(Command.GET_ELEMENT_ATTRIBUTE, {'name': name})
attributeValue = ''
if resp['value'] is None:
attributeValue = None
else:
attributeValue = resp['value']
if name != 'value' and attributeValue.lower() in ('true', 'false'):
attributeValue = attributeValue.lower()
return attributeValue
def is_selected(self):
"""Returns whether the element is selected.
Can be used to check if a checkbox or radio button is selected.
"""
return self._execute(Command.IS_ELEMENT_SELECTED)['value']
def is_enabled(self):
"""Returns whether the element is enabled."""
return self._execute(Command.IS_ELEMENT_ENABLED)['value']
def find_element_by_id(self, id_):
"""Finds element within this element's children by ID.
:Args:
- id_ - ID of child element to locate.
"""
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
"""Finds a list of elements within this element's children by ID.
:Args:
- id_ - Id of child element to find.
"""
return self.find_elements(by=By.ID, value=id_)
def find_element_by_name(self, name):
"""Finds element within this element's children by name.
:Args:
- name - name property of the element to find.
"""
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
"""Finds a list of elements within this element's children by name.
:Args:
- name - name property to search for.
"""
return self.find_elements(by=By.NAME, value=name)
def find_element_by_link_text(self, link_text):
"""Finds element within this element's children by visible link text.
:Args:
- link_text - Link text string to search for.
"""
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, link_text):
"""Finds a list of elements within this element's children by visible link text.
:Args:
- link_text - Link text string to search for.
"""
return self.find_elements(by=By.LINK_TEXT, value=link_text)
def find_element_by_partial_link_text(self, link_text):
"""Finds element within this element's children by partially visible link text.
:Args:
- link_text - Link text string to search for.
"""
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
"""Finds a list of elements within this element's children by link text.
:Args:
- link_text - Link text string to search for.
"""
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_tag_name(self, name):
"""Finds element within this element's children by tag name.
:Args:
- name - name of html tag (eg: h1, a, span)
"""
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
"""Finds a list of elements within this element's children by tag name.
:Args:
- name - name of html tag (eg: h1, a, span)
"""
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_xpath(self, xpath):
"""Finds element by xpath.
:Args:
xpath - xpath of element to locate. "//input[@class='myelement']"
Note: The base path will be relative to this element's location.
This will select the first link under this element.
::
myelement.find_element_by_xpath(".//a")
However, this will select the first link on the page.
::
myelement.find_element_by_xpath("//a")
"""
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
"""Finds elements within the element by xpath.
:Args:
- xpath - xpath locator string.
Note: The base path will be relative to this element's location.
This will select all links under this element.
::
myelement.find_elements_by_xpath(".//a")
However, this will select all links in the page itself.
::
myelement.find_elements_by_xpath("//a")
"""
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_class_name(self, name):
"""Finds element within this element's children by class name.
:Args:
- name - class name to search for.
"""
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
"""Finds a list of elements within this element's children by class name.
:Args:
- name - class name to search for.
"""
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
"""Finds element within this element's children by CSS selector.
:Args:
- css_selector - CSS selector string, ex: 'a.nav#home'
"""
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
"""Finds a list of elements within this element's children by CSS selector.
:Args:
- css_selector - CSS selector string, ex: 'a.nav#home'
"""
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def send_keys(self, *value):
"""Simulates typing into the element.
:Args:
- value - A string for typing, or setting form fields. For setting
file inputs, this could be a local file path.
Use this to send simple key events or to fill out form fields::
form_textfield = driver.find_element_by_name('username')
form_textfield.send_keys("admin")
This can also be used to set file inputs.
::
file_input = driver.find_element_by_name('profilePic')
file_input.send_keys("path/to/profilepic.gif")
# Generally it's better to wrap the file path in one of the methods
# in os.path to return the actual path to support cross OS testing.
# file_input.send_keys(os.path.abspath("path/to/profilepic.gif"))
"""
# transfer file to another machine only if remote driver is used
# the same behaviour as for java binding
if self.parent._is_remote:
local_file = self.parent.file_detector.is_local_file(*value)
if local_file is not None:
value = self._upload(local_file)
typing = []
for val in value:
if isinstance(val, Keys):
typing.append(val)
elif isinstance(val, int):
val = val.__str__()
for i in range(len(val)):
typing.append(val[i])
else:
for i in range(len(val)):
typing.append(val[i])
self._execute(Command.SEND_KEYS_TO_ELEMENT, {'value': typing})
# RenderedWebElement Items
def is_displayed(self):
"""Whether the element is visible to a user."""
return self._execute(Command.IS_ELEMENT_DISPLAYED)['value']
@property
def location_once_scrolled_into_view(self):
"""THIS PROPERTY MAY CHANGE WITHOUT WARNING. Use this to discover
where on the screen an element is so that we can click it. This method
should cause the element to be scrolled into view.
Returns the top left-hand corner location on the screen, or ``None`` if
the element is not visible.
"""
return self._execute(Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW)['value']
@property
def size(self):
"""The size of the element."""
size = self._execute(Command.GET_ELEMENT_SIZE)['value']
new_size = {}
new_size["height"] = size["height"]
new_size["width"] = size["width"]
return new_size
def value_of_css_property(self, property_name):
"""The value of a CSS property."""
return self._execute(Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY,
{'propertyName': property_name})['value']
@property
def location(self):
"""The location of the element in the renderable canvas."""
old_loc = self._execute(Command.GET_ELEMENT_LOCATION)['value']
new_loc = {"x": old_loc['x'],
"y": old_loc['y']}
return new_loc
@property
def rect(self):
"""A dictionary with the size and location of the element."""
return self._execute(Command.GET_ELEMENT_RECT)['value']
@property
def screenshot_as_base64(self):
"""
Gets the screenshot of the current element as a base64 encoded string.
:Usage:
img_b64 = element.screenshot_as_base64
"""
return self._execute(Command.ELEMENT_SCREENSHOT)['value']
@property
def screenshot_as_png(self):
"""
Gets the screenshot of the current element as binary data.
:Usage:
element_png = element.screenshot_as_png
"""
return base64.b64decode(self.screenshot_as_base64.encode('ascii'))
def screenshot(self, filename):
"""
Gets the screenshot of the current element. Returns False if there is
any IOError, else returns True. Use full paths in your filename.
:Args:
- filename: The full path you wish to save your screenshot to.
:Usage:
element.screenshot('/Screenshots/foo.png')
"""
png = self.screenshot_as_png
try:
with open(filename, 'wb') as f:
f.write(png)
except IOError:
return False
finally:
del png
return True
@property
def parent(self):
"""Internal reference to the WebDriver instance this element was found from."""
return self._parent
@property
def id(self):
"""Internal ID used by selenium.
This is mainly for internal use. Simple use cases, such as checking whether
two web elements refer to the same element, can be done using ``==``::
if element1 == element2:
print("These 2 are equal")
"""
return self._id
def __eq__(self, element):
return hasattr(element, 'id') and self._id == element.id
def __ne__(self, element):
return not self.__eq__(element)
# Private Methods
def _execute(self, command, params=None):
"""Executes a command against the underlying HTML element.
Args:
command: The name of the command to _execute as a string.
params: A dictionary of named parameters to send with the command.
Returns:
The command's JSON response loaded into a dictionary object.
"""
if not params:
params = {}
params['id'] = self._id
return self._parent.execute(command, params)
def find_element(self, by=By.ID, value=None):
if not By.is_valid(by) or not isinstance(value, str):
raise InvalidSelectorException("Invalid locator values passed in")
if self._w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self._execute(Command.FIND_CHILD_ELEMENT,
{"using": by, "value": value})['value']
def find_elements(self, by=By.ID, value=None):
if not By.is_valid(by) or not isinstance(value, str):
raise InvalidSelectorException("Invalid locator values passed in")
if self._w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self._execute(Command.FIND_CHILD_ELEMENTS,
{"using": by, "value": value})['value']
def __hash__(self):
return int(hashlib.md5(self._id.encode('utf-8')).hexdigest(), 16)
def _upload(self, filename):
fp = IOStream()
zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
zipped.write(filename, os.path.split(filename)[1])
zipped.close()
content = base64.encodestring(fp.getvalue())
if not isinstance(content, str):
content = content.decode('utf-8')
try:
return self._execute(Command.UPLOAD_FILE,
{'file': content})['value']
except WebDriverException as e:
if "Unrecognized command: POST" in e.__str__():
return filename
elif "Command not found: POST " in e.__str__():
return filename
elif '{"status":405,"value":["GET","HEAD","DELETE"]}' in e.__str__():
return filename
else:
raise e
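# Minimal usage sketch (illustrative only, not part of this module): a
# WebElement is normally obtained from a WebDriver rather than constructed
# directly. Assuming a locally available Firefox driver and a page that
# contains an <input name="q"> field:
#
#     from selenium import webdriver
#
#     driver = webdriver.Firefox()
#     driver.get("https://example.com/search")    # hypothetical page
#     field = driver.find_element_by_name("q")    # returns a WebElement
#     field.send_keys("selenium")
#     print(field.get_attribute("value"))         # -> "selenium"
#     driver.quit()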
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._deployments_operations import build_calculate_template_hash_request, build_cancel_request, build_check_existence_request, build_create_or_update_request_initial, build_delete_request_initial, build_export_template_request, build_get_request, build_list_request, build_validate_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DeploymentsOperations:
"""DeploymentsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted. Deleting a template
deployment removes the associated deployment operations. Deleting a template deployment does
not affect the state of the resource group. This is an asynchronous operation that returns a
status of 202 until the template deployment is successfully deleted. The Location response
header contains the URI that is used to obtain the status of the process. While the process is
running, a call to the URI in the Location header returns a status of 202. When the process
finishes, the URI in the Location header returns a status of 204 on success. If the
asynchronous request failed, the URI in the Location header returns an error-level status code.
:param resource_group_name: The name of the resource group with the deployment to delete. The
name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment to delete.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'} # type: ignore
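# Hypothetical caller-side sketch (not generated code): with an authenticated
# async ResourceManagementClient ``client`` built elsewhere, the long-running
# delete described above is typically driven like this:
#
#     poller = await client.deployments.begin_delete("my-rg", "my-deployment")
#     await poller.result()   # completes once the service responds with 204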
@distributed_trace_async
async def check_existence(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> bool:
"""Checks whether the deployment exists.
:param resource_group_name: The name of the resource group with the deployment to check. The
name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment to check.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_check_existence_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
template_url=self.check_existence.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Deployment')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> AsyncLROPoller["_models.DeploymentExtended"]:
"""Deploys resources to a resource group.
You can provide the template and parameters directly in the request or link to JSON files.
:param resource_group_name: The name of the resource group to deploy the resources to. The name
is case insensitive. The resource group must already exist.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation.
:type parameters: ~azure.mgmt.resource.resources.v2016_09_01.models.Deployment
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2016_09_01.models.DeploymentExtended]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExtended":
"""Gets a deployment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment to get.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExtended, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2016_09_01.models.DeploymentExtended
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'} # type: ignore
@distributed_trace_async
async def cancel(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> None:
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted or Running. After the
deployment is canceled, the provisioningState is set to Canceled. Canceling a template
deployment stops the currently running template deployment and leaves the resource group
partially deployed.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment to cancel.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_cancel_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
template_url=self.cancel.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel'} # type: ignore
@distributed_trace_async
async def validate(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentValidateResult":
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager..
:param resource_group_name: The name of the resource group the template will be deployed to.
The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Parameters to validate.
:type parameters: ~azure.mgmt.resource.resources.v2016_09_01.models.Deployment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2016_09_01.models.DeploymentValidateResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentValidateResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Deployment')
request = build_validate_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.validate.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/validate'} # type: ignore
@distributed_trace_async
async def export_template(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExportResult":
"""Exports the template used for specified deployment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment from which to get the template.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExportResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2016_09_01.models.DeploymentExportResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExportResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_export_template_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
template_url=self.export_template.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentListResult"]:
"""Get all the deployments for a resource group.
:param resource_group_name: The name of the resource group with the deployments to get. The
name is case insensitive.
:type resource_group_name: str
:param filter: The filter to apply on the operation. For example, you can use
$filter=provisioningState eq '{state}'.
:type filter: str
:param top: The number of results to get. If null is passed, returns all deployments.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2016_09_01.models.DeploymentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/'} # type: ignore
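# Usage sketch, not part of the generated client: one way a caller might
# consume the AsyncItemPaged returned by list(). The client variable and the
# resource group name are hypothetical.
#
#     async def collect_deployment_names(client):
#         names = []
#         async for deployment in client.deployments.list("example-rg"):
#             names.append(deployment.name)   # items are DeploymentExtended
#         return names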
@distributed_trace_async
async def calculate_template_hash(
self,
template: Any,
**kwargs: Any
) -> "_models.TemplateHashResult":
"""Calculate the hash of the given template.
:param template: The template provided to calculate hash.
:type template: any
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TemplateHashResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2016_09_01.models.TemplateHashResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TemplateHashResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(template, 'object')
request = build_calculate_template_hash_request(
content_type=content_type,
json=_json,
template_url=self.calculate_template_hash.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('TemplateHashResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
calculate_template_hash.metadata = {'url': '/providers/Microsoft.Resources/calculateTemplateHash'} # type: ignore
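# Usage sketch, not part of the generated client: a hedged example of driving
# the long-running operations above through the async ResourceManagementClient.
# The resource group name, deployment name and inline template body are all
# hypothetical placeholders.
#
#     async def create_then_delete(client):
#         poller = await client.deployments.begin_create_or_update(
#             resource_group_name="example-rg",
#             deployment_name="example-deployment",
#             parameters={
#                 "properties": {
#                     "mode": "Incremental",
#                     "template": {},      # ARM template JSON goes here
#                     "parameters": {},
#                 }
#             },
#         )
#         deployment = await poller.result()      # DeploymentExtended
#         exists = await client.deployments.check_existence(
#             "example-rg", "example-deployment")
#         delete_poller = await client.deployments.begin_delete(
#             "example-rg", "example-deployment")
#         await delete_poller.result()            # returns None when finished
#         return deployment, exists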
|
|
"""Admin extensions for django-reversion."""
from __future__ import unicode_literals
from functools import partial
from django import template
from django.db import models, transaction, connection
from django.conf.urls import patterns, url
from django.contrib import admin
from django.contrib.admin import helpers, options
from django.contrib.admin.util import unquote, quote
from django.contrib.contenttypes.generic import GenericInlineModelAdmin, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.forms.formsets import all_valid
from django.forms.models import model_to_dict
from django.http import HttpResponseRedirect
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, render_to_response
from django.utils.html import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from django.utils.formats import localize
from reversion.models import Revision, Version, has_int_pk
from reversion.revisions import default_revision_manager, RegistrationError
class VersionAdmin(admin.ModelAdmin):
"""Abstract admin class for handling version controlled models."""
object_history_template = "reversion/object_history.html"
change_list_template = "reversion/change_list.html"
revision_form_template = None
recover_list_template = None
recover_form_template = None
# The revision manager instance used to manage revisions.
revision_manager = default_revision_manager
# The serialization format to use when registering models with reversion.
reversion_format = "json"
# Whether to ignore duplicate revision data.
ignore_duplicate_revisions = False
# If True, then the default ordering of object_history and recover lists will be reversed.
history_latest_first = False
def _autoregister(self, model, follow=None):
"""Registers a model with reversion, if required."""
if model._meta.proxy:
raise RegistrationError(
"{model} is a proxy model, and cannot be used with django-reversion, register the parent class ({model_parent}) instead.".format( # noqa
model=model.__name__,
model_parent=', '.join(
[x.__name__ for x in model._meta.parents.keys()])
))
if not self.revision_manager.is_registered(model):
follow = follow or []
for parent_cls, field in model._meta.parents.items():
follow.append(field.name)
self._autoregister(parent_cls)
self.revision_manager.register(model, follow=follow, format=self.reversion_format)
@property
def revision_context_manager(self):
"""The revision context manager for this VersionAdmin."""
return self.revision_manager._revision_context_manager
def _introspect_inline_admin(self, inline):
"""Introspects the given inline admin, returning a tuple of (inline_model, follow_field)."""
inline_model = None
follow_field = None
if issubclass(inline, GenericInlineModelAdmin):
inline_model = inline.model
ct_field = inline.ct_field
ct_fk_field = inline.ct_fk_field
for field in self.model._meta.virtual_fields:
if isinstance(field, GenericRelation) and field.rel.to == inline_model and field.object_id_field_name == ct_fk_field and field.content_type_field_name == ct_field:
follow_field = field.name
break
elif issubclass(inline, options.InlineModelAdmin):
inline_model = inline.model
fk_name = inline.fk_name
if not fk_name:
for field in inline_model._meta.fields:
if isinstance(field, (models.ForeignKey, models.OneToOneField)) and issubclass(self.model, field.rel.to):
fk_name = field.name
break
if fk_name and not inline_model._meta.get_field(fk_name).rel.is_hidden():
accessor = inline_model._meta.get_field(fk_name).related.get_accessor_name()
follow_field = accessor
return inline_model, follow_field
def __init__(self, *args, **kwargs):
"""Initializes the VersionAdmin"""
super(VersionAdmin, self).__init__(*args, **kwargs)
# Automatically register models if required.
if not self.revision_manager.is_registered(self.model):
inline_fields = []
for inline in self.inlines:
inline_model, follow_field = self._introspect_inline_admin(inline)
if inline_model:
self._autoregister(inline_model)
if follow_field:
inline_fields.append(follow_field)
self._autoregister(self.model, inline_fields)
# Wrap own methods in manual revision management.
self.add_view = self.revision_context_manager.create_revision(manage_manually=True)(self.add_view)
self.change_view = self.revision_context_manager.create_revision(manage_manually=True)(self.change_view)
self.recover_view = self.revision_context_manager.create_revision(manage_manually=True)(self.recover_view)
self.revision_view = self.revision_context_manager.create_revision(manage_manually=True)(self.revision_view)
self.changelist_view = self.revision_context_manager.create_revision(manage_manually=True)(self.changelist_view)
def _get_template_list(self, template_name):
opts = self.model._meta
return (
"reversion/%s/%s/%s" % (opts.app_label, opts.object_name.lower(), template_name),
"reversion/%s/%s" % (opts.app_label, template_name),
"reversion/%s" % template_name,
)
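# For a hypothetical app label "shop" with model "Product" and the template
# name "recover_form.html", the search order built above would be:
#   reversion/shop/product/recover_form.html
#   reversion/shop/recover_form.html
#   reversion/recover_form.html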
def get_urls(self):
"""Returns the additional urls used by the Reversion admin."""
urls = super(VersionAdmin, self).get_urls()
admin_site = self.admin_site
opts = self.model._meta
info = opts.app_label, opts.module_name,
reversion_urls = patterns("",
url("^recover/$", admin_site.admin_view(self.recoverlist_view), name='%s_%s_recoverlist' % info),
url("^recover/([^/]+)/$", admin_site.admin_view(self.recover_view), name='%s_%s_recover' % info),
url("^([^/]+)/history/([^/]+)/$", admin_site.admin_view(self.revision_view), name='%s_%s_revision' % info),)
return reversion_urls + urls
def get_revision_instances(self, request, object):
"""Returns all the instances to be used in the object's revision."""
return [object]
def get_revision_data(self, request, object):
"""Returns all the revision data to be used in the object's revision."""
return dict(
(o, self.revision_manager.get_adapter(o.__class__).get_version_data(o))
for o in self.get_revision_instances(request, object)
)
def log_addition(self, request, object):
"""Sets the version meta information."""
super(VersionAdmin, self).log_addition(request, object)
self.revision_manager.save_revision(
self.get_revision_data(request, object),
user = request.user,
comment = _("Initial version."),
ignore_duplicates = self.ignore_duplicate_revisions,
db = self.revision_context_manager.get_db(),
)
def log_change(self, request, object, message):
"""Sets the version meta information."""
super(VersionAdmin, self).log_change(request, object, message)
self.revision_manager.save_revision(
self.get_revision_data(request, object),
user = request.user,
comment = message,
ignore_duplicates = self.ignore_duplicate_revisions,
db = self.revision_context_manager.get_db(),
)
def _order_version_queryset(self, queryset):
"""Applies the correct ordering to the given version queryset."""
if self.history_latest_first:
return queryset.order_by("-pk")
return queryset.order_by("pk")
def recoverlist_view(self, request, extra_context=None):
"""Displays a deleted model to allow recovery."""
# check if user has change or add permissions for model
if not self.has_change_permission(request) and not self.has_add_permission(request):
raise PermissionDenied
model = self.model
opts = model._meta
deleted = self._order_version_queryset(self.revision_manager.get_deleted(self.model))
context = {
"opts": opts,
"app_label": opts.app_label,
"module_name": capfirst(opts.verbose_name),
"title": _("Recover deleted %(name)s") % {"name": force_text(opts.verbose_name_plural)},
"deleted": deleted,
"changelist_url": reverse("%s:%s_%s_changelist" % (self.admin_site.name, opts.app_label, opts.module_name)),
}
extra_context = extra_context or {}
context.update(extra_context)
return render_to_response(self.recover_list_template or self._get_template_list("recover_list.html"),
context, template.RequestContext(request))
def get_revision_form_data(self, request, obj, version):
"""
Returns a dictionary of data to set in the admin form in order to revert
to the given revision.
"""
return version.field_dict
def get_related_versions(self, obj, version, FormSet):
"""Retreives all the related Version objects for the given FormSet."""
object_id = obj.pk
# Get the fk name.
try:
fk_name = FormSet.fk.name
except AttributeError:
# This is a GenericInlineFormset, or similar.
fk_name = FormSet.ct_fk_field.name
# Look up the revision data.
revision_versions = version.revision.version_set.all()
related_versions = dict([(related_version.object_id, related_version)
for related_version in revision_versions
if ContentType.objects.get_for_id(related_version.content_type_id).model_class() == FormSet.model
and force_text(related_version.field_dict[fk_name]) == force_text(object_id)])
return related_versions
def _hack_inline_formset_initial(self, inline, FormSet, formset, obj, version, revert, recover):
"""Hacks the given formset to contain the correct initial data."""
# if the FK this inline formset represents is not being followed, don't process data for it.
# see https://github.com/etianen/django-reversion/issues/222
_, follow_field = self._introspect_inline_admin(inline.__class__)
if follow_field not in self.revision_manager.get_adapter(self.model).follow:
return
# Now we hack it to push in the data from the revision!
initial = []
related_versions = self.get_related_versions(obj, version, FormSet)
formset.related_versions = related_versions
for related_obj in formset.queryset:
if force_text(related_obj.pk) in related_versions:
initial.append(related_versions.pop(force_text(related_obj.pk)).field_dict)
else:
initial_data = model_to_dict(related_obj)
initial_data["DELETE"] = True
initial.append(initial_data)
for related_version in related_versions.values():
initial_row = related_version.field_dict
pk_name = ContentType.objects.get_for_id(related_version.content_type_id).model_class()._meta.pk.name
del initial_row[pk_name]
initial.append(initial_row)
# Reconstruct the forms with the new revision data.
formset.initial = initial
formset.forms = [formset._construct_form(n) for n in range(len(initial))]
# Hack the formset to force a save of everything.
def get_changed_data(form):
return [field.name for field in form.fields]
for form in formset.forms:
form.has_changed = lambda: True
form._get_changed_data = partial(get_changed_data, form=form)
def total_form_count_hack(count):
return lambda: count
formset.total_form_count = total_form_count_hack(len(initial))
def render_revision_form(self, request, obj, version, context, revert=False, recover=False):
"""Renders the object revision form."""
model = self.model
opts = model._meta
object_id = obj.pk
# Generate the model form.
ModelForm = self.get_form(request, obj)
formsets = []
if request.method == "POST":
# This section is copied directly from the model admin change view
# method. Maybe one day there will be a hook for doing this better.
form = ModelForm(request.POST, request.FILES, instance=obj, initial=self.get_revision_form_data(request, obj, version))
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=True)
# HACK: If the value of a file field is None, remove the file from the model.
for field in new_object._meta.fields:
if isinstance(field, models.FileField) and field.name in form.cleaned_data and form.cleaned_data[field.name] is None:
setattr(new_object, field.name, None)
else:
form_validated = False
new_object = obj
prefixes = {}
for FormSet, inline in zip(self.get_formsets(request, new_object),
self.get_inline_instances(request)):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(request.POST, request.FILES,
instance=new_object, prefix=prefix,
queryset=inline.queryset(request))
self._hack_inline_formset_initial(inline, FormSet, formset, obj, version, revert, recover)
# Add this hacked formset to the form.
formsets.append(formset)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, change=True)
form.save_m2m()
for formset in formsets:
# HACK: If the value of a file field is None, remove the file from the model.
related_objects = formset.save(commit=False)
for related_obj, related_form in zip(related_objects, formset.saved_forms):
for field in related_obj._meta.fields:
if isinstance(field, models.FileField) and field.name in related_form.cleaned_data and related_form.cleaned_data[field.name] is None:
setattr(related_obj, field.name, None)
related_obj.save()
formset.save_m2m()
change_message = _("Reverted to previous version, saved on %(datetime)s") % {"datetime": localize(version.revision.date_created)}
self.log_change(request, new_object, change_message)
self.message_user(request, _('The %(model)s "%(name)s" was reverted successfully. You may edit it again below.') % {"model": force_text(opts.verbose_name), "name": force_text(obj)})
# Redirect to the model change form.
if revert:
return HttpResponseRedirect("../../")
elif recover:
return HttpResponseRedirect("../../%s/" % quote(object_id))
else:
assert False
else:
# This is a mutated version of the code in the standard model admin
# change_view. Once again, a hook for this kind of functionality
# would be nice. Unfortunately, it results in doubling the number
# of queries required to construct the formsets.
form = ModelForm(instance=obj, initial=self.get_revision_form_data(request, obj, version))
prefixes = {}
for FormSet, inline in zip(self.get_formsets(request, obj), self.get_inline_instances(request)):
# This code is standard for creating the formset.
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset = FormSet(instance=obj, prefix=prefix,
queryset=inline.queryset(request))
self._hack_inline_formset_initial(inline, FormSet, formset, obj, version, revert, recover)
# Add this hacked formset to the form.
formsets.append(formset)
# Generate admin form helper.
adminForm = helpers.AdminForm(form, self.get_fieldsets(request, obj),
self.prepopulated_fields, self.get_readonly_fields(request, obj),
model_admin=self)
media = self.media + adminForm.media
# Generate formset helpers.
inline_admin_formsets = []
for inline, formset in zip(self.get_inline_instances(request), formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
prepopulated = inline.get_prepopulated_fields(request, obj)
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, prepopulated, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
# Generate the context.
context.update({"adminform": adminForm,
"object_id": object_id,
"original": obj,
"is_popup": False,
"media": mark_safe(media),
"inline_admin_formsets": inline_admin_formsets,
"errors": helpers.AdminErrorList(form, formsets),
"app_label": opts.app_label,
"add": False,
"change": True,
"revert": revert,
"recover": recover,
"has_add_permission": self.has_add_permission(request),
"has_change_permission": self.has_change_permission(request, obj),
"has_delete_permission": self.has_delete_permission(request, obj),
"has_file_field": True,
"has_absolute_url": False,
"form_url": mark_safe(request.path),
"opts": opts,
"content_type_id": ContentType.objects.get_for_model(self.model).id,
"save_as": False,
"save_on_top": self.save_on_top,
"changelist_url": reverse("%s:%s_%s_changelist" % (self.admin_site.name, opts.app_label, opts.module_name)),
"change_url": reverse("%s:%s_%s_change" % (self.admin_site.name, opts.app_label, opts.module_name), args=(quote(obj.pk),)),
"history_url": reverse("%s:%s_%s_history" % (self.admin_site.name, opts.app_label, opts.module_name), args=(quote(obj.pk),)),
"recoverlist_url": reverse("%s:%s_%s_recoverlist" % (self.admin_site.name, opts.app_label, opts.module_name))})
# Render the form.
if revert:
form_template = self.revision_form_template or self._get_template_list("revision_form.html")
elif recover:
form_template = self.recover_form_template or self._get_template_list("recover_form.html")
else:
assert False
return render_to_response(form_template, context, template.RequestContext(request))
@transaction.atomic
def recover_view(self, request, version_id, extra_context=None):
"""Displays a form that can recover a deleted model."""
# check if user has change or add permissions for model
if not self.has_change_permission(request) and not self.has_add_permission(request):
raise PermissionDenied
version = get_object_or_404(Version, pk=version_id)
obj = version.object_version.object
context = {"title": _("Recover %(name)s") % {"name": version.object_repr},}
context.update(extra_context or {})
return self.render_revision_form(request, obj, version, context, recover=True)
@transaction.atomic
def revision_view(self, request, object_id, version_id, extra_context=None):
"""Displays the contents of the given revision."""
# check if user has change or add permissions for model
if not self.has_change_permission(request):
raise PermissionDenied
object_id = unquote(object_id) # Underscores in primary key get quoted to "_5F"
obj = get_object_or_404(self.model, pk=object_id)
version = get_object_or_404(Version, pk=version_id, object_id=force_text(obj.pk))
# Generate the context.
context = {"title": _("Revert %(name)s") % {"name": force_text(self.model._meta.verbose_name)},}
context.update(extra_context or {})
return self.render_revision_form(request, obj, version, context, revert=True)
def changelist_view(self, request, extra_context=None):
"""Renders the change view."""
opts = self.model._meta
context = {"recoverlist_url": reverse("%s:%s_%s_recoverlist" % (self.admin_site.name, opts.app_label, opts.module_name)),
"add_url": reverse("%s:%s_%s_add" % (self.admin_site.name, opts.app_label, opts.module_name)),}
context.update(extra_context or {})
return super(VersionAdmin, self).changelist_view(request, context)
def history_view(self, request, object_id, extra_context=None):
"""Renders the history view."""
# check if user has change or add permissions for model
if not self.has_change_permission(request):
raise PermissionDenied
object_id = unquote(object_id) # Underscores in primary key get quoted to "_5F"
opts = self.model._meta
action_list = [
{
"revision": version.revision,
"url": reverse("%s:%s_%s_revision" % (self.admin_site.name, opts.app_label, opts.module_name), args=(quote(version.object_id), version.id)),
}
for version
in self._order_version_queryset(self.revision_manager.get_for_object_reference(
self.model,
object_id,
).select_related("revision__user"))
]
# Compile the context.
context = {"action_list": action_list}
context.update(extra_context or {})
return super(VersionAdmin, self).history_view(request, object_id, context)
class VersionMetaAdmin(VersionAdmin):
"""
An enhanced VersionAdmin that annotates the given object with information about
the last version that was saved.
"""
def get_queryset(self, request):
"""Returns the annotated queryset."""
content_type = ContentType.objects.get_for_model(self.model)
pk = self.model._meta.pk
if has_int_pk(self.model):
version_table_field = "object_id_int"
else:
version_table_field = "object_id"
return super(VersionMetaAdmin, self).get_queryset(request).extra(
select = {
"date_modified": """
SELECT MAX(%(revision_table)s.date_created)
FROM %(version_table)s
JOIN %(revision_table)s ON %(revision_table)s.id = %(version_table)s.revision_id
WHERE %(version_table)s.content_type_id = %%s AND %(version_table)s.%(version_table_field)s = %(table)s.%(pk)s
""" % {
"revision_table": connection.ops.quote_name(Revision._meta.db_table),
"version_table": connection.ops.quote_name(Version._meta.db_table),
"table": connection.ops.quote_name(self.model._meta.db_table),
"pk": connection.ops.quote_name(pk.db_column or pk.attname),
"version_table_field": connection.ops.quote_name(version_table_field),
}
},
select_params = (content_type.id,),
)
def get_date_modified(self, obj):
"""Displays the last modified date of the given object, typically for use in a change list."""
return localize(obj.date_modified)
get_date_modified.short_description = "date modified"
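# Usage sketch with a hypothetical model: how a project would typically hook
# its models into the version-controlled admin classes defined above.
#
#     from django.contrib import admin
#     from myapp.models import Article           # hypothetical app and model
#
#     class ArticleAdmin(VersionMetaAdmin):
#         list_display = ("title", "get_date_modified")
#         history_latest_first = True
#
#     admin.site.register(Article, ArticleAdmin)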
|
|
from astropy.table import Table, join
from astropy.io import fits
from scipy.interpolate import griddata, RegularGridInterpolator
import numpy as np
import pynrc, webbpsf, os
#inst = webbpsf.NIRCam()
outdir = pynrc.conf.path + 'opd_mod/'
# Read in measured SI Zernike data
data_dir = webbpsf.utils.get_webbpsf_data_path() + '/'
zernike_file = data_dir + 'si_zernikes_isim_cv3.fits'
# Read in Zemax Zernike data to remove edge effects
# zemax_file = outdir + 'si_zernikes_Zemax_wfe.csv'
# Coordinate limits (oversized) for each FoV
v2v3_limits = {}
v2v3_limits['SW'] = {'V2':[-160, 160], 'V3':[-570, -420]}
v2v3_limits['LW'] = v2v3_limits['SW']
v2v3_limits['SWA'] = {'V2':[0, 160], 'V3':[-570, -420]}
v2v3_limits['LWA'] = v2v3_limits['SWA']
v2v3_limits['SWB'] = {'V2':[-160, 0], 'V3':[-570, -420]}
v2v3_limits['LWB'] = v2v3_limits['SWB']
if not os.path.exists(zernike_file):
print('Zernike file does not exist:')
print(' {}'.format(zernike_file))
else:
ztable_full = Table.read(zernike_file)
keys = np.array(ztable_full.keys())
ind_z = ['Zernike' in k for k in keys]
zkeys = keys[ind_z]
# for mod in ['SW', 'LW', 'SWA', 'LWA', 'SWB', 'LWB']:
for mod in ['SWA', 'LWA', 'SWB', 'LWB']:
ind_nrc = ['NIRCam'+mod in row['instrument'] for row in ztable_full]
ind_nrc = np.where(ind_nrc)
# Grab V2/V3 coordinates
# In units of arcmin
v2 = ztable_full[ind_nrc]['V2']
v3 = ztable_full[ind_nrc]['V3']
# Create finer mesh grid
v2_lims = np.array(v2v3_limits[mod]['V2']) / 60.
v3_lims = np.array(v2v3_limits[mod]['V3']) / 60.
dstep = 1. / 60. # 1" steps
xgrid = np.arange(v2_lims[0], v2_lims[1]+dstep, dstep)
ygrid = np.arange(v3_lims[0], v3_lims[1]+dstep, dstep)
X, Y = np.meshgrid(xgrid,ygrid)
extent = [X.min(), X.max(), Y.min(), Y.max()]
# Create a Zernike cube
zcube = []
for k in zkeys:
z = ztable_full[ind_nrc][k]
# There will be some NaNs along the outer borders
zgrid = griddata((v2, v3), z, (X, Y), method='cubic')
ind_nan = np.isnan(zgrid)
# Cut out a square region whose values are not NaNs
xnans = ind_nan.sum(axis=0)
ynans = ind_nan.sum(axis=1)
x_ind = xnans < ygrid.size
y_ind = ynans < xgrid.size
zgrid2 = zgrid[y_ind, :][:, x_ind]
ygrid2 = ygrid[y_ind]
xgrid2 = xgrid[x_ind]
# Remove rows/cols 1 by 1 until no NaNs
while np.isnan(zgrid2.sum()):
zgrid2 = zgrid2[1:-1,1:-1]
ygrid2 = ygrid2[1:-1]
xgrid2 = xgrid2[1:-1]
# Create regular grid interpolator function for extrapolation at NaN's
func = RegularGridInterpolator((ygrid2,xgrid2), zgrid2, method='linear',
bounds_error=False, fill_value=None)
pts = np.array([Y[ind_nan], X[ind_nan]]).transpose()
zgrid[ind_nan] = func(pts)
zcube.append(zgrid)
zcube = np.array(zcube)
hdu = fits.PrimaryHDU(zcube)
hdr = hdu.header
hdr['units'] = 'meters'
hdr['xunits'] = 'Arcmin'
hdr['xmin'] = X.min()
hdr['xmax'] = X.max()
hdr['xdel'] = dstep
hdr['yunits'] = 'Arcmin'
hdr['ymin'] = Y.min()
hdr['ymax'] = Y.max()
hdr['ydel'] = dstep
hdr['wave'] = 2.10 if 'SW' in mod else 3.23
hdr['comment'] = 'X and Y values correspond to V2 and V3 coordinates (arcmin).'
hdr['comment'] = 'Slices in the cube correspond to Zernikes 1 to 36.'
hdr['comment'] = 'Zernike values calculated using 2D cubic interpolation'
hdr['comment'] = 'and linear extrapolation outside gridded data.'
fname = 'NIRCam{}_zernikes_isim_cv3.fits'.format(mod)
hdu.writeto(outdir + fname, overwrite=True)
# Now for coronagraphy
from astropy.io import ascii
zernike_file = outdir + 'si_zernikes_coron_wfe.csv'
v2v3_limits = {}
v2v3_limits['SW'] = {'V2':[-160, 160], 'V3':[-570+30, -420+30]}
v2v3_limits['LW'] = v2v3_limits['SW']
v2v3_limits['SWA'] = {'V2':[0, 160], 'V3':[-570+30, -420+30]}
v2v3_limits['LWA'] = v2v3_limits['SWA']
v2v3_limits['SWB'] = {'V2':[-160, 0], 'V3':[-570+30, -420+30]}
v2v3_limits['LWB'] = v2v3_limits['SWB']
if not os.path.exists(zernike_file):
print('Zernike file does not exist:')
print(' {}'.format(zernike_file))
else:
ztable_full = ascii.read(zernike_file)
ztable_full.rename_column('\ufeffinstrument', 'instrument')
keys = np.array(ztable_full.keys())
ind_z = ['Zernike' in k for k in keys]
zkeys = keys[ind_z]
# for mod in ['SW', 'LW', 'SWA', 'LWA', 'SWB', 'LWB']:
for mod in ['SWA', 'LWA', 'SWB', 'LWB']:
ind_nrc = ['NIRCam'+mod in row['instrument'] for row in ztable_full]
ind_nrc = np.where(ind_nrc)
print(mod, len(ind_nrc[0]))
# Grab V2/V3 coordinates
# In units of arcmin
v2 = ztable_full[ind_nrc]['V2']
v3 = ztable_full[ind_nrc]['V3']
# Create finer mesh grid
v2_lims = np.array(v2v3_limits[mod]['V2']) / 60.
v3_lims = np.array(v2v3_limits[mod]['V3']) / 60.
dstep = 1. / 60. # 1" steps
xgrid = np.arange(v2_lims[0], v2_lims[1]+dstep, dstep)
ygrid = np.arange(v3_lims[0], v3_lims[1]+dstep, dstep)
X, Y = np.meshgrid(xgrid,ygrid)
extent = [X.min(), X.max(), Y.min(), Y.max()]
# Create a Zernike cube
zcube = []
for k in zkeys:
z = ztable_full[ind_nrc][k]
# There will be some NaNs along the outer borders
zgrid = griddata((v2, v3), z, (X, Y), method='cubic')
ind_nan = np.isnan(zgrid)
# Cut out a square region whose values are not NaNs
xnans = ind_nan.sum(axis=0)
ynans = ind_nan.sum(axis=1)
x_ind = xnans < ygrid.size
y_ind = ynans < xgrid.size
zgrid2 = zgrid[y_ind, :][:, x_ind]
ygrid2 = ygrid[y_ind]
xgrid2 = xgrid[x_ind]
# Remove rows/cols 1 by 1 until no NaNs
while np.isnan(zgrid2.sum()):
zgrid2 = zgrid2[1:-1,1:-1]
ygrid2 = ygrid2[1:-1]
xgrid2 = xgrid2[1:-1]
# Create regular grid interpolator function for extrapolation at NaN's
func = RegularGridInterpolator((ygrid2,xgrid2), zgrid2, method='linear',
bounds_error=False, fill_value=None)
pts = np.array([Y[ind_nan], X[ind_nan]]).transpose()
zgrid[ind_nan] = func(pts)
zcube.append(zgrid)
zcube = np.array(zcube)
hdu = fits.PrimaryHDU(zcube)
hdr = hdu.header
hdr['units'] = 'meters'
hdr['xunits'] = 'Arcmin'
hdr['xmin'] = X.min()
hdr['xmax'] = X.max()
hdr['xdel'] = dstep
hdr['yunits'] = 'Arcmin'
hdr['ymin'] = Y.min()
hdr['ymax'] = Y.max()
hdr['ydel'] = dstep
hdr['wave'] = 2.10 if 'SW' in mod else 3.23
hdr['comment'] = 'X and Y values correspond to V2 and V3 coordinates (arcmin).'
hdr['comment'] = 'Slices in the cube correspond to Zernikes 1 to 36.'
hdr['comment'] = 'Zernike values calculated using 2D cubic interpolation'
hdr['comment'] = 'and linear extrapolation outside gridded data.'
fname = 'NIRCam{}_zernikes_coron.fits'.format(mod)
hdu.writeto(outdir + fname, overwrite=True)
# plt.clf()
# plt.contourf(xgrid,ygrid,zcube[i],20)
# #plt.imshow(zcube[i], extent = [X.min(), X.max(), Y.min(), Y.max()])
# plt.scatter(v2,v3,marker='o',c='r',s=5)
# plt.xlim([X.min(),X.max()])
# plt.ylim([Y.min(),Y.max()])
# plt.axes().set_aspect('equal', 'datalim')
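# Usage sketch (assumes one of the FITS files written above exists on disk):
# read a Zernike cube back in and interpolate a single Zernike term at an
# arbitrary (V2, V3) position. The file name, slice index and test coordinates
# are only illustrative.
#
#     hdul = fits.open(outdir + 'NIRCamSWA_zernikes_isim_cv3.fits')
#     cube, h = hdul[0].data, hdul[0].header
#     v2_axis = h['xmin'] + h['xdel'] * np.arange(cube.shape[2])
#     v3_axis = h['ymin'] + h['ydel'] * np.arange(cube.shape[1])
#     interp_focus = RegularGridInterpolator((v3_axis, v2_axis), cube[3])
#     print(interp_focus((-8.2, 1.5)))   # WFE in meters at (V3, V2) arcmin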
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import importlib
import os
import pytz
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from trello.organization import Organization
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MEDIA_URL = "/media/"
MEDIA_ROOT = BASE_DIR+"/djanban/media"
#
settings_local_module = 'djanban.settings_local'
if os.environ.get("DJANGO_APP_MODE") == "desktop_app":
settings_local_module = 'djanban.settings_desktop_app'
try:
settings_local = importlib.import_module(settings_local_module)
except ImportError:
print("Please, create a {0} in project directory "
"with SECRET_KEY, DEBUG, DOMAIN, ALLOWED_HOSTS and DATABASES settings".format(settings_local_module))
exit(-1)
DATABASES = settings_local.DATABASES
TEST_DATABASE_PATH = os.path.join(BASE_DIR, "../resources/database/test.db")
if 'test' in sys.argv:
DATABASES['default'] = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': TEST_DATABASE_PATH
}
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = settings_local.SECRET_KEY
# This is the checksum of the destruction password. It is needed to verify the identity of the administrator
# that is executing the manage.py destruct_all option.
DESTRUCTION_PASSWORD_CHECKSUM = None
if hasattr(settings_local, "DESTRUCTION_PASSWORD_CHECKSUM"):
DESTRUCTION_PASSWORD_CHECKSUM = settings_local.DESTRUCTION_PASSWORD_CHECKSUM
# Signup restriction by email regex
SIGNUP_ALLOWED_EMAIL_REGEXES = None
if hasattr(settings_local, "SIGNUP_ALLOWED_EMAIL_REGEXES"):
SIGNUP_ALLOWED_EMAIL_REGEXES = settings_local.SIGNUP_ALLOWED_EMAIL_REGEXES
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = settings_local.DEBUG
DOMAIN = settings_local.DOMAIN
PORT = settings_local.PORT
ALLOWED_HOSTS = settings_local.ALLOWED_HOSTS
ADMINS = []
if hasattr(settings_local, "ADMINS"):
ADMINS = settings_local.ADMINS
SITE_ID = 1
# Administrator group
ADMINISTRATOR_GROUP = "Administrators"
DATE_INPUT_FORMATS = ('%Y-%m-%d', '%Y/%m/%d', '%d-%m-%Y', '%d/%m/%Y')
# Application definition
INSTALLED_APPS = [
'async_include',
'captcha',
'ckeditor',
'ckeditor_uploader',
'crequest',
'cuser',
'dal',
'dal_select2',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'djanban.apps.agility_rating',
'djanban.apps.api',
'djanban.apps.anonymizer',
'djanban.apps.base',
'djanban.apps.boards',
'djanban.apps.charts',
'djanban.apps.hourly_rates',
'djanban.apps.index',
'djanban.apps.journal',
'djanban.apps.destructor',
'djanban.apps.dev_times',
'djanban.apps.dev_environment',
'djanban.apps.fetch',
'djanban.apps.forecasters',
'djanban.apps.members',
'djanban.apps.multiboards',
'djanban.apps.niko_niko_calendar',
'djanban.apps.notifications',
'djanban.apps.password_reseter',
'djanban.apps.recurrent_cards',
'djanban.apps.reporter',
'djanban.apps.reports',
'djanban.apps.repositories',
'djanban.apps.requirements',
'djanban.apps.slideshow',
'djanban.apps.visitors',
'djanban.apps.work_hours_packages',
'djanban.apps.workflows',
]
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'crequest.middleware.CrequestMiddleware',
'cuser.middleware.CuserMiddleware'
)
# Based on the tutorial that integrates Django with Angular
# (https://4sw.in/blog/2016/django-angular2-tutorial-part-2/)
ANGULAR_URL = '/ng/'
ANGULAR_URL_REGEX = r'^ng/(?P<path>.*)$'
ANGULAR_ROOT = os.path.join(BASE_DIR, 'angularapp/')
ROOT_URLCONF = 'djanban.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR+"/djanban/templates"],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
],
},
},
]
WSGI_APPLICATION = 'wsgi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# AUTH_PASSWORD_VALIDATORS = [
# {
# 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
# },
# ]
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = settings_local.LANGUAGE_CODE
TIME_ZONE = settings_local.TIME_ZONE
Organization.TIMEZONE = TIME_ZONE
PYTZ_SERVER_TIME_ZONE = pytz.timezone(TIME_ZONE)
PYTZ_UTC_TIME_ZONE = pytz.timezone('UTC')
if hasattr(settings_local, "DATE_FORMAT"):
DATE_FORMAT = settings_local.DATE_FORMAT
else:
DATE_FORMAT = "Y-m-d"
if hasattr(settings_local, "DATETIME_FORMAT"):
DATETIME_FORMAT = settings_local.DATETIME_FORMAT
else:
DATETIME_FORMAT = "Y-m-d H:i"
USE_I18N = False
USE_L10N = False
USE_TZ = True
LOGIN_URL = '/base/login/'
EMAIL_USE_TLS = settings_local.EMAIL_USE_TLS
EMAIL_HOST = settings_local.EMAIL_HOST
EMAIL_PORT = settings_local.EMAIL_PORT
EMAIL_HOST_USER = settings_local.EMAIL_HOST_USER
EMAIL_HOST_PASSWORD = settings_local.EMAIL_HOST_PASSWORD
DEFAULT_FROM_EMAIL = settings_local.DEFAULT_FROM_EMAIL
SERVER_EMAIL = settings_local.SERVER_EMAIL
# HTTPS configuration
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
USE_X_FORWARDED_HOST = False # Django >= 1.4
USE_X_FORWARDED_PORT = False
SECURE_SSL_REDIRECT = False
SECURE_SSL_HOST = None
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATICFILES_DIRS = [
BASE_DIR + "/djanban/static/"
]
TMP_DIR = BASE_DIR + "/djanban/tmp/"
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR + "/public_html/collectedstatic"
# Captcha image and font size
CAPTCHA_IMAGE_SIZE = (200, 70)
CAPTCHA_FONT_SIZE = 52
# CKEDITOR preferences
CKEDITOR_UPLOAD_PATH = "uploads/"
CKEDITOR_RESTRICT_BY_USER = False
CKEDITOR_BROWSE_SHOW_DIRS = True
CKEDITOR_IMAGE_BACKEND = "pillow"
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'Custom',
'toolbar_Custom': [
['Bold', 'Italic', 'Underline'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock'],
['Image', 'Update', 'Link', 'Unlink'],
['RemoveFormat'],
],
},
'basic': {
'toolbar': 'basic'
},
'full': {
'toolbar': 'full'
}
}
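# Example of a minimal djanban/settings_local.py that satisfies the attribute
# lookups performed above. Every value is a placeholder; only the setting
# names are taken from this module.
#
#     SECRET_KEY = "change-me"
#     DEBUG = True
#     DOMAIN = "localhost"
#     PORT = "8000"
#     ALLOWED_HOSTS = ["localhost", "127.0.0.1"]
#     LANGUAGE_CODE = "en-us"
#     TIME_ZONE = "UTC"
#     EMAIL_USE_TLS = False
#     EMAIL_HOST = "localhost"
#     EMAIL_PORT = 25
#     EMAIL_HOST_USER = ""
#     EMAIL_HOST_PASSWORD = ""
#     DEFAULT_FROM_EMAIL = "noreply@example.com"
#     SERVER_EMAIL = "noreply@example.com"
#     DATABASES = {
#         "default": {
#             "ENGINE": "django.db.backends.sqlite3",
#             "NAME": "db.sqlite3",
#         }
#     }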
|
|
import unittest
import numpy
import chainer
from chainer import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer import utils
from chainer.utils import type_check
_scipy_available = True
try:
from scipy import sparse # NOQA
except ImportError:
_scipy_available = False
def _setup_tensor(_min, _max, shape, dtype, threshold=None):
y = numpy.random.uniform(_min, _max, shape).astype(dtype)
if threshold is not None:
y[y < threshold] = 0
return y
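# For example, _setup_tensor(.5, 1, (2, 3), numpy.float32, threshold=.75)
# draws uniform values in [0.5, 1) and zeroes out entries below 0.75, so
# roughly half of the entries become zero -- a convenient dense input for
# utils.to_coo() in the tests below.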
@testing.parameterize(*testing.product_dict(
[
{'m': 2, 'n': 3, 'k': 4},
{'m': 3, 'n': 4, 'k': 2},
],
[
{'transa': False}, {'transa': True},
],
[
{'transb': False}, {'transb': True},
],
[
{'nbatch': 0}, {'nbatch': 1}, {'nbatch': 4},
],
[
{'a_dtype': numpy.float16},
{'a_dtype': numpy.float32},
{'a_dtype': numpy.float64},
],
[
{'b_dtype': numpy.float16},
{'b_dtype': numpy.float32},
{'b_dtype': numpy.float64},
]
))
class TestCooMatMul(unittest.TestCase):
def setUp(self):
a_shape = self._set_shape([self.m, self.k], self.transa)
b_shape = self._set_shape([self.k, self.n], self.transb)
c_shape = self._set_shape([self.m, self.n], False)
self.c_dtype = numpy.result_type(self.a_dtype, self.b_dtype)
self.a = _setup_tensor(.5, 1, a_shape, self.a_dtype, .75)
self.b = _setup_tensor(.5, 1, b_shape, self.b_dtype, .75)
self.gc = _setup_tensor(-1, 1, c_shape, self.c_dtype)
self.gga = _setup_tensor(.5, 1, a_shape, self.a_dtype)
self.gga[numpy.where(self.a < .75)] = 0
self.ggb = _setup_tensor(.5, 1, b_shape, self.b_dtype)
self.ggb[numpy.where(self.b < .75)] = 0
self.forward_answer = self._matmul(self.a, self.b)
def _set_shape(self, shape, trans):
if trans:
shape = [shape[1], shape[0]]
if self.nbatch > 0:
shape = [self.nbatch, shape[0], shape[1]]
return shape
def _matmul(self, a, b):
if self.transa:
a = a.swapaxes(-1, -2)
if self.transb:
b = b.swapaxes(-1, -2)
if hasattr(numpy, 'matmul'):
return numpy.matmul(a, b)
elif a.ndim == 2:
return numpy.dot(a, b)
else:
return numpy.einsum('...ij,...jk->...ik', a, b)
#
# SPDN: sparse A * dense B
#
def check_SPDN_forward(self, a_data, b_data, atol=1e-4, rtol=1e-5):
sp_a = utils.to_coo(a_data, requires_grad=True)
b = chainer.Variable(b_data)
c = F.sparse_matmul(sp_a, b, transa=self.transa, transb=self.transb)
testing.assert_allclose(self.forward_answer, c.data, atol, rtol)
def test_SPDN_sparse_matmul_forward_cpu(self):
if not _scipy_available:
return
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_SPDN_forward(self.a, self.b, atol=1e-3, rtol=1e-3)
else:
self.check_SPDN_forward(self.a, self.b)
@attr.gpu
def test_SPDN_sparse_matmul_forward_gpu(self):
a = cuda.to_gpu(self.a)
b = cuda.to_gpu(self.b)
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_SPDN_forward(a, b, atol=1e-3, rtol=1e-3)
else:
self.check_SPDN_forward(a, b)
def check_SPDN_backward(self, a_data, b_data, c_grad, atol, rtol):
sp_a = utils.to_coo(a_data)
func = F.math.sparse_matmul.CooMatMul(
sp_a.row, sp_a.col, sp_a.shape,
transa=self.transa, transb=self.transb, transc=False)
def op(a, b):
return func.apply((a, b))[0]
gradient_check.check_backward(
op, (sp_a.data.data, b_data), c_grad, atol=atol, rtol=rtol,
dtype=numpy.float32)
def test_SPDN_sparse_matmul_backward_cpu(self):
if not _scipy_available:
return
self.check_SPDN_backward(
self.a, self.b, self.gc, atol=1e-2, rtol=1e-2)
@attr.gpu
def test_SPDN_sparse_matmul_backward_gpu(self):
self.check_SPDN_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.b),
cuda.to_gpu(self.gc), atol=1e-2, rtol=1e-2)
def check_SPDN_double_backward(
self, a_data, b_data, c_grad, a_grad_grad, b_grad_grad,
atol, rtol):
sp_a = utils.to_coo(a_data)
sp_gga = utils.to_coo(a_grad_grad)
func = F.math.sparse_matmul.CooMatMul(
sp_a.row, sp_a.col, sp_a.shape,
transa=self.transa, transb=self.transb, transc=False)
def op(a, b):
return func.apply((a, b))[0]
gradient_check.check_double_backward(
op, (sp_a.data.data, b_data),
c_grad, (sp_gga.data.data, b_grad_grad),
atol=atol, rtol=rtol, dtype=numpy.float32)
def test_SPDN_sparse_matmul_double_backward_cpu(self):
if not _scipy_available:
return
self.check_SPDN_double_backward(
self.a, self.b, self.gc, self.gga, self.ggb,
atol=1e-2, rtol=1e-2)
@attr.gpu
def test_SPDN_sparse_matmul_double_backward_gpu(self):
self.check_SPDN_double_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.b),
cuda.to_gpu(self.gc), cuda.to_gpu(self.gga),
cuda.to_gpu(self.ggb), atol=1e-2, rtol=1e-2)
#
# DNSP: dense A * sparse B
#
def check_DNSP_forward(self, a_data, b_data, atol=1e-4, rtol=1e-5):
a = chainer.Variable(a_data)
sp_b = utils.to_coo(b_data, requires_grad=True)
c = F.sparse_matmul(a, sp_b, transa=self.transa, transb=self.transb)
testing.assert_allclose(self.forward_answer, c.data, atol, rtol)
def test_DNSP_sparse_matmul_forward_cpu(self):
if not _scipy_available:
return
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_DNSP_forward(self.a, self.b, atol=1e-3, rtol=1e-3)
else:
self.check_DNSP_forward(self.a, self.b)
@attr.gpu
def test_DNSP_sparse_matmul_forward_gpu(self):
a = cuda.to_gpu(self.a)
b = cuda.to_gpu(self.b)
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_DNSP_forward(a, b, atol=1e-3, rtol=1e-3)
else:
self.check_DNSP_forward(a, b)
def check_DNSP_backward(self, a_data, b_data, c_grad, atol, rtol):
sp_b = utils.to_coo(b_data)
func = F.math.sparse_matmul.CooMatMul(
sp_b.row, sp_b.col, sp_b.shape,
transa=not self.transb, transb=not self.transa, transc=True)
def op(b, a):
return func.apply((b, a))[0]
gradient_check.check_backward(
op, (sp_b.data.data, a_data), c_grad, atol=atol, rtol=rtol,
dtype=numpy.float32)
def test_DNSP_sparse_matmul_backward_cpu(self):
if not _scipy_available:
return
self.check_DNSP_backward(
self.a, self.b, self.gc, atol=1e-2, rtol=1e-2)
@attr.gpu
def test_DNSP_sparse_matmul_backward_gpu(self):
self.check_DNSP_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.b),
cuda.to_gpu(self.gc), atol=1e-2, rtol=1e-2)
def check_DNSP_double_backward(
self, a_data, b_data, c_grad, a_grad_grad, b_grad_grad,
atol, rtol):
sp_b = utils.to_coo(b_data)
sp_ggb = utils.to_coo(b_grad_grad)
func = F.math.sparse_matmul.CooMatMul(
sp_b.row, sp_b.col, sp_b.shape,
transa=not self.transb, transb=not self.transa, transc=True)
def op(b, a):
return func.apply((b, a))[0]
gradient_check.check_double_backward(
op, (sp_b.data.data, a_data),
c_grad, (sp_ggb.data.data, a_grad_grad),
atol=atol, rtol=rtol, dtype=numpy.float32)
def test_DNSP_sparse_matmul_double_backward_cpu(self):
if not _scipy_available:
return
self.check_DNSP_double_backward(
self.a, self.b, self.gc, self.gga, self.ggb,
atol=1e-2, rtol=1e-2)
@attr.gpu
def test_DNSP_sparse_matmul_double_backward_gpu(self):
self.check_DNSP_double_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.b),
cuda.to_gpu(self.gc), cuda.to_gpu(self.gga),
cuda.to_gpu(self.ggb), atol=1e-2, rtol=1e-2)
@testing.parameterize(*testing.product_dict(
[
{'transa': False}, {'transa': True},
],
[
{'transb': False}, {'transb': True},
],
))
class TestCooMatMulInvalid(unittest.TestCase):
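"""Checks that sparse_matmul rejects invalid input combinations: mismatched
ndim, batch size or matrix shapes (type_check.InvalidType), and operand pairs
that are both sparse or both dense (ValueError)."""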
def test_invalid_ndim(self):
a = _setup_tensor(.5, 1, (2, 3, 3), numpy.float32, .75)
b = _setup_tensor(.5, 1, (3, 3), numpy.float32, .75)
sp_a = utils.to_coo(a)
sp_b = utils.to_coo(b)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(sp_a, b, self.transa, self.transb)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(a, sp_b, self.transa, self.transb)
def test_invalid_nbatch(self):
a = _setup_tensor(.5, 1, (2, 3, 3), numpy.float32, .75)
b = _setup_tensor(.5, 1, (3, 3, 3), numpy.float32, .75)
sp_a = utils.to_coo(a)
sp_b = utils.to_coo(b)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(sp_a, b, self.transa, self.transb)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(a, sp_b, self.transa, self.transb)
def test_invalid_shape(self):
a = _setup_tensor(.5, 1, (1, 2, 3), numpy.float32, .75)
b = _setup_tensor(.5, 1, (1, 4, 5), numpy.float32, .75)
sp_a = utils.to_coo(a)
sp_b = utils.to_coo(b)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(sp_a, b, self.transa, self.transb)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(a, sp_b, self.transa, self.transb)
def test_invalid_inputs(self):
a = _setup_tensor(.5, 1, (1, 3, 3), numpy.float32, .75)
b = _setup_tensor(.5, 1, (1, 3, 3), numpy.float32, .75)
sp_a = utils.to_coo(a)
sp_b = utils.to_coo(b)
with self.assertRaises(ValueError):
F.sparse_matmul(sp_a, sp_b, self.transa, self.transb)
with self.assertRaises(ValueError):
F.sparse_matmul(a, b, self.transa, self.transb)
testing.run_module(__name__, __file__)
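# Rough usage sketch for the API exercised above (illustrative only; the
# names and shapes here are arbitrary):
#
#     a = numpy.random.rand(4, 3).astype(numpy.float32)
#     b = numpy.random.rand(3, 2).astype(numpy.float32)
#     sp_a = utils.to_coo(a)          # COO view of the dense matrix
#     c = F.sparse_matmul(sp_a, b)    # same result as a.dot(b)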
|
|
from __future__ import absolute_import
from .events import WebhookAction
from .parsers import get_service_handler
def WebhookRequestHandlerFactory(config, event_store, server_status, is_https=False):
"""Factory method for webhook request handler class"""
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler
except ImportError:
from http.server import SimpleHTTPRequestHandler
class WebhookRequestHandler(SimpleHTTPRequestHandler, object):
"""Extends the BaseHTTPRequestHandler class and handles the incoming
HTTP requests."""
def __init__(self, *args, **kwargs):
self._config = config
self._event_store = event_store
self._server_status = server_status
self._is_https = is_https
super(WebhookRequestHandler, self).__init__(*args, **kwargs)
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
SimpleHTTPRequestHandler.end_headers(self)
def do_HEAD(self):
# Web UI needs to be enabled
if not self.validate_web_ui_enabled():
return
# Web UI might require HTTPS
if not self.validate_web_ui_https():
return
# Client needs to be whitelisted
if not self.validate_web_ui_whitelist():
return
# Client needs to authenticate
if not self.validate_web_ui_basic_auth():
return
return SimpleHTTPRequestHandler.do_HEAD(self)
def do_GET(self):
# Web UI needs to be enabled
if not self.validate_web_ui_enabled():
return
# Web UI might require HTTPS
if not self.validate_web_ui_https():
return
# Client needs to be whitelisted
if not self.validate_web_ui_whitelist():
return
# Client needs to authenticate
if not self.validate_web_ui_basic_auth():
return
# Handle status API call
if self.path == "/api/status":
self.handle_status_api()
return
# Serve static file
return SimpleHTTPRequestHandler.do_GET(self)
def handle_status_api(self):
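"""Return the event log and server status as a JSON document for the Web UI."""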
import json
from os import urandom
from base64 import b64encode
data = {
'events': self._event_store.dict_repr(),
'auth-key': self._server_status['auth-key']
}
data.update(self.get_server_status())
self.send_response(200, 'OK')
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(data).encode('utf-8'))
def do_POST(self):
"""Invoked on incoming POST requests"""
import logging
import json
import threading
try:
    from urlparse import parse_qs
except ImportError:
    from urllib.parse import parse_qs
logger = logging.getLogger()
content_length = int(self.headers.get('content-length'))
request_body = self.rfile.read(content_length).decode('utf-8')
# Extract request headers and convert all keys to lowercase (makes them easier to compare)
request_headers = dict(self.headers)
request_headers = dict((k.lower(), v) for k, v in request_headers.items())
action = WebhookAction(self.client_address, request_headers, request_body)
self._event_store.register_action(action)
action.set_waiting(True)
action.log_info('Incoming request from %s:%s' % (self.client_address[0], self.client_address[1]))
# Payloads from GitHub can be delivered as form data. Check the request for this pattern and extract the JSON payload
if request_headers['content-type'] == 'application/x-www-form-urlencoded':
    res = parse_qs(request_body)
if 'payload' in res and len(res['payload']) == 1:
request_body = res['payload'][0]
# Test case debug data
test_case = {
'headers': dict(self.headers),
'payload': json.loads(request_body),
'config': {},
'expected': {'status': 200, 'data': [{'deploy': 0}]}
}
try:
# Will raise a ValueError exception if it fails
ServiceRequestHandler = get_service_handler(request_headers, request_body, action)
# Unable to identify the source of the request
if not ServiceRequestHandler:
self.send_error(400, 'Unrecognized service')
test_case['expected']['status'] = 400
action.log_error("Unable to find appropriate handler for request. The source service is not supported")
action.set_waiting(False)
action.set_success(False)
return
service_handler = ServiceRequestHandler(self._config)
action.log_info("Handling the request with %s" % ServiceRequestHandler.__name__)
# Could be GitHubParser, GitLabParser or other
projects = service_handler.get_matching_projects(request_headers, request_body, action)
action.log_info("%s candidates matches the request" % len(projects))
# request_filter = WebhookRequestFilter()
if len(projects) == 0:
self.send_error(400, 'Bad request')
test_case['expected']['status'] = 400
action.log_error("No matching projects")
action.set_waiting(False)
action.set_success(False)
return
# Apply filters
matching_projects = []
for project in projects:
if project.apply_filters(request_headers, request_body, action):
matching_projects.append(project)
# Only keep projects that match
projects = matching_projects
action.log_info("%s candidates match after applying filters" % len(projects))
if not service_handler.validate_request(request_headers, request_body, projects, action):
self.send_error(400, 'Bad request')
test_case['expected']['status'] = 400
action.log_warning("Request was rejected due to a secret token mismatch")
action.set_waiting(False)
action.set_success(False)
return
test_case['expected']['status'] = 200
self.send_response(200, 'OK')
self.send_header('Content-type', 'text/plain')
self.end_headers()
if len(projects) == 0:
action.set_waiting(False)
action.set_success(False)
return
action.log_info("Proceeding with %s candidates" % len(projects))
action.set_waiting(False)
action.set_success(True)
for project in projects:
# Schedule the execution of the webhook (git pull and trigger deploy etc)
thread = threading.Thread(target=project.execute_webhook, args=[self._event_store])
thread.start()
# Add additional test case data
test_case['config'] = {
'url': 'url' in project and project['url'],
'branch': 'branch' in project and project['branch'],
'remote': 'remote' in project and project['remote'],
'deploy': 'echo test!'
}
except ValueError as e:
self.send_error(400, 'Unprocessable request')
action.log_warning('Unable to process incoming request from %s:%s' % (self.client_address[0], self.client_address[1]))
test_case['expected']['status'] = 400
action.set_waiting(False)
action.set_success(False)
return
except Exception as e:
self.send_error(500, 'Unable to process request')
test_case['expected']['status'] = 500
action.log_warning("Unable to process request")
action.set_waiting(False)
action.set_success(False)
raise e
finally:
# Save the request as a test case
if 'log-test-case' in self._config and self._config['log-test-case']:
self.save_test_case(test_case)
def log_message(self, format, *args):
"""Overloads the default message logging method to allow messages to
go through our custom logger instead."""
import logging
logger = logging.getLogger()
logger.info("%s - %s" % (self.client_address[0], format%args))
def save_test_case(self, test_case):
"""Log request information in a way it can be used as a test case."""
import time
import json
import os
# Mask some header values
masked_headers = ['x-github-delivery', 'x-hub-signature']
for key in test_case['headers']:
if key in masked_headers:
test_case['headers'][key] = 'xxx'
target = '%s-%s.tc.json' % (self.client_address[0], time.strftime("%Y%m%d%H%M%S"))
if 'log-test-case-dir' in self._config and self._config['log-test-case-dir']:
target = os.path.join(self._config['log-test-case-dir'], target)
with open(target, 'w') as outfile:
    outfile.write(json.dumps(test_case, sort_keys=True, indent=4))
def get_server_status(self):
"""Generate a copy of the server status object that contains the public IP or hostname."""
server_status = {}
for item in self._server_status.items():
key, value = item
public_host = self.headers.get('host').split(':')[0]
if key == 'http-uri':
server_status[key] = value.replace(self._config['http-host'], public_host)
if key == 'https-uri':
server_status[key] = value.replace(self._config['https-host'], public_host)
if key == 'wss-uri':
server_status[key] = value.replace(self._config['wss-host'], public_host)
return server_status
def validate_web_ui_enabled(self):
"""Verify that the Web UI is enabled"""
if self._config['web-ui-enabled']:
return True
self.send_error(403, "Web UI is not enabled")
return False
def validate_web_ui_https(self):
"""Verify that the request is made over HTTPS"""
if self._is_https:
return True
if not self._config['web-ui-require-https']:
return True
# Attempt to redirect the request to HTTPS
server_status = self.get_server_status()
if 'https-uri' in server_status:
self.send_response(307)
self.send_header('Location', '%s%s' % (server_status['https-uri'], self.path))
self.end_headers()
return False
self.send_error(403, "Web UI is only accessible through HTTPS")
return False
def validate_web_ui_whitelist(self):
"""Verify that the client address is whitelisted"""
# Allow all if whitelist is empty
if len(self._config['web-ui-whitelist']) == 0:
return True
# Verify that client IP is whitelisted
if self.client_address[0] in self._config['web-ui-whitelist']:
return True
self.send_error(403, "%s is not allowed access" % self.client_address[0])
return False
def validate_web_ui_basic_auth(self):
"""Authenticate the user"""
import base64
if not self._config['web-ui-auth-enabled']:
return True
# Verify that a username and password are specified in the config
if self._config['web-ui-username'] is None or self._config['web-ui-password'] is None:
    self.send_error(403, "Authentication credentials missing in config")
    return False
# Verify that the provided username and password match the ones in the config
credentials = "%s:%s" % (self._config['web-ui-username'],
                         self._config['web-ui-password'])
key = base64.b64encode(credentials.encode('utf-8')).decode('utf-8')
if self.headers.get('Authorization') == 'Basic ' + key:
return True
# Let the client know that authentication is required
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm=\"GAD\"')
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('Not authenticated'.encode('utf-8'))
return False
return WebhookRequestHandler
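# Minimal sketch of how the factory above can be wired to the standard
# library HTTP server (illustrative only; the real application constructs
# config, event_store and server_status elsewhere, and the address/port
# defaults here are placeholders):
def _example_serve(config, event_store, server_status, address='0.0.0.0', port=8001):
    try:
        from BaseHTTPServer import HTTPServer
    except ImportError:
        from http.server import HTTPServer
    # Build a handler class bound to this configuration and serve requests
    handler_class = WebhookRequestHandlerFactory(config, event_store, server_status)
    HTTPServer((address, port), handler_class).serve_forever()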
|
|
#!/usr/bin/env python2
#
# This script runs a set of black-box tests on Mailpile using the test
# messages found in `testing/`.
#
# If run with -i as the first argument, it will then drop to an interactive
# python shell for experimenting and manual testing.
#
import os
import sys
import time
import traceback
# Set up some paths
mailpile_root = os.path.join(os.path.dirname(__file__), '..')
mailpile_test = os.path.join(mailpile_root, 'mailpile', 'tests', 'data')
mailpile_send = os.path.join(mailpile_root, 'scripts', 'test-sendmail.sh')
mailpile_home = os.path.join(mailpile_test, 'tmp')
mailpile_gpgh = os.path.join(mailpile_test, 'gpg-keyring')
mailpile_sent = os.path.join(mailpile_home, 'sent.mbx')
# Set the GNUPGHOME variable to our test keyring
os.environ['GNUPGHOME'] = mailpile_gpgh
# Add the root to our import path, import API and demo plugins
sys.path.append(mailpile_root)
from mailpile.mail_source.mbox import MboxMailSource
from mailpile.mail_source.maildir import MaildirMailSource
from mailpile import Mailpile
##[ Black-box test script ]###################################################
FROM_BRE = [u'from:r\xfanar', u'from:bjarni']
ICELANDIC = u'r\xfanar'
IS_CHARS = (u'\xe1\xe9\xed\xf3\xfa\xfd\xfe\xe6\xf6\xf0\xc1\xc9\xcd\xd3'
u'\xda\xdd\xde\xc6\xd6\xd0')
MY_FROM = '[email protected]'
MY_NAME = 'Mailpile Team'
MY_KEYID = '0x7848252F'
# First, we set up a pristine Mailpile
os.system('rm -rf %s' % mailpile_home)
mp = Mailpile(workdir=mailpile_home)
cfg = config = mp._session.config
ui = mp._session.ui
if '-v' not in sys.argv:
from mailpile.ui import SilentInteraction
mp._session.ui = SilentInteraction(config)
cfg.plugins.load('demos', process_manifest=True)
cfg.plugins.load('hacks', process_manifest=True)
cfg.plugins.load('smtp_server', process_manifest=True)
def contents(fn):
    with open(fn, 'r') as fd:
        return fd.read()
def grep(w, fn):
    with open(fn, 'r') as fd:
        return '\n'.join([l for l in fd.readlines() if w in l])
def grepv(w, fn):
    with open(fn, 'r') as fd:
        return '\n'.join([l for l in fd.readlines() if w not in l])
def say(stuff):
mp._session.ui.mark(stuff)
mp._session.ui.reset_marks()
if '-v' not in sys.argv:
sys.stderr.write('.')
def do_setup():
# Set up initial tags and such
mp.setup('do_gpg_stuff')
# Setup GPG access credentials and TELL EVERYONE!
config.sys.login_banner = 'Pssst! The password is: mailpile'
#config.gnupg_passphrase.set_passphrase('mailpile')
#config.prefs.gpg_recipient = '3D955B5D7848252F'
config.vcards.get(MY_FROM).fn = MY_NAME
config.prefs.default_email = MY_FROM
config.prefs.encrypt_index = True
config.prefs.inline_pgp = False
# Configure our fake mail sending setup
config.sys.http_port = 33414
config.sys.smtpd.host = 'localhost'
config.sys.smtpd.port = 33415
config.prefs.openpgp_header = 'encrypt'
config.prefs.crypto_policy = 'openpgp-sign'
if '-v' in sys.argv:
config.sys.debug = 'log http vcard rescan sendmail log compose'
# Set up dummy contact importer for testing, disable Gravatar
mp.set('prefs/vcard/importers/demo/0/name = Mr. Rogers')
mp.set('prefs/vcard/importers/gravatar/0/active = false')
mp.set('prefs/vcard/importers/gpg/0/active = false')
# Make sure that actually worked
assert(not mp._config.prefs.vcard.importers.gpg[0].active)
assert(not mp._config.prefs.vcard.importers.gravatar[0].active)
# Copy the test Maildir...
for mailbox in ('Maildir', 'Maildir2'):
path = os.path.join(mailpile_home, mailbox)
os.system('cp -a %s/Maildir %s' % (mailpile_test, path))
# Add the test mailboxes
for mailbox in ('tests.mbx', ):
mp.add(os.path.join(mailpile_test, mailbox))
mp.add(os.path.join(mailpile_home, 'Maildir'))
def test_vcards():
say("Testing vcards")
# Do we have a Mr. Rogers contact?
mp.rescan('vcards')
assert(mp.contacts_view('[email protected]'
).result['contact']['fn'] == u'Mr. Rogers')
assert(len(mp.contacts('rogers').result['contacts']) == 1)
def test_load_save_rescan():
say("Testing load/save/rescan")
mp.rescan()
# Save and load the index, just for kicks
messages = len(mp._config.index.INDEX)
assert(messages > 5)
mp._config.index.save(mp._session)
mp._session.ui.reset_marks()
mp._config.index.load(mp._session)
mp._session.ui.reset_marks()
assert(len(mp._config.index.INDEX) == messages)
# Rescan AGAIN, so we can test for the presence of duplicates and
# verify that the move-detection code actually works.
os.system('rm -f %s/Maildir/*/*' % mailpile_home)
mp.add(os.path.join(mailpile_home, 'Maildir2'))
mp.rescan()
# Search for things, there should be exactly one match for each.
mp.order('rev-date')
for search in (FROM_BRE,
['agirorn'],
['subject:emerging'],
['from:twitter', 'brennan'],
['dates:2013-09-17', 'feministinn'],
['mailbox:tests.mbx'] + FROM_BRE,
['att:jpg', 'fimmtudaginn'],
['subject:Moderation', 'kde-isl', '-is:unread'],
['from:bjarni', 'subject:testing', 'subject:encryption',
'should', 'encrypted', 'message', 'tag:mp_enc-decrypted'],
['from:bjarni', 'subject:inline', 'subject:encryption',
'grand', 'tag:mp_enc-mixed-decrypted'],
['from:bjarni', 'subject:signatures', '-is:unread',
'tag:mp_sig-unverified'],
['from:brennan', 'subject:encrypted',
'testing', 'purposes', 'only', 'tag:mp_enc-decrypted'],
['from:brennan', 'subject:signed',
'tag:mp_sig-unverified'],
['from:barnaby', 'subject:testing', 'soup',
'tag:mp_sig-unknown', 'tag:mp_enc-decrypted'],
['from:square', 'subject:here', '-has:attachment'],
[u'subject:' + IS_CHARS, 'subject:8859'],
[u'subject:' + IS_CHARS, 'subject:UTF'],
):
say('Searching for: %s' % search)
results = mp.search(*search)
assert(results.result['stats']['count'] == 1)
say('Checking size of inbox')
mp.order('flat-date')
assert(mp.search('tag:inbox').result['stats']['count'] == 18)
say('FIXME: Make sure message signatures verified')
def test_message_data():
say("Testing message contents")
# Load up a message and take a look at it...
search_md = mp.search('subject:emerging').result
result_md = search_md['data']['metadata'][search_md['thread_ids'][0]]
view_md = mp.view('=%s' % result_md['mid']).result
# That loaded?
message_md = view_md['data']['messages'][result_md['mid']]
assert('athygli' in message_md['text_parts'][0]['data'])
# Load up another message and take a look at it...
search_bre = mp.search(*FROM_BRE).result
result_bre = search_bre['data']['metadata'][search_bre['thread_ids'][0]]
view_bre = mp.view('=%s' % result_bre['mid']).result
# Make sure message threading is working (there are message-ids and
# references in the test data).
assert(len(view_bre['thread_ids']) == 3)
# Make sure we are decoding weird headers correctly
metadata_bre = view_bre['data']['metadata'][view_bre['message_ids'][0]]
message_bre = view_bre['data']['messages'][view_bre['message_ids'][0]]
from_bre = search_bre['data']['addresses'][metadata_bre['from']['aid']]
say('Checking encoding: %s' % from_bre)
assert('=C3' not in from_bre['fn'])
assert('=C3' not in from_bre['address'])
for key, val in message_bre['header_list']:
if key.lower() not in ('from', 'to', 'cc'):
continue
say('Checking encoding: %s: %s' % (key, val))
assert('utf' not in val)
# This message broke our HTML engine that one time
search_md = mp.search('from:heretic', 'subject:outcome').result
result_md = search_md['data']['metadata'][search_md['thread_ids'][0]]
view_md = mp.view('=%s' % result_md['mid'])
assert('Outcome' in view_md.as_html())
def test_composition():
say("Testing composition")
# Create a message...
new_mid = mp.message_compose().result['thread_ids'][0]
assert(mp.search('tag:drafts').result['stats']['count'] == 0)
assert(mp.search('tag:blank').result['stats']['count'] == 1)
assert(mp.search('tag:sent').result['stats']['count'] == 0)
assert(not os.path.exists(mailpile_sent))
# Edit the message (moves from Blank to Draft, not findable in index)
msg_data = {
'to': ['%s#%s' % (MY_FROM, MY_KEYID)],
'bcc': ['[email protected]#%s' % MY_KEYID],
'mid': [new_mid],
'subject': ['This the TESTMSG subject'],
'body': ['Hello world!'],
'attach-pgp-pubkey': ['yes']
}
mp.message_update(**msg_data)
assert(mp.search('tag:drafts').result['stats']['count'] == 1)
assert(mp.search('tag:blank').result['stats']['count'] == 0)
assert(mp.search('TESTMSG').result['stats']['count'] == 1)
assert(not os.path.exists(mailpile_sent))
# Send the message (moves from Draft to Sent, is findable via search)
del msg_data['subject']
msg_data['body'] = [
('Hello world... thisisauniquestring :) '+ICELANDIC)
]
mp.message_update_send(**msg_data)
assert(mp.search('tag:drafts').result['stats']['count'] == 0)
assert(mp.search('tag:blank').result['stats']['count'] == 0)
# First attempt to send should fail & record failure to event log
config.prefs.default_messageroute = 'default'
config.routes['default'] = {"command": '/no/such/file'}
mp.sendmail()
events = mp.eventlog('source=mailpile.plugins.compose.Sendit',
'data_mid=%s' % new_mid).result['events']
assert(len(events) == 1)
assert(events[0]['flags'] == 'i')
assert(len(mp.eventlog('incomplete').result['events']) == 1)
# Second attempt should succeed!
config.routes.default.command = '%s -i %%(rcpt)s' % mailpile_send
mp.sendmail()
events = mp.eventlog('source=mailpile.plugins.compose.Sendit',
'data_mid=%s' % new_mid).result['events']
assert(len(events) == 1)
assert(events[0]['flags'] == 'c')
assert(len(mp.eventlog('incomplete').result['events']) == 0)
# Verify that it actually got sent correctly
assert('the TESTMSG subject' in contents(mailpile_sent))
# This is the base64 encoding of thisisauniquestring
assert('dGhpc2lzYXVuaXF1ZXN0cmluZ' in contents(mailpile_sent))
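# (For reference: base64.b64encode(b'thisisauniquestring') gives
# b'dGhpc2lzYXVuaXF1ZXN0cmluZw=='; the trailing padding is not asserted
# because the encoded message body continues past the marker string.)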
assert('encryption: ' not in contents(mailpile_sent).lower())
assert('attach-pgp-pubkey: ' not in contents(mailpile_sent).lower())
assert('x-mailpile-' not in contents(mailpile_sent))
assert(MY_KEYID not in contents(mailpile_sent))
assert(MY_FROM in grep('X-Args', mailpile_sent))
assert('[email protected]' in grep('X-Args', mailpile_sent))
assert('[email protected]' not in grepv('X-Args', mailpile_sent))
for search in (['tag:sent'],
['bcc:[email protected]'],
['thisisauniquestring'],
['thisisauniquestring'] + MY_FROM.split(),
['thisisauniquestring',
'in:mp_sig-verified', 'in:mp_enc-none', 'in:sent'],
['subject:TESTMSG']):
say('Searching for: %s' % search)
assert(mp.search(*search).result['stats']['count'] == 1)
# This is the base64 encoding of thisisauniquestring
assert('dGhpc2lzYXVuaXF1ZXN0cmluZ' in contents(mailpile_sent))
assert('OpenPGP: id=CF5E' in contents(mailpile_sent))
assert('Encryption key for' in contents(mailpile_sent))
assert('; preference=encrypt' in contents(mailpile_sent))
assert('[email protected]' not in grepv('X-Args', mailpile_sent))
os.remove(mailpile_sent)
# Test the send method's "bounce" capability
mp.message_send(mid=[new_mid], to=['[email protected]'])
mp.sendmail()
# This is the base64 encoding of thisisauniquestring
assert('dGhpc2lzYXVuaXF1ZXN0cmluZ' in contents(mailpile_sent))
assert('OpenPGP: id=CF5E' in contents(mailpile_sent))
assert('; preference=encrypt' in contents(mailpile_sent))
assert('[email protected]' not in grepv('X-Args', mailpile_sent))
assert('-i [email protected]' in contents(mailpile_sent))
def test_smtp():
config.prepare_workers(mp._session, daemons=True)
new_mid = mp.message_compose().result['thread_ids'][0]
msg_data = {
'from': ['%s#%s' % (MY_FROM, MY_KEYID)],
'mid': [new_mid],
'subject': ['This the OTHER TESTMSG...'],
'body': ['Hello SMTP world!']
}
config.prefs.default_messageroute = 'default'
config.prefs.always_bcc_self = False
config.routes['default'] = {
'protocol': 'smtp',
'host': 'localhost',
'port': 33415
}
mp.message_update(**msg_data)
mp.message_send(mid=[new_mid], to=['[email protected]'])
mp.sendmail()
config.stop_workers()
def test_html():
say("Testing HTML")
mp.output("jhtml")
assert('<bang>' in '%s' % mp.search('in:inbox').as_html())
mp.output("text")
try:
do_setup()
if '-n' not in sys.argv:
test_vcards()
test_load_save_rescan()
test_message_data()
test_html()
test_composition()
test_smtp()
if '-v' not in sys.argv:
sys.stderr.write("\nTests passed, woot!\n")
else:
say("Tests passed, woot!")
except:
sys.stderr.write("\nTests FAILED!\n")
print
traceback.print_exc()
##[ Interactive mode ]########################################################
if '-i' in sys.argv:
mp.set('prefs/vcard/importers/gravatar/0/active = true')
mp.set('prefs/vcard/importers/gpg/0/active = true')
mp._session.ui = ui
print '%s' % mp.help_splash()
mp.Interact()
##[ Cleanup ]#################################################################
os.system('rm -rf %s' % mailpile_home)
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import logging
import os
import django
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from django.utils import timezone
from django.utils import unittest
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from horizon import exceptions
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.dashboards.identity.projects import workflows
from openstack_dashboard import policy_backend
from openstack_dashboard.test import helpers as test
from openstack_dashboard import usage
from openstack_dashboard.usage import quotas
with_sel = os.environ.get('WITH_SELENIUM', False)
if with_sel:
from selenium.webdriver import ActionChains # noqa
from selenium.webdriver.common import keys
from socket import timeout as socket_timeout # noqa
INDEX_URL = reverse('horizon:identity:projects:index')
USER_ROLE_PREFIX = workflows.PROJECT_USER_MEMBER_SLUG + "_role_"
GROUP_ROLE_PREFIX = workflows.PROJECT_GROUP_MEMBER_SLUG + "_role_"
PROJECT_DETAIL_URL = reverse('horizon:identity:projects:detail', args=[1])
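# The membership workflow forms post role assignments as keys of the form
# '<prefix><role_id>' mapped to lists of ids, e.g.
# {USER_ROLE_PREFIX + '1': ['3']} assigns role 1 to user 3 in the project.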
class TenantsViewTests(test.BaseAdminViewTests):
@test.create_stubs({api.keystone: ('tenant_list',)})
def test_index(self):
api.keystone.tenant_list(IsA(http.HttpRequest),
domain=None,
paginate=True,
marker=None) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'identity/projects/index.html')
self.assertItemsEqual(res.context['table'].data, self.tenants.list())
@test.create_stubs({api.keystone: ('tenant_list', )})
def test_index_with_domain_context(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
domain_tenants = [tenant for tenant in self.tenants.list()
if tenant.domain_id == domain.id]
api.keystone.tenant_list(IsA(http.HttpRequest),
domain=domain.id,
paginate=True,
marker=None) \
.AndReturn([domain_tenants, False])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'identity/projects/index.html')
self.assertItemsEqual(res.context['table'].data, domain_tenants)
self.assertContains(res, "<em>test_domain:</em>")
class ProjectsViewNonAdminTests(test.TestCase):
@override_settings(POLICY_CHECK_FUNCTION=policy_backend.check)
@test.create_stubs({api.keystone: ('tenant_list',)})
def test_index(self):
api.keystone.tenant_list(IsA(http.HttpRequest),
user=self.user.id,
paginate=True,
marker=None,
admin=False) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'identity/projects/index.html')
self.assertItemsEqual(res.context['table'].data, self.tenants.list())
class CreateProjectWorkflowTests(test.BaseAdminViewTests):
def _get_project_info(self, project):
domain = self._get_default_domain()
project_info = {"name": project.name,
"description": project.description,
"enabled": project.enabled,
"domain": domain.id}
return project_info
def _get_workflow_fields(self, project):
domain = self._get_default_domain()
project_info = {"domain_id": domain.id,
"domain_name": domain.name,
"name": project.name,
"description": project.description,
"enabled": project.enabled}
return project_info
def _get_quota_info(self, quota):
cinder_quota = self.cinder_quotas.first()
neutron_quota = self.neutron_quotas.first()
quota_data = {}
for field in quotas.NOVA_QUOTA_FIELDS:
quota_data[field] = int(quota.get(field).limit)
for field in quotas.CINDER_QUOTA_FIELDS:
quota_data[field] = int(cinder_quota.get(field).limit)
for field in quotas.NEUTRON_QUOTA_FIELDS:
quota_data[field] = int(neutron_quota.get(field).limit)
return quota_data
def _get_workflow_data(self, project, quota):
project_info = self._get_workflow_fields(project)
quota_data = self._get_quota_info(quota)
project_info.update(quota_data)
return project_info
def _get_default_domain(self):
default_domain = self.domain
domain = {"id": self.request.session.get('domain_context',
default_domain.id),
"name": self.request.session.get('domain_context_name',
default_domain.name)}
return api.base.APIDictWrapper(domain)
def _get_all_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
def _get_all_groups(self, domain_id):
if not domain_id:
groups = self.groups.list()
else:
groups = [group for group in self.groups.list()
if group.domain_id == domain_id]
return groups
@test.create_stubs({api.keystone: ('get_default_domain',
'get_default_role',
'user_list',
'group_list',
'role_list'),
api.base: ('is_service_enabled',),
api.neutron: ('is_extension_supported',),
quotas: ('get_default_quota_data',)})
def test_add_project_get(self):
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.MultipleTimes().AndReturn(True)
api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
.MultipleTimes().AndReturn(True)
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:create')
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertContains(res, '<input type="hidden" name="subnet" '
'id="id_subnet" />', html=True)
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name,
workflows.CreateProject.name)
step = workflow.get_step("createprojectinfoaction")
self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
self.assertEqual(step.action.initial['injected_files'],
quota.get('injected_files').limit)
self.assertQuerysetEqual(
workflow.steps,
['<CreateProjectInfo: createprojectinfoaction>',
'<UpdateProjectMembers: update_members>',
'<UpdateProjectGroups: update_group_members>',
'<CreateProjectQuota: create_quotas>'])
def test_add_project_get_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_get()
@test.create_stubs({api.keystone: ('get_default_role',
'user_list',
'group_list',
'role_list',
'domain_get'),
api.neutron: ('is_extension_supported',
'tenant_quota_get'),
quotas: ('get_default_quota_data',)})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_add_project_get_with_neutron(self):
quota = self.quotas.first()
neutron_quotas = self.neutron_quotas.first()
quotas.get_default_quota_data(IsA(http.HttpRequest)) \
.AndReturn(quota)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.MultipleTimes().AndReturn(True)
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
api.neutron.tenant_quota_get(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(neutron_quotas)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(self.roles.first())
api.keystone.user_list(IsA(http.HttpRequest), domain=None) \
.AndReturn(self.users.list())
api.keystone.role_list(IsA(http.HttpRequest)) \
.AndReturn(self.roles.list())
api.keystone.group_list(IsA(http.HttpRequest), domain=None) \
.AndReturn(self.groups.list())
api.keystone.role_list(IsA(http.HttpRequest)) \
.AndReturn(self.roles.list())
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:identity:projects:create'))
self.assertTemplateUsed(res, views.WorkflowView.template_name)
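# Django >= 1.6 renders IntegerField widgets as <input type="number">,
# while older versions render type="text", hence the version branch below.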
if django.VERSION >= (1, 6):
self.assertContains(res, '''
<input class="form-control"
id="id_subnet" min="-1"
name="subnet" type="number" value="10" />
''', html=True)
else:
self.assertContains(res, '''
<input class="form-control"
name="subnet" id="id_subnet"
value="10" type="text" />
''', html=True)
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name,
workflows.CreateProject.name)
step = workflow.get_step("createprojectinfoaction")
self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
self.assertEqual(step.action.initial['subnet'],
neutron_quotas.get('subnet').limit)
@test.create_stubs({api.keystone: ('get_default_role',
'add_tenant_user_role',
'tenant_create',
'user_list',
'group_list',
'role_list',
'domain_get'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages',),
api.cinder: ('tenant_quota_update',),
api.nova: ('tenant_quota_update',)})
def test_add_project_post(self, neutron=False):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
if neutron:
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
workflow_data = {}
for role in roles:
if USER_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[USER_ROLE_PREFIX + role.id]
for user_id in ulist:
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user=user_id,
role=role.id)
for role in roles:
if GROUP_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[GROUP_ROLE_PREFIX + role.id]
for group_id in ulist:
api.keystone.add_group_role(IsA(http.HttpRequest),
role=role.id,
group=group_id,
project=self.tenant.id)
nova_updated_quota = dict([(key, quota_data[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota)
cinder_updated_quota = dict([(key, quota_data[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
api.cinder.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**cinder_updated_quota)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_post_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_post()
@test.create_stubs({api.neutron: ('is_extension_supported',
'tenant_quota_update')})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_add_project_post_with_neutron(self):
quota_data = self.neutron_quotas.first()
neutron_updated_quota = dict([(key, quota_data.get(key).limit)
for key in quotas.NEUTRON_QUOTA_FIELDS])
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.MultipleTimes().AndReturn(True)
api.neutron.tenant_quota_update(IsA(http.HttpRequest),
self.tenant.id,
**neutron_updated_quota)
self.test_add_project_post(neutron=True)
@test.create_stubs({api.keystone: ('user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas')})
def test_add_project_quota_defaults_error(self):
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)) \
.AndRaise(self.exceptions.nova)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:create')
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertContains(res, "Unable to retrieve default quota values")
def test_add_project_quota_defaults_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_quota_defaults_error()
@test.create_stubs({api.keystone: ('tenant_create',
'user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages')})
def test_add_project_tenant_create_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
workflow_data = self._get_workflow_data(project, quota)
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_tenant_create_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_tenant_create_error()
@test.create_stubs({api.keystone: ('tenant_create',
'user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role',
'add_tenant_user_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages'),
api.nova: ('tenant_quota_update',)})
def test_add_project_quota_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
workflow_data = {}
for role in roles:
if USER_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[USER_ROLE_PREFIX + role.id]
for user_id in ulist:
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user=user_id,
role=role.id)
for role in roles:
if GROUP_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[GROUP_ROLE_PREFIX + role.id]
for group_id in ulist:
api.keystone.add_group_role(IsA(http.HttpRequest),
role=role.id,
group=group_id,
project=self.tenant.id)
nova_updated_quota = dict([(key, quota_data[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_quota_update_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_quota_update_error()
@test.create_stubs({api.keystone: ('tenant_create',
'user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role',
'add_tenant_user_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages'),
api.cinder: ('tenant_quota_update',),
api.nova: ('tenant_quota_update',)})
def test_add_project_user_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
workflow_data = {}
for role in roles:
if USER_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[USER_ROLE_PREFIX + role.id]
for user_id in ulist:
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user=user_id,
role=role.id) \
.AndRaise(self.exceptions.keystone)
break
break
nova_updated_quota = dict([(key, quota_data[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota)
cinder_updated_quota = dict([(key, quota_data[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
api.cinder.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**cinder_updated_quota)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_user_update_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_user_update_error()
@test.create_stubs({api.keystone: ('user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages')})
def test_add_project_missing_field_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
self.mox.ReplayAll()
workflow_data = self._get_workflow_data(project, quota)
workflow_data["name"] = ""
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertContains(res, "field is required")
def test_add_project_missing_field_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_missing_field_error()
class UpdateProjectWorkflowTests(test.BaseAdminViewTests):
def _get_quota_info(self, quota):
cinder_quota = self.cinder_quotas.first()
neutron_quota = self.neutron_quotas.first()
quota_data = {}
for field in quotas.NOVA_QUOTA_FIELDS:
quota_data[field] = int(quota.get(field).limit)
for field in quotas.CINDER_QUOTA_FIELDS:
quota_data[field] = int(cinder_quota.get(field).limit)
for field in quotas.NEUTRON_QUOTA_FIELDS:
quota_data[field] = int(neutron_quota.get(field).limit)
return quota_data
def _get_all_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
def _get_all_groups(self, domain_id):
if not domain_id:
groups = self.groups.list()
else:
groups = [group for group in self.groups.list()
if group.domain_id == domain_id]
return groups
def _get_proj_users(self, project_id):
return [user for user in self.users.list()
if user.project_id == project_id]
def _get_proj_groups(self, project_id):
return [group for group in self.groups.list()
if group.project_id == project_id]
def _get_proj_role_assignment(self, project_id):
project_scope = {'project': {'id': project_id}}
return self.role_assignments.filter(scope=project_scope)
def _check_role_list(self, keystone_api_version, role_assignments, groups,
proj_users, roles, workflow_data):
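"""Record the expected keystone calls for the member/group role updates.
For keystone API v3 the posted role assignments are also written into
``workflow_data``; for v2 the per-user role lookups are stubbed instead."""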
if keystone_api_version >= 3:
# admin role with attempt to remove current admin, results in
# warning message
workflow_data[USER_ROLE_PREFIX + "1"] = ['3']
# member role
workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '3']
# admin role
workflow_data[GROUP_ROLE_PREFIX + "1"] = ['2', '3']
# member role
workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3']
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
# Give user 1 role 2
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='1',
role='2',)
# remove role 2 from user 2
api.keystone.remove_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='2',
role='2')
# Give user 3 role 1
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='3',
role='1',)
api.keystone.group_list(IsA(http.HttpRequest),
domain=self.domain.id,
project=self.tenant.id) \
.AndReturn(groups)
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='1',
project=self.tenant.id) \
.AndReturn(roles)
api.keystone.remove_group_role(IsA(http.HttpRequest),
project=self.tenant.id,
group='1',
role='1')
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='2',
project=self.tenant.id) \
.AndReturn(roles)
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='3',
project=self.tenant.id) \
.AndReturn(roles)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
# admin user - try to remove all roles on current project, warning
api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
self.tenant.id).AndReturn(roles)
# member user 1 - has role 1, will remove it
api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
self.tenant.id).AndReturn((roles[1],))
# member user 3 - has role 2
api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
self.tenant.id).AndReturn((roles[0],))
# add role 2
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='3',
role='2')\
.AndRaise(self.exceptions.keystone)
@test.create_stubs({api.keystone: ('get_default_role',
'roles_for_user',
'tenant_get',
'domain_get',
'user_list',
'roles_for_group',
'group_list',
'role_list',
'role_assignments_list'),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas')})
def test_update_project_get(self):
keystone_api_version = api.keystone.VERSIONS.active
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
proj_users = self._get_proj_users(project.id)
role_assignments = self._get_proj_role_assignment(project.id)
api.keystone.tenant_get(IsA(http.HttpRequest),
self.tenant.id, admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
if keystone_api_version >= 3:
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
for user in proj_users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name,
workflows.UpdateProject.name)
step = workflow.get_step("update_info")
self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
self.assertEqual(step.action.initial['injected_files'],
quota.get('injected_files').limit)
self.assertEqual(step.action.initial['name'], project.name)
self.assertEqual(step.action.initial['description'],
project.description)
self.assertQuerysetEqual(
workflow.steps,
['<UpdateProjectInfo: update_info>',
'<UpdateProjectMembers: update_members>',
'<UpdateProjectGroups: update_group_members>',
'<UpdateProjectQuota: update_quotas>'])
@test.create_stubs({api.keystone: ('tenant_get',
'domain_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user_role',
'add_tenant_user_role',
'user_list',
'roles_for_group',
'remove_group_role',
'add_group_role',
'group_list',
'role_list',
'role_assignments_list'),
api.nova: ('tenant_quota_update',),
api.cinder: ('tenant_quota_update',),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas',
'tenant_quota_usages')})
def test_update_project_save(self, neutron=False):
keystone_api_version = api.keystone.VERSIONS.active
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
proj_users = self._get_proj_users(project.id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
role_assignments = self._get_proj_role_assignment(project.id)
quota_usages = self.quota_usages.first()
# get/init
api.keystone.tenant_get(IsA(http.HttpRequest),
self.tenant.id, admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
if neutron:
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
workflow_data = {}
if keystone_api_version >= 3:
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
for user in proj_users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
workflow_data[USER_ROLE_PREFIX + "1"] = ['3'] # admin role
workflow_data[USER_ROLE_PREFIX + "2"] = ['2'] # member role
# Group assignment form data
workflow_data[GROUP_ROLE_PREFIX + "1"] = ['3'] # admin role
workflow_data[GROUP_ROLE_PREFIX + "2"] = ['2'] # member role
# update some fields
project._info["domain_id"] = domain_id
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota.metadata_items = 444
quota.volumes = 444
updated_project = {"name": project._info["name"],
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# handle
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project) \
.AndReturn(project)
self._check_role_list(keystone_api_version, role_assignments, groups,
proj_users, roles, workflow_data)
quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
.AndReturn(quota_usages)
nova_updated_quota = dict([(key, updated_quota[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota)
cinder_updated_quota = dict([(key, updated_quota[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
api.cinder.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**cinder_updated_quota)
self.mox.ReplayAll()
# submit form data
project_data = {"domain_id": project._info["domain_id"],
"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertMessageCount(error=0, warning=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('is_extension_supported',
'tenant_quota_get',
'tenant_quota_update')})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_update_project_save_with_neutron(self):
quota_data = self.neutron_quotas.first()
neutron_updated_quota = dict([(key, quota_data.get(key).limit)
for key in quotas.NEUTRON_QUOTA_FIELDS])
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.MultipleTimes().AndReturn(True)
api.neutron.tenant_quota_get(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota_data)
api.neutron.tenant_quota_update(IsA(http.HttpRequest),
self.tenant.id,
**neutron_updated_quota)
self.test_update_project_save(neutron=True)
@test.create_stubs({api.keystone: ('tenant_get',)})
def test_update_project_get_error(self):
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('tenant_get',
'domain_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user',
'add_tenant_user_role',
'user_list',
'roles_for_group',
'remove_group_role',
'add_group_role',
'group_list',
'role_list',
'role_assignments_list'),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas',
'tenant_quota_usages',),
api.nova: ('tenant_quota_update',)})
def test_update_project_tenant_update_error(self):
keystone_api_version = api.keystone.VERSIONS.active
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
proj_users = self._get_proj_users(project.id)
role_assignments = self.role_assignments.list()
quota_usages = self.quota_usages.first()
# get/init
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
workflow_data = {}
if keystone_api_version >= 3:
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
for user in proj_users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
role_ids = [role.id for role in roles]
for user in proj_users:
if role_ids:
workflow_data.setdefault(USER_ROLE_PREFIX + role_ids[0], []) \
.append(user.id)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
role_ids = [role.id for role in roles]
for group in groups:
if role_ids:
workflow_data.setdefault(GROUP_ROLE_PREFIX + role_ids[0], []) \
.append(group.id)
# update some fields
project._info["domain_id"] = domain_id
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota.metadata_items = 444
quota.volumes = 444
updated_project = {"name": project._info["name"],
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# handle
quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
.AndReturn(quota_usages)
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
# submit form data
project_data = {"domain_id": project._info["domain_id"],
"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('tenant_get',
'domain_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user_role',
'add_tenant_user_role',
'user_list',
'roles_for_group',
'remove_group_role',
'add_group_role',
'group_list',
'role_list',
'role_assignments_list'),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas',
'tenant_quota_usages',),
api.nova: ('tenant_quota_update',)})
def test_update_project_quota_update_error(self):
keystone_api_version = api.keystone.VERSIONS.active
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
proj_users = self._get_proj_users(project.id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
role_assignments = self._get_proj_role_assignment(project.id)
quota_usages = self.quota_usages.first()
# get/init
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
workflow_data = {}
if keystone_api_version >= 3:
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
for user in proj_users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
workflow_data[USER_ROLE_PREFIX + "1"] = ['1', '3'] # admin role
workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '2', '3'] # member role
# Group role assignment data
workflow_data[GROUP_ROLE_PREFIX + "1"] = ['1', '3'] # admin role
workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3'] # member role
# update some fields
project._info["domain_id"] = domain_id
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota[0].limit = 444
quota[1].limit = -1
updated_project = {"name": project._info["name"],
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# handle
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project) \
.AndReturn(project)
self._check_role_list(keystone_api_version, role_assignments, groups,
proj_users, roles, workflow_data)
quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
.AndReturn(quota_usages)
nova_updated_quota = dict([(key, updated_quota[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
# submit form data
project_data = {"domain_id": project._info["domain_id"],
"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertMessageCount(error=2, warning=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('tenant_get',
'domain_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user_role',
'add_tenant_user_role',
'user_list',
'roles_for_group',
'remove_group_role',
'add_group_role',
'group_list',
'role_list',
'role_assignments_list'),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas',
'tenant_quota_usages')})
def test_update_project_member_update_error(self):
keystone_api_version = api.keystone.VERSIONS.active
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
proj_users = self._get_proj_users(project.id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
role_assignments = self._get_proj_role_assignment(project.id)
quota_usages = self.quota_usages.first()
# get/init
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
workflow_data = {}
if keystone_api_version >= 3:
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
for user in proj_users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
workflow_data[USER_ROLE_PREFIX + "1"] = ['1', '3'] # admin role
workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '2', '3'] # member role
workflow_data[GROUP_ROLE_PREFIX + "1"] = ['1', '3'] # admin role
workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3'] # member role
# update some fields
project._info["domain_id"] = domain_id
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota.metadata_items = 444
quota.volumes = 444
updated_project = {"name": project._info["name"],
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# handle
quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
.AndReturn(quota_usages)
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project) \
.AndReturn(project)
self._check_role_list(keystone_api_version, role_assignments, groups,
proj_users, roles, workflow_data)
self.mox.ReplayAll()
# submit form data
project_data = {"domain_id": project._info["domain_id"],
"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertMessageCount(error=2, warning=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
# Django 1.7 and later do not handle the thrown keystoneclient
# exception well enough.
# TODO(mrunge): re-check when django-1.8 is stable
@unittest.skipIf(django.VERSION >= (1, 7, 0),
'Currently skipped with Django >= 1.7')
@test.create_stubs({api.keystone: ('get_default_role',
'tenant_get',
'domain_get'),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas')})
def test_update_project_when_default_role_does_not_exist(self):
project = self.tenants.first()
domain_id = project.domain_id
quota = self.quotas.first()
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(None) # Default role doesn't exist
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
try:
# Avoid the log message in the test output when the workflow's
# step action cannot be instantiated
logging.disable(logging.ERROR)
with self.assertRaises(exceptions.NotFound):
self.client.get(url)
finally:
logging.disable(logging.NOTSET)
class UsageViewTests(test.BaseAdminViewTests):
def _stub_nova_api_calls(self, nova_stu_enabled=True):
self.mox.StubOutWithMock(api.nova, 'usage_get')
self.mox.StubOutWithMock(api.nova, 'tenant_absolute_limits')
self.mox.StubOutWithMock(api.nova, 'extension_supported')
self.mox.StubOutWithMock(api.cinder, 'tenant_absolute_limits')
api.nova.extension_supported(
'SimpleTenantUsage', IsA(http.HttpRequest)) \
.AndReturn(nova_stu_enabled)
def _stub_neutron_api_calls(self, neutron_sg_enabled=True):
self.mox.StubOutWithMock(api.neutron, 'is_extension_supported')
self.mox.StubOutWithMock(api.network, 'floating_ip_supported')
self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
if neutron_sg_enabled:
self.mox.StubOutWithMock(api.network, 'security_group_list')
api.neutron.is_extension_supported(
IsA(http.HttpRequest),
'security-group').AndReturn(neutron_sg_enabled)
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
if neutron_sg_enabled:
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.q_secgroups.list())
def test_usage_csv(self):
self._test_usage_csv(nova_stu_enabled=True)
def test_usage_csv_disabled(self):
self._test_usage_csv(nova_stu_enabled=False)
def _test_usage_csv(self, nova_stu_enabled=True):
now = timezone.now()
usage_obj = api.nova.NovaUsage(self.usages.first())
self._stub_nova_api_calls(nova_stu_enabled)
api.nova.extension_supported(
'SimpleTenantUsage', IsA(http.HttpRequest)) \
.AndReturn(nova_stu_enabled)
start = datetime.datetime(now.year, now.month, 1, 0, 0, 0, 0)
end = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0)
if nova_stu_enabled:
api.nova.usage_get(IsA(http.HttpRequest),
self.tenant.id,
start, end).AndReturn(usage_obj)
api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
.AndReturn(self.limits['absolute'])
api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)) \
.AndReturn(self.cinder_limits['absolute'])
self._stub_neutron_api_calls()
self.mox.ReplayAll()
project_id = self.tenants.first().id
csv_url = reverse('horizon:identity:projects:usage',
args=[project_id]) + "?format=csv"
res = self.client.get(csv_url)
self.assertTemplateUsed(res, 'project/overview/usage.csv')
self.assertTrue(isinstance(res.context['usage'], usage.ProjectUsage))
hdr = ('Instance Name,VCPUs,RAM (MB),Disk (GB),Usage (Hours),'
'Time since created (Seconds),State')
self.assertContains(res, '%s\r\n' % hdr)
class DetailProjectViewTests(test.BaseAdminViewTests):
@test.create_stubs({api.keystone: ('tenant_get',)})
def test_detail_view(self):
project = self.tenants.first()
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(project)
self.mox.ReplayAll()
res = self.client.get(PROJECT_DETAIL_URL, args=[project.id])
self.assertTemplateUsed(res, 'identity/projects/detail.html')
self.assertEqual(res.context['project'].name, project.name)
self.assertEqual(res.context['project'].id, project.id)
self.assertContains(res, "Project Details: %s" % project.name,
1, 200)
@test.create_stubs({api.keystone: ('tenant_get',)})
def test_detail_view_with_exception(self):
project = self.tenants.first()
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
res = self.client.get(PROJECT_DETAIL_URL, args=[project.id])
self.assertRedirectsNoFollow(res, INDEX_URL)
@unittest.skipUnless(os.environ.get('WITH_SELENIUM', False),
"The WITH_SELENIUM env variable is not set.")
class SeleniumTests(test.SeleniumAdminTestCase):
@test.create_stubs(
{api.keystone: ('tenant_list', 'tenant_get', 'tenant_update')})
def test_inline_editing_update(self):
# Tenant List
api.keystone.tenant_list(IgnoreArg(),
domain=None,
marker=None,
paginate=True) \
.AndReturn([self.tenants.list(), False])
# Edit mode
api.keystone.tenant_get(IgnoreArg(),
u'1',
admin=True) \
.AndReturn(self.tenants.list()[0])
# Update - requires get and update
api.keystone.tenant_get(IgnoreArg(),
u'1',
admin=True) \
.AndReturn(self.tenants.list()[0])
api.keystone.tenant_update(
IgnoreArg(),
u'1',
description='a test tenant.',
enabled=True,
name=u'Changed test_tenant')
# Refreshing cell with changed name
changed_tenant = copy.copy(self.tenants.list()[0])
changed_tenant.name = u'Changed test_tenant'
api.keystone.tenant_get(IgnoreArg(),
u'1',
admin=True) \
.AndReturn(changed_tenant)
self.mox.ReplayAll()
self.selenium.get("%s%s" % (self.live_server_url, INDEX_URL))
# Check the presence of the important elements
td_element = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']")
cell_wrapper = td_element.find_element_by_class_name(
'table_cell_wrapper')
edit_button_wrapper = td_element.find_element_by_class_name(
'table_cell_action')
edit_button = edit_button_wrapper.find_element_by_tag_name('button')
# Hovering over td and clicking on edit button
action_chains = ActionChains(self.selenium)
action_chains.move_to_element(cell_wrapper).click(edit_button)
action_chains.perform()
# Waiting for the AJAX response for switching to editing mode
wait = self.ui.WebDriverWait(self.selenium, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: self.selenium.find_element_by_name("name__1"))
# Changing project name in cell form
td_element = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']")
name_input = td_element.find_element_by_tag_name('input')
name_input.send_keys(keys.Keys.HOME)
name_input.send_keys("Changed ")
# Saving the new project name via AJAX
td_element.find_element_by_class_name('inline-edit-submit').click()
# Waiting for the AJAX response of cell refresh
wait = self.ui.WebDriverWait(self.selenium, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']"
"/div[@class='table_cell_wrapper']"
"/div[@class='table_cell_data_wrapper']"))
# Checking new project name after cell refresh
data_wrapper = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']"
"/div[@class='table_cell_wrapper']"
"/div[@class='table_cell_data_wrapper']")
self.assertTrue(data_wrapper.text == u'Changed test_tenant',
"Error: saved tenant name is expected to be "
"'Changed test_tenant'")
@test.create_stubs(
{api.keystone: ('tenant_list', 'tenant_get')})
def test_inline_editing_cancel(self):
# Tenant List
api.keystone.tenant_list(IgnoreArg(),
domain=None,
marker=None,
paginate=True) \
.AndReturn([self.tenants.list(), False])
# Edit mode
api.keystone.tenant_get(IgnoreArg(),
u'1',
admin=True) \
.AndReturn(self.tenants.list()[0])
# Cancelling edit mode does not send a request
self.mox.ReplayAll()
self.selenium.get("%s%s" % (self.live_server_url, INDEX_URL))
# Check the presence of the important elements
td_element = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']")
cell_wrapper = td_element.find_element_by_class_name(
'table_cell_wrapper')
edit_button_wrapper = td_element.find_element_by_class_name(
'table_cell_action')
edit_button = edit_button_wrapper.find_element_by_tag_name('button')
# Hovering over td and clicking on edit
action_chains = ActionChains(self.selenium)
action_chains.move_to_element(cell_wrapper).click(edit_button)
action_chains.perform()
# Waiting for the AJAX response for switching to editing mode
wait = self.ui.WebDriverWait(self.selenium, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: self.selenium.find_element_by_name("name__1"))
# Click on cancel button
td_element = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']")
td_element.find_element_by_class_name('inline-edit-cancel').click()
# Cancel is via javascript, so it should be immediate
# Checking that tenant name is not changed
data_wrapper = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']"
"/div[@class='table_cell_wrapper']"
"/div[@class='table_cell_data_wrapper']")
self.assertTrue(data_wrapper.text == u'test_tenant',
"Error: saved tenant name is expected to be "
"'test_tenant'")
@test.create_stubs({api.keystone: ('get_default_domain',
'get_default_role',
'user_list',
'group_list',
'role_list'),
api.base: ('is_service_enabled',),
quotas: ('get_default_quota_data',)})
def test_membership_list_loads_correctly(self):
member_css_class = ".available_members"
users = self.users.list()
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.MultipleTimes().AndReturn(False)
api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
.MultipleTimes().AndReturn(False)
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(self.domain)
quotas.get_default_quota_data(IsA(http.HttpRequest)) \
.AndReturn(self.quotas.first())
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(self.roles.first())
api.keystone.user_list(IsA(http.HttpRequest), domain=self.domain.id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.AndReturn(self.roles.list())
api.keystone.group_list(IsA(http.HttpRequest), domain=self.domain.id) \
.AndReturn(self.groups.list())
api.keystone.role_list(IsA(http.HttpRequest)) \
.AndReturn(self.roles.list())
self.mox.ReplayAll()
self.selenium.get("%s%s" %
(self.live_server_url,
reverse('horizon:identity:projects:create')))
members = self.selenium.find_element_by_css_selector(member_css_class)
for user in users:
self.assertIn(user.name, members.text)
|
|
import os
import click
from ..__about__ import __version__
from ..config.constants import AppEnvVars, ConfigEnvVars
from ..project.core import Project
from ..utils.ci import running_in_ci
from ..utils.fs import Path
from .application import Application
from .build import build
from .clean import clean
from .config import config
from .dep import dep
from .env import env
from .new import new
from .publish import publish
from .run import run
from .shell import shell
from .status import status
from .version import version
@click.group(context_settings={'help_option_names': ['-h', '--help']}, invoke_without_command=True)
@click.option(
'--env',
'-e',
'env_name',
envvar=AppEnvVars.ENV,
default='default',
help='The name of the environment to use [env var: `HATCH_ENV`]',
)
@click.option(
'--project',
'-p',
envvar=ConfigEnvVars.PROJECT,
help='The name of the project to work on [env var: `HATCH_PROJECT`]',
)
@click.option(
'--color/--no-color',
default=None,
help='Whether or not to display colored output (default is auto-detection) [env vars: `FORCE_COLOR`/`NO_COLOR`]',
)
@click.option(
'--interactive/--no-interactive',
envvar=AppEnvVars.INTERACTIVE,
default=None,
help=(
'Whether or not to allow features like prompts and progress bars (default is auto-detection) '
'[env var: `HATCH_INTERACTIVE`]'
),
)
@click.option(
'--verbose',
'-v',
envvar=AppEnvVars.VERBOSE,
count=True,
help='Increase verbosity (can be used additively) [env var: `HATCH_VERBOSE`]',
)
@click.option(
'--quiet',
'-q',
envvar=AppEnvVars.QUIET,
count=True,
help='Decrease verbosity (can be used additively) [env var: `HATCH_QUIET`]',
)
@click.option(
'--data-dir',
envvar=ConfigEnvVars.DATA,
help='The path to a custom directory used to persist data [env var: `HATCH_DATA_DIR`]',
)
@click.option(
'--cache-dir',
envvar=ConfigEnvVars.CACHE,
help='The path to a custom directory used to cache data [env var: `HATCH_CACHE_DIR`]',
)
@click.option(
'--config',
'config_file',
envvar=ConfigEnvVars.CONFIG,
help='The path to a custom config file to use [env var: `HATCH_CONFIG`]',
)
@click.version_option(version=__version__, prog_name='Hatch')
@click.pass_context
def hatch(ctx: click.Context, env_name, project, color, interactive, verbose, quiet, data_dir, cache_dir, config_file):
"""
\b
_ _ _ _
| | | | | | | |
| |_| | __ _| |_ ___| |__
| _ |/ _` | __/ __| '_ \\
| | | | (_| | || (__| | | |
\\_| |_/\\__,_|\\__\\___|_| |_|
"""
if color is None:
if os.environ.get(AppEnvVars.NO_COLOR) == '1':
color = False
elif os.environ.get(AppEnvVars.FORCE_COLOR) == '1':
color = True
if interactive is None:
interactive = not running_in_ci()
app = Application(ctx.exit, verbose - quiet, color, interactive)
app.env_active = os.environ.get(AppEnvVars.ENV_ACTIVE)
if app.env_active and ctx.get_parameter_source('env_name').name == 'DEFAULT': # type: ignore
app.env = app.env_active
else:
app.env = env_name
if config_file:
app.config_file.path = Path(config_file).resolve()
if not app.config_file.path.is_file():
app.abort(f'The selected config file `{str(app.config_file.path)}` does not exist.')
elif not app.config_file.path.is_file():
if app.verbose:
app.display_waiting('No config file found, creating one with default settings now...')
try:
app.config_file.restore()
if app.verbose:
app.display_success('Success! Please see `hatch config`.')
except OSError: # no cov
app.abort(
f'Unable to create config file located at `{str(app.config_file.path)}`. Please check your permissions.'
)
if not ctx.invoked_subcommand:
app.display_info(ctx.get_help())
return
# Persist app data for sub-commands
ctx.obj = app
try:
app.config_file.load()
except OSError as e: # no cov
app.abort(f'Error loading configuration: {e}')
app.config.terminal.styles.parse_fields()
errors = app.initialize_styles(app.config.terminal.styles.raw_data)
if errors and color is not False and not app.quiet: # no cov
for error in errors:
app.display_warning(error)
app.data_dir = Path(data_dir or app.config.dirs.data)
app.cache_dir = Path(cache_dir or app.config.dirs.cache)
if project:
app.project = Project.from_config(app.config, project)
if app.project is None or app.project.root is None:
app.abort(f'Unable to locate project {project}')
return
app.project = Project(Path.cwd())
if app.config.mode == 'local':
return
# The following logic is mostly duplicated for each branch so coverage can be asserted
elif app.config.mode == 'project':
if not app.config.project:
app.display_warning('Mode is set to `project` but no project is set, defaulting to the current directory')
return
possible_project = Project.from_config(app.config, app.config.project)
if possible_project is None:
app.display_warning(f'Unable to locate project {app.config.project}, defaulting to the current directory')
else:
app.project = possible_project
return
elif app.config.mode == 'aware' and app.project.root is None:
if not app.config.project:
app.display_warning('Mode is set to `aware` but no project is set, defaulting to the current directory')
return
possible_project = Project.from_config(app.config, app.config.project)
if possible_project is None:
app.display_warning(f'Unable to locate project {app.config.project}, defaulting to the current directory')
else:
app.project = possible_project
return
hatch.add_command(build)
hatch.add_command(clean)
hatch.add_command(config)
hatch.add_command(dep)
hatch.add_command(env)
hatch.add_command(new)
hatch.add_command(publish)
hatch.add_command(run)
hatch.add_command(shell)
hatch.add_command(status)
hatch.add_command(version)
def main(): # no cov
return hatch(windows_expand_args=False)
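# Minimal usage sketch (assumes nothing beyond click itself, which this module
# already imports): `--help` is handled eagerly by click, so invoking the group
# this way prints the help text and exits without touching any config or data
# directories.
if __name__ == '__main__':  # no cov
    from click.testing import CliRunner

    result = CliRunner().invoke(hatch, ['--help'])
    print(result.output)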
|
|
import unittest
import time
import random
import os
import signal
import asyncio
from unittest.mock import patch
import re
from collections import defaultdict
import json
import io
import aiohttp
from molotov.api import scenario, global_setup
from molotov.tests.support import (
TestLoop,
coserver,
dedicatedloop,
set_args,
skip_pypy,
only_pypy,
catch_sleep,
dedicatedloop_noclose,
)
from molotov.tests.statsd import UDPServer
from molotov.run import run, main
from molotov.sharedcounter import SharedCounters
from molotov.util import request, json_request, set_timer
from molotov.session import get_context
from molotov import __version__
_HERE = os.path.dirname(__file__)
_CONFIG = os.path.join(_HERE, "molotov.json")
_RES = []
_RES2 = {}
class TestRunner(TestLoop):
def setUp(self):
super(TestRunner, self).setUp()
_RES[:] = []
_RES2.clear()
def _get_args(self):
args = self.get_args()
args.statsd = True
args.statsd_address = "udp://127.0.0.1:9999"
args.scenario = "molotov.tests.test_run"
return args
@dedicatedloop_noclose
def test_redirect(self):
@scenario(weight=10)
async def _one(session):
# redirected
async with session.get("http://localhost:8888/redirect") as resp:
redirect = resp.history
assert redirect[0].status == 302
assert resp.status == 200
# not redirected
async with session.get(
"http://localhost:8888/redirect", allow_redirects=False
) as resp:
redirect = resp.history
assert len(redirect) == 0
assert resp.status == 302
content = await resp.text()
assert content == ""
_RES.append(1)
args = self._get_args()
args.verbose = 2
args.max_runs = 2
with coserver():
run(args)
self.assertTrue(len(_RES) > 0)
@dedicatedloop_noclose
def test_runner(self):
test_loop = asyncio.get_event_loop()
@global_setup()
def something_sync(args):
grab = request("http://localhost:8888")
self.assertEqual(grab["status"], 200)
grab_json = json_request("http://localhost:8888/molotov.json")
self.assertTrue("molotov" in grab_json["content"])
@scenario(weight=10)
async def here_one(session):
async with session.get("http://localhost:8888") as resp:
await resp.text()
_RES.append(1)
@scenario(weight=90)
async def here_two(session):
if get_context(session).statsd is not None:
get_context(session).statsd.increment("yopla")
_RES.append(2)
args = self._get_args()
server = UDPServer("127.0.0.1", 9999, loop=test_loop)
_stop = asyncio.Future()
async def stop():
await _stop
await server.stop()
server_task = asyncio.ensure_future(server.run())
stop_task = asyncio.ensure_future(stop())
args.max_runs = 3
args.duration = 9999
with coserver():
run(args)
_stop.set_result(True)
test_loop.run_until_complete(asyncio.gather(server_task, stop_task))
self.assertTrue(len(_RES) > 0)
udp = server.flush()
self.assertTrue(len(udp) > 0)
@dedicatedloop
def test_main(self):
with set_args("molotov", "-cq", "-d", "1", "molotov/tests/example.py"):
main()
def _test_molotov(self, *args):
if "--duration" not in args and "-d" not in args:
args = list(args) + ["--duration", "10"]
rc = 0
with set_args("molotov", *args) as (stdout, stderr):
try:
main()
except SystemExit as e:
rc = e.code
return stdout.read().strip(), stderr.read().strip(), rc
@dedicatedloop
def test_version(self):
stdout, stderr, rc = self._test_molotov("--version")
self.assertEqual(stdout, __version__)
@dedicatedloop
def test_empty_scenario(self):
stdout, stderr, rc = self._test_molotov("")
self.assertTrue("Cannot import" in stdout)
@dedicatedloop
def test_config_no_scenario(self):
stdout, stderr, rc = self._test_molotov("-c", "--config", _CONFIG, "DONTEXIST")
wanted = "Can't find 'DONTEXIST' in the config"
self.assertTrue(wanted in stdout)
@dedicatedloop
def test_config_verbose_quiet(self):
stdout, stderr, rc = self._test_molotov("-qv", "--config", _CONFIG)
wanted = "You can't"
self.assertTrue(wanted in stdout)
@dedicatedloop
def test_config_no_scenario_found(self):
stdout, stderr, rc = self._test_molotov("-c", "molotov.tests.test_run")
wanted = "No scenario was found"
self.assertTrue(wanted in stdout)
@dedicatedloop
def test_config_no_single_mode_found(self):
@scenario(weight=10)
async def not_me(session):
_RES.append(3)
stdout, stderr, rc = self._test_molotov(
"-c", "-s", "blah", "molotov.tests.test_run"
)
wanted = "Can't find"
self.assertTrue(wanted in stdout)
@dedicatedloop
def test_name(self):
@scenario(weight=10)
async def here_three(session):
_RES.append(3)
@scenario(weight=30, name="me")
async def here_four(session):
_RES.append(4)
stdout, stderr, rc = self._test_molotov(
"-cx", "--max-runs", "2", "-s", "me", "molotov.tests.test_run"
)
wanted = "SUCCESSES: 2"
self.assertTrue(wanted in stdout)
self.assertTrue(_RES, [4, 4])
@dedicatedloop
def test_single_mode(self):
@scenario(weight=10)
async def here_three(session):
_RES.append(3)
stdout, stderr, rc = self._test_molotov(
"-cx", "--max-runs", "2", "-s", "here_three", "molotov.tests.test_run"
)
wanted = "SUCCESSES: 2"
self.assertTrue(wanted in stdout)
@dedicatedloop
def test_fail_mode_pass(self):
@scenario(weight=10)
async def here_three(session):
_RES.append(3)
stdout, stderr, rc = self._test_molotov(
"-cx",
"--max-runs",
"2",
"--fail",
"1",
"-s",
"here_three",
"molotov.tests.test_run",
)
wanted = "SUCCESSES: 2"
self.assertTrue(wanted in stdout)
self.assertEqual(rc, 0)
@dedicatedloop
def test_fail_mode_fail(self):
@scenario(weight=10)
async def here_three(session):
assert False
stdout, stderr, rc = self._test_molotov(
"-cx",
"--max-runs",
"2",
"--fail",
"1",
"-s",
"here_three",
"molotov.tests.test_run",
)
self.assertEqual(rc, 1)
@only_pypy
@dedicatedloop
def test_uvloop_pypy(self):
@scenario(weight=10)
async def here_three(session):
_RES.append(3)
orig_import = __import__
def import_mock(name, *args):
if name == "uvloop":
raise ImportError()
return orig_import(name, *args)
with patch("builtins.__import__", side_effect=import_mock):
stdout, stderr, rc = self._test_molotov(
"-cx",
"--max-runs",
"2",
"-s",
"here_three",
"--uvloop",
"molotov.tests.test_run",
)
wanted = "You can't use uvloop"
self.assertTrue(wanted in stdout)
@skip_pypy
@dedicatedloop
def test_uvloop_import_error(self):
@scenario(weight=10)
async def here_three(session):
_RES.append(3)
orig_import = __import__
def import_mock(name, *args):
if name == "uvloop":
raise ImportError()
return orig_import(name, *args)
with patch("builtins.__import__", side_effect=import_mock):
stdout, stderr, rc = self._test_molotov(
"-cx",
"--max-runs",
"2",
"--console-update",
"0",
"-s",
"here_three",
"--uvloop",
"molotov.tests.test_run",
)
wanted = "You need to install uvloop"
self.assertTrue(wanted in stdout)
@skip_pypy
@dedicatedloop
def test_uvloop(self):
try:
import uvloop # noqa
except ImportError:
return
@scenario(weight=10)
async def here_three(session):
_RES.append(3)
stdout, stderr, rc = self._test_molotov(
"-cx",
"--max-runs",
"2",
"-s",
"here_three",
"--uvloop",
"molotov.tests.test_run",
)
wanted = "SUCCESSES: 2"
self.assertTrue(wanted in stdout, stdout)
@dedicatedloop
def test_delay(self):
with catch_sleep() as delay:
@scenario(weight=10, delay=0.1)
async def here_three(session):
_RES.append(3)
stdout, stderr, rc = self._test_molotov(
"--delay",
".6",
"--console-update",
"0",
"-cx",
"--max-runs",
"2",
"-s",
"here_three",
"molotov.tests.test_run",
)
wanted = "SUCCESSES: 2"
self.assertTrue(wanted in stdout, stdout)
self.assertEqual(delay[:9], [1, 0.1, 1, 0.6, 1, 0.1, 1, 0.6, 1])
@dedicatedloop
def test_rampup(self):
with catch_sleep() as delay:
@scenario(weight=10)
async def here_three(session):
_RES.append(3)
stdout, stderr, rc = self._test_molotov(
"--ramp-up",
"10",
"--workers",
"5",
"--console-update",
"0",
"-cx",
"--max-runs",
"2",
"-s",
"here_three",
"molotov.tests.test_run",
)
# workers should start every 2 seconds since
# we have 5 workers and a 10 second ramp-up:
# the first one starts immediately, then each worker
# sleeps 2 seconds longer than the previous one.
delay = [d for d in delay if d != 0]
self.assertEqual(delay, [1, 2.0, 4.0, 6.0, 8.0, 1, 1])
wanted = "SUCCESSES: 10"
self.assertTrue(wanted in stdout, stdout)
@dedicatedloop
def test_sizing(self):
_RES2["fail"] = 0
_RES2["succ"] = 0
with catch_sleep():
@scenario()
async def sizer(session):
if random.randint(0, 20) == 1:
_RES2["fail"] += 1
raise AssertionError()
else:
_RES2["succ"] += 1
stdout, stderr, rc = self._test_molotov(
"--sizing",
"--console-update",
"0",
"--sizing-tolerance",
"5",
"-s",
"sizer",
"molotov.tests.test_run",
)
ratio = float(_RES2["fail"]) / float(_RES2["succ"]) * 100.0
self.assertTrue(ratio < 14.75 and ratio >= 4.75, ratio)
found = re.findall(r"obtained with (\d+) workers", stdout)
assert int(found[0]) > 50
@unittest.skipIf(os.name == "nt", "win32")
@dedicatedloop
def test_sizing_multiprocess(self):
counters = SharedCounters("OK", "FAILED")
with catch_sleep():
@scenario()
async def sizer(session):
if random.randint(0, 10) == 1:
counters["FAILED"] += 1
raise AssertionError()
else:
counters["OK"] += 1
with set_args(
"molotov",
"--sizing",
"-p",
"2",
"--sizing-tolerance",
"5",
"--console-update",
"0",
"-s",
"sizer",
"molotov.tests.test_run",
) as (stdout, stderr):
try:
main()
except SystemExit:
pass
stdout, stderr = stdout.read().strip(), stderr.read().strip()
# stdout, stderr, rc = self._test_molotov()
ratio = (
float(counters["FAILED"].value) / float(counters["OK"].value) * 100.0
)
self.assertTrue(ratio >= 4.75, ratio)
@unittest.skipIf(os.name == "nt", "win32")
@dedicatedloop_noclose
def test_statsd_multiprocess(self):
test_loop = asyncio.get_event_loop()
@scenario()
async def staty(session):
get_context(session).statsd.increment("yopla")
server = UDPServer("127.0.0.1", 9999, loop=test_loop)
_stop = asyncio.Future()
async def stop():
await _stop
await server.stop()
server_task = asyncio.ensure_future(server.run())
stop_task = asyncio.ensure_future(stop())
args = self._get_args()
args.verbose = 2
args.processes = 2
args.max_runs = 5
args.duration = 1000
args.statsd = True
args.statsd_address = "udp://127.0.0.1:9999"
args.single_mode = "staty"
args.scenario = "molotov.tests.test_run"
stream = io.StringIO()
run(args, stream=stream)
_stop.set_result(True)
test_loop.run_until_complete(asyncio.gather(server_task, stop_task))
udp = server.flush()
incrs = 0
for line in udp:
for el in line.split(b"\n"):
if el.strip() == b"":
continue
incrs += 1
# two processes making 5 runs each;
# we expect more than 5 increments here
self.assertTrue(incrs > 5)
stream.seek(0)
output = stream.read()
self.assertTrue("Happy breaking!" in output, output)
@dedicatedloop
def test_timed_sizing(self):
_RES2["fail"] = 0
_RES2["succ"] = 0
_RES2["messed"] = False
with catch_sleep():
@scenario()
async def sizer(session):
if get_context(session).worker_id == 200 and not _RES2["messed"]:
# worker 200 will mess with the timer
# since we're faking all timers, the current
# time in the test is always around 0
# so to have now() - get_timer() > 60
# we need to set a negative value here
# to trick it
set_timer(-61)
_RES2["messed"] = True
_RES2["fail"] = _RES2["succ"] = 0
if get_context(session).worker_id > 100:
# starting to introduce errors past the 100th worker
if random.randint(0, 10) == 1:
_RES2["fail"] += 1
raise AssertionError()
else:
_RES2["succ"] += 1
# forces a switch
await asyncio.sleep(0)
stdout, stderr, rc = self._test_molotov(
"--sizing",
"--sizing-tolerance",
"5",
"--console-update",
"0",
"-cs",
"sizer",
"molotov.tests.test_run",
)
ratio = float(_RES2["fail"]) / float(_RES2["succ"]) * 100.0
self.assertTrue(ratio < 20.0 and ratio > 4.75, ratio)
@unittest.skipIf(os.name == "nt", "win32")
@dedicatedloop
def test_sizing_multiprocess_interrupted(self):
counters = SharedCounters("OK", "FAILED")
@scenario()
async def sizer(session):
if random.randint(0, 10) == 1:
counters["FAILED"] += 1
raise AssertionError()
else:
counters["OK"] += 1
async def _stop():
await asyncio.sleep(2.0)
os.kill(os.getpid(), signal.SIGINT)
asyncio.ensure_future(_stop())
stdout, stderr, rc = self._test_molotov(
"--sizing",
"-p",
"3",
"--sizing-tolerance",
"90",
"--console-update",
"0",
"-s",
"sizer",
"molotov.tests.test_run",
)
self.assertTrue("Sizing was not finished" in stdout)
@dedicatedloop
def test_use_extension(self):
ext = os.path.join(_HERE, "example5.py")
@scenario(weight=10)
async def simpletest(session):
async with session.get("http://localhost:8888") as resp:
assert resp.status == 200
with coserver():
stdout, stderr, rc = self._test_molotov(
"-cx",
"--max-runs",
"1",
"--use-extension=" + ext,
"-s",
"simpletest",
"molotov.tests.test_run",
)
self.assertTrue("=>" in stdout)
self.assertTrue("<=" in stdout)
@dedicatedloop
def test_use_extension_fail(self):
ext = os.path.join(_HERE, "exampleIDONTEXIST.py")
@scenario(weight=10)
async def simpletest(session):
async with session.get("http://localhost:8888") as resp:
assert resp.status == 200
with coserver():
stdout, stderr, rc = self._test_molotov(
"-cx",
"--max-runs",
"1",
"--use-extension=" + ext,
"-s",
"simpletest",
"molotov.tests.test_run",
)
self.assertTrue("Cannot import" in stdout)
@dedicatedloop
def test_use_extension_module_name(self):
ext = "molotov.tests.example5"
@scenario(weight=10)
async def simpletest(session):
async with session.get("http://localhost:8888") as resp:
assert resp.status == 200
with coserver():
stdout, stderr, rc = self._test_molotov(
"-cx",
"--max-runs",
"1",
"--use-extension=" + ext,
"-s",
"simpletest",
"molotov.tests.test_run",
)
self.assertTrue("=>" in stdout)
self.assertTrue("<=" in stdout)
@dedicatedloop
def test_use_extension_module_name_fail(self):
ext = "IDONTEXTSIST"
@scenario(weight=10)
async def simpletest(session):
async with session.get("http://localhost:8888") as resp:
assert resp.status == 200
with coserver():
stdout, stderr, rc = self._test_molotov(
"-cx",
"--max-runs",
"1",
"--use-extension=" + ext,
"-s",
"simpletest",
"molotov.tests.test_run",
)
self.assertTrue("Cannot import" in stdout)
@dedicatedloop
def test_quiet(self):
@scenario(weight=10)
async def here_three(session):
_RES.append(3)
stdout, stderr, rc = self._test_molotov(
"-cx", "--max-runs", "1", "-q", "-s", "here_three", "molotov.tests.test_run"
)
self.assertEqual(stdout, "")
self.assertEqual(stderr, "")
@dedicatedloop_noclose
def test_slow_server_force_shutdown(self):
@scenario(weight=10)
async def _one(session):
async with session.get("http://localhost:8888/slow") as resp:
assert resp.status == 200
_RES.append(1)
args = self._get_args()
args.duration = 0.1
args.verbose = 2
args.max_runs = 1
args.force_shutdown = True
start = time.time()
with coserver():
run(args)
# makes sure the test is stopped even if the server
# hangs on a socket
self.assertTrue(time.time() - start < 4)
self.assertTrue(len(_RES) == 0)
@dedicatedloop_noclose
def test_slow_server_graceful(self):
@scenario(weight=10)
async def _one(session):
async with session.get("http://localhost:8888/slow") as resp:
assert resp.status == 200
_RES.append(1)
args = self._get_args()
args.duration = 0.1
args.verbose = 2
args.max_runs = 1
# graceful shutdown, on the other hand, will wait
# for worker completion
args.graceful_shutdown = True
start = time.time()
with coserver():
run(args)
# makes sure the test finishes
self.assertTrue(time.time() - start > 5)
self.assertTrue(len(_RES) == 1)
@dedicatedloop
def test_single_run(self):
_RES = defaultdict(int)
with catch_sleep():
@scenario()
async def one(session):
_RES["one"] += 1
@scenario()
async def two(session):
_RES["two"] += 1
@scenario()
async def three(session):
_RES["three"] += 1
stdout, stderr, rc = self._test_molotov(
"--single-run", "molotov.tests.test_run",
)
assert rc == 0
assert _RES["one"] == 1
assert _RES["two"] == 1
assert _RES["three"] == 1
@dedicatedloop
def _XXX_test_enable_dns(self, m_resolve):
m_resolve.return_value = ("http://localhost", "http://localhost", "localhost")
with catch_sleep():
@scenario()
async def one(session):
async with session.get("http://localhost"):
pass
stdout, stderr, rc = self._test_molotov(
"--single-run", "molotov.tests.test_run",
)
m_resolve.assert_called()
@dedicatedloop
def xxx_test_disable_dns(self, m_resolve):
with catch_sleep():
@scenario()
async def one(session):
async with session.get("http://localhost"):
pass
stdout, stderr, rc = self._test_molotov(
"--disable-dns-resolve", "--single-run", "molotov.tests.test_run",
)
m_resolve.assert_not_called()
@dedicatedloop
def test_bug_121(self):
PASSED = [0]
with catch_sleep():
@scenario()
async def scenario_one(session):
cookies = {
"csrftoken": "sometoken",
"dtk": "1234",
"djdt": "hide",
"sessionid": "5678",
}
boundary = "----WebKitFormBoundaryFTE"
headers = {
"X-CSRFToken": "sometoken",
"Content-Type": "multipart/form-data; boundary={}".format(boundary),
}
data = json.dumps({"1": "xxx"})
with aiohttp.MultipartWriter(
"form-data", boundary=boundary
) as mpwriter:
mpwriter.append(
data,
{
"Content-Disposition": 'form-data; name="json"; filename="blob"',
"Content-Type": "application/json",
},
)
async with session.post(
"http://localhost:8888",
data=mpwriter,
headers=headers,
cookies=cookies,
) as resp:
res = await resp.text()
assert data in res
PASSED[0] += 1
args = self._get_args()
args.verbose = 2
args.max_runs = 1
with coserver():
res = run(args)
assert PASSED[0] == 1
assert res["OK"] == 1
@dedicatedloop
def test_local_import(self):
test = os.path.join(_HERE, "example9.py")
with coserver():
stdout, stderr, rc = self._test_molotov("--max-runs", "1", test)
self.assertTrue("SUCCESSES: 1" in stdout, stdout)
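# Note: the @scenario decorator registers scenarios globally at decoration
# time, which is why the tests above define their scenarios *inside* the test
# bodies. A module-level scenario here would be picked up by runs of
# "molotov.tests.test_run" and would break tests such as
# test_config_no_scenario_found. For reference, a minimal standalone scenario
# module looks like the following sketch (kept commented out for that reason;
# the target URL is an assumption):
#
#     from molotov.api import scenario
#
#     @scenario(weight=100)
#     async def hit_root(session):
#         async with session.get("http://localhost:8888") as resp:
#             assert resp.status == 200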
|
|
#!/usr/bin/env python
"""
@package mi.dataset.parser
@file mi-dataset/mi/dataset/parser/camds.py
@author Dan Mergens
@brief Parser for camds_abc html files.
This file contains code for parsing CAMDS HTML formatted files, extracting the metadata associated therein and
generating the corresponding metadata particles.
Input is an HTML formatted file. This file should contain a reference to a local PNG file whose absolute path must be
derived from the location of the HTML file to determine path information for the metadata.
Release Notes:
Initial release: 27 Jan 2017
"""
import re
from datetime import datetime
from bs4 import BeautifulSoup
from mi.core.common import BaseEnum
from mi.core.exceptions import SampleException
from mi.core.instrument.data_particle import DataParticleKey, DataParticle, DataParticleValue
from mi.core.log import get_logger
from mi.dataset.dataset_parser import SimpleParser
__author__ = 'Dan Mergens'
__license__ = 'Apache 2.0'
log = get_logger()
INT_REGEX = re.compile(r'(\d+)')
# Example: <TD colspan="2"><a href="20100101T000104,301.png" ><img width="100%" src="20100101T000104,301.png"
# alt="20100101T000104,301.png" ></a></TD><TD>
IMAGE_REGEX = re.compile(r'(\S+) \S+')
def read_filename(regex, string):
match = re.match(regex, string)
if not match:
return ''
return match.group(1)
# Example:
# <TD width="200px" >Time Taken:</TD><TD>2010-01-01 00:01:04:301</TD>
TIMESTAMP_REGEX = re.compile(r'''(
(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2}) # date
\W
(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}):(?P<millisecond>\d{3}) # time
)''', re.VERBOSE)
def read_timestamp(regex, string):
"""
Read the timestamp from the HTML table and convert it to an NTP timestamp
(seconds since 1900-01-01).
"""
match = re.match(regex, string)
if not match:
return None
year = int(match.group('year'))
month = int(match.group('month'))
day = int(match.group('day'))
hour = int(match.group('hour'))
minute = int(match.group('minute'))
second = int(match.group('second'))
millisecond = int(match.group('millisecond'))
ts = datetime(year, month, day, hour, minute, second, int(millisecond*1e3))
return (ts - datetime(1900, 1, 1)).total_seconds()
# Example:
# <TD >Zoom Position:</TD><TD>115</TD>
POSITION_REGEX = INT_REGEX
# Example:
# <TD >Focus Position:</TD><TD>30</TD>
FOCUS_REGEX = INT_REGEX
# Example:
# <TD >Iris Position:</TD><TD>5</TD>
IRIS_REGEX = INT_REGEX
# Note: Shutter speed is not used by camds_image_metadata
# Example:
# <TD >Shutter Speed:</TD><TD>150 (Manual)</TD>
SHUTTER_REGEX = INT_REGEX
# Example:
# <TD >GAIN:</TD><TD>11 (Manual)</TD>
GAIN_REGEX = INT_REGEX
# Example:
# <TD >Pan and Tilt Position:</TD><TD>X=0 Y=0</TD>
PAN_TILT_REGEX = re.compile(r'X=(?P<X>\d+) Y=(?P<Y>\d+)')
# Example:
# <TD >Lamp1:</TD><TD>19</TD>
# <TD >Lamp2:</TD><TD>7</TD>
LAMP_REGEX = INT_REGEX
# Note: Laser is not used by camds_image_metadata
# Example:
# <TD >Laser:</TD><TD>On</TD>
LASER_REGEX = re.compile(r'(\w+)')
def read_int(regex, string):
match = re.match(regex, string)
if not match:
return None
return match.group(0)
def read_pan_tilt(regex, string):
match = re.match(regex, string)
x = 0
y = 0
if match:
x = match.group('X')
y = match.group('Y')
return x, y
def read_laser(regex, string):
match = re.match(regex, string)
if not match:
return False
if match.group(0) == 'On':
return True
return False
class CamdsParserDataParticleType(BaseEnum):
CAMDS_IMAGE_METADATA = 'camds_image_metadata'
class CamdsParserDataParticleKey(BaseEnum):
"""
From 'camds_image_metadata' - DICT379
"""
PAN_POSITION = 'camds_pan_position' # PD2659
TILT_POSITION = 'camds_tilt_position' # PD2660
FOCUS_POSITION = 'camds_focus_position' # PD2661
ZOOM_POSITION = 'camds_zoom_position' # PD2662
IRIS_POSITION = 'camds_iris_position' # PD2663
GAIN = 'camds_gain' # PD2664
RESOLUTION = 'camds_resolution' # PD2665
BRIGHTNESS = 'camds_brightness' # PD2666
IMAGE = 'filepath' # PD3808 - relative filepath on raw data server
BRIGHTNESS2 = 'camds_brightness2' # PD8052
class CamdsHTMLDataKey(BaseEnum):
IMAGE = 'Image:'
TIMESTAMP = 'Time Taken:'
ZOOM_POSITION = 'Zoom Position:'
FOCUS_POSITION = 'Focus Position:'
IRIS_POSITION = 'Iris Position:'
SHUTTER_SPEED = 'Shutter Speed:'
GAIN = 'GAIN:'
PAN_TILT_POSITION = 'Pan and Tilt Position:'
LAMP1 = 'Lamp1:'
LAMP2 = 'Lamp2:'
LASER = 'Laser:'
class CamdsMetadataParticle(DataParticle):
"""
Abstract class for the camds_image_metadata data set.
"""
_data_particle_type = CamdsParserDataParticleType.CAMDS_IMAGE_METADATA
def __init__(self,
raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP,
quality_flag=DataParticleValue.OK,
new_sequence=None):
super(CamdsMetadataParticle, self).__init__(
raw_data,
port_timestamp,
internal_timestamp,
preferred_timestamp,
quality_flag,
new_sequence
)
self._data_dict = dict()
def _build_parsed_values(self):
"""
Build and return the parsed values for camds_image_metadata from self.raw_data.
:return: list of encoded particle values
"""
result = []
if type(self.raw_data) is not dict:
raise SampleException('Data provided to particle generator is not a valid dictionary: %r' % self.raw_data)
data_dict = self.raw_data
# check for required dictionary values
required_keys = [
CamdsParserDataParticleKey.IMAGE
]
for key in required_keys:
if key not in data_dict.keys():
raise SampleException('Missing required key (%s)' % key)
for key in data_dict:
value, encoding = data_dict[key]
result.append(self._encode_value(key, value, encoding))
return result
class CamdsHtmlParser(SimpleParser):
def __init__(self, stream_handle, exception_callback):
# no sieve function since we are not using the chunker here
super(CamdsHtmlParser, self).__init__({}, stream_handle, exception_callback)
self._particle_class = CamdsMetadataParticle
# metadata tuple of the form:
# html key: (particle key name, regex, encoder, encoding type)
self.metadata_encoding = {
CamdsHTMLDataKey.IMAGE:
(CamdsParserDataParticleKey.IMAGE, IMAGE_REGEX, read_filename, str),
CamdsHTMLDataKey.TIMESTAMP:
(CamdsHTMLDataKey.TIMESTAMP, TIMESTAMP_REGEX, read_timestamp, float),
CamdsHTMLDataKey.ZOOM_POSITION:
(CamdsParserDataParticleKey.ZOOM_POSITION, POSITION_REGEX, read_int, int),
CamdsHTMLDataKey.FOCUS_POSITION:
(CamdsParserDataParticleKey.FOCUS_POSITION, FOCUS_REGEX, read_int, int),
CamdsHTMLDataKey.IRIS_POSITION:
(CamdsParserDataParticleKey.IRIS_POSITION, IRIS_REGEX, read_int, int),
# CamdsHTMLDataKey.SHUTTER_SPEED:
# (CamdsParserDataParticleKey., SHUTTER_REGEX, read_one, int),
CamdsHTMLDataKey.GAIN:
(CamdsParserDataParticleKey.GAIN, GAIN_REGEX, read_int, int),
CamdsHTMLDataKey.PAN_TILT_POSITION:
((CamdsParserDataParticleKey.PAN_POSITION, CamdsParserDataParticleKey.TILT_POSITION),
PAN_TILT_REGEX, read_pan_tilt, int),
# both Lamp1 and Lamp2 should always be the same for the uncabled CAMDS
CamdsHTMLDataKey.LAMP1:
(CamdsParserDataParticleKey.BRIGHTNESS, LAMP_REGEX, read_int, int),
CamdsHTMLDataKey.LAMP2:
(CamdsParserDataParticleKey.BRIGHTNESS2, LAMP_REGEX, read_int, int),
# 'Laser:': ('LASER', LASER_REGEX, read_laser, int)
}
def parse_file(self):
"""
Parse a CAMDS HTML file, extract the metadata fields and append the
resulting camds_image_metadata particle to the record buffer.
"""
data_dict = {}
html_doc = self._stream_handle.read()
soup = BeautifulSoup(html_doc, 'html.parser')
tables = soup.find_all('table')
if not tables:
raise SampleException('no tables present')
for table in tables:
for row in table.find_all('tr'):
columns = row.find_all('td')
if not columns:
continue
key = columns[0].get_text()
if not key:
continue
# use a dummy key for the image filename since it has no associated key in the html table
if len(columns) == 2:
key = columns[0].get_text()
value = columns[1].get_text()
else:
if '.png' in key:
value = key
key = CamdsHTMLDataKey.IMAGE
else:
continue
# pan/tilt has two values - needs special handling
if key == CamdsHTMLDataKey.PAN_TILT_POSITION:
names, regex, encoder, encoding_type = self.metadata_encoding[key]
encoded_values = encoder(regex, value)
for name, value in zip(names, encoded_values):
data_dict[name] = value, encoding_type
elif key in self.metadata_encoding.keys():
name, regex, encoder, encoding_type = self.metadata_encoding[key]
encoded_value = encoder(regex, value)
data_dict[name] = encoded_value, encoding_type
# extract timestamp and use for creation of the particle
timestamp, _ = data_dict.pop(CamdsHTMLDataKey.TIMESTAMP)
record = self._extract_sample(self._particle_class, None, data_dict, timestamp)
self._record_buffer.append(record)
self._file_parsed = True
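# Illustrative self-check (a sketch only, not part of the parser): it exercises
# the standalone field readers against the example strings shown in the
# comments above, so it needs no input file or framework setup.
if __name__ == '__main__':
    print(read_timestamp(TIMESTAMP_REGEX, '2010-01-01 00:01:04:301'))  # NTP seconds since 1900-01-01
    print(read_pan_tilt(PAN_TILT_REGEX, 'X=0 Y=0'))  # ('0', '0')
    print(read_laser(LASER_REGEX, 'On'))  # True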
|
|
from gevent.event import AsyncResult, Event
from gevent import Timeout
from pysteamkit.protobuf import steammessages_clientserver_pb2
from pysteamkit.steam_base import EMsg, EResult, EUniverse, EAccountType
from pysteamkit.steamid import SteamID
from pysteamkit.steam3 import msg_base
from pysteamkit.steam3.connection import TCPConnection
from pysteamkit.steam3.steamapps import SteamApps
from pysteamkit.util import Util
base_server_list = [('72.165.61.174', 27017), ('72.165.61.174', 27018),
('72.165.61.175', 27017), ('72.165.61.175', 27018),
('72.165.61.185', 27017), ('72.165.61.185', 27018),
('72.165.61.187', 27017), ('72.165.61.187', 27018),
('146.66.152.12', 27017), ('146.66.152.12', 27018),
('209.197.29.196', 27017), ('209.197.29.197', 27018),
('cm0.steampowered.com', 27017)]
class SteamClient():
def __init__(self, callback):
self.callback = callback
self.listeners = []
self.message_constructors = dict()
self.message_events = dict()
self.message_job_events = dict()
self.username = None
self.jobid = 0
self.steam2_ticket = None
self.session_token = None
self.server_list = dict()
self.account_type = None
self.connection = TCPConnection(self)
self.connection_event = Event()
self.logon_event = Event()
self.register_listener(callback)
self.steamapps = SteamApps(self)
self.register_message(EMsg.ClientLogOnResponse, msg_base.ProtobufMessage, steammessages_clientserver_pb2.CMsgClientLogonResponse)
self.register_message(EMsg.ClientLoggedOff, msg_base.ProtobufMessage, steammessages_clientserver_pb2.CMsgClientLoggedOff)
self.register_message(EMsg.ClientSessionToken, msg_base.ProtobufMessage, steammessages_clientserver_pb2.CMsgClientSessionToken)
def initialize(self):
self.connect(base_server_list)
return self.callback.try_initialize_connection(self)
def connect(self, addresses):
self.connection_event.clear()
self.logon_event.clear()
for addr in addresses:
if self.connection.connect(addr):
self.connection_event.wait()
return True
return False
def disconnect(self):
if self.steamid:
self.logout()
self.connection.disconnect()
def handle_connected(self):
self.connection_event.set()
def handle_disconnected(self, reason):
self.connection_event.clear()
self.logon_event.clear()
        # fail any outstanding waiters so greenlets blocked in wait_for_message/wait_for_job don't hang
for k in self.message_events.keys():
if self.message_events[k]:
self.message_events[k].set_exception(Exception())
self.message_events[k] = None
for k in self.message_job_events.keys():
if self.message_job_events[k]:
self.message_job_events[k].set_exception(Exception())
self.message_job_events[k] = None
if self.callback.handle_disconnected(self, reason):
return
self.connection_event.set()
self.logon_event.set()
self.username = None
self.jobid = 0
self.steam2_ticket = None
self.session_token = None
def register_listener(self, listener):
self.listeners.append(listener)
def register_message(self, emsg, container, header, body=None):
self.message_constructors[emsg] = (container, header, body)
self.message_events[emsg] = None
def wait_for_message(self, emsg, timeout=None):
        if emsg not in self.message_events:
#print emsg, 'not registered!'
return None
while True:
if emsg != EMsg.ChannelEncryptResult and emsg != EMsg.ClientLogOnResponse:
self.logon_event.wait()
if not self.connection.connected:
raise Exception("Not connected, unable to send message")
if self.message_events[emsg]:
async_result = self.message_events[emsg]
else:
async_result = self.message_events[emsg] = AsyncResult()
try:
return async_result.get(timeout=timeout)
except Timeout:
return 'Timed Out'
except Exception:
pass
def wait_for_job(self, message, emsg):
jobid = self.jobid
self.jobid += 1
message.header.source_jobid = jobid
while True:
self.logon_event.wait()
if not self.connection.connected:
raise Exception("Not connected, unable to send message")
self.connection.send_message(message)
async_result = self.message_job_events[jobid] = AsyncResult()
try:
return async_result.get()
except Exception as e:
pass
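    # Editor's note (illustrative, not original code): requests and replies are
    # correlated through AsyncResult objects. A caller registers the expected
    # EMsg once, sends the request, then blocks in wait_for_message() until
    # handle_message() parses the matching reply and calls .set() on the
    # result, e.g.
    #
    #     self.register_message(EMsg.ClientSessionToken, msg_base.ProtobufMessage,
    #                           steammessages_clientserver_pb2.CMsgClientSessionToken)
    #     response = self.wait_for_message(EMsg.ClientSessionToken, timeout=10)
    #
    # wait_for_job() works the same way, but keys the AsyncResult on a fresh
    # jobid instead of the message type.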
@property
def steamid(self):
return self.connection.steamid
def login_anonymous(self):
message = msg_base.ProtobufMessage(steammessages_clientserver_pb2.CMsgClientLogon, EMsg.ClientLogon)
message.proto_header.client_sessionid = 0
message.proto_header.steamid = SteamID.make_from(0, 0, EUniverse.Public, EAccountType.AnonUser).steamid
message.body.protocol_version = 65575
message.body.client_os_type = 10
message.body.machine_id = "OK"
self.connection.send_message(message)
logonResponse = self.wait_for_message(EMsg.ClientLogOnResponse)
return logonResponse.body
def login(self, username=None, password=None, login_key=None, auth_code=None, steamid=0, two_factor_code=None):
self.username = username
message = msg_base.ProtobufMessage(steammessages_clientserver_pb2.CMsgClientLogon, EMsg.ClientLogon)
message.proto_header.client_sessionid = 0
if steamid > 0:
message.proto_header.steamid = steamid
else:
message.proto_header.steamid = SteamID.make_from(0, 0, EUniverse.Public, EAccountType.Individual).steamid
message.body.protocol_version = 65575
message.body.client_package_version = 1771
message.body.client_os_type = 10
message.body.client_language = "english"
message.body.machine_id = "OK"
message.body.account_name = username
message.body.password = password
if login_key:
message.body.login_key = login_key
if auth_code:
message.body.auth_code = auth_code
if two_factor_code:
message.body.two_factor_code = two_factor_code
sentryfile = self.callback.get_sentry_file(username)
if sentryfile:
message.body.sha_sentryfile = Util.sha1_hash(sentryfile)
message.body.eresult_sentryfile = EResult.OK
else:
message.body.eresult_sentryfile = EResult.FileNotFound
localip = self.connection.get_bound_address()
message.body.obfustucated_private_ip = 1111
self.connection.send_message(message)
logonResponse = self.wait_for_message(EMsg.ClientLogOnResponse)
if self.steamid:
self.account_type = self.steamid.accounttype
return logonResponse.body
def logout(self):
message = msg_base.ProtobufMessage(steammessages_clientserver_pb2.CMsgClientLogOff, EMsg.ClientLogOff)
self.connection.send_message(message)
return None
def get_session_token(self):
if self.session_token:
return self.session_token
# this also can't fit in a job because it's sent on login
if self.account_type == EAccountType.Individual:
self.wait_for_message(EMsg.ClientSessionToken)
return self.session_token
return None
def handle_message(self, emsg_real, msg):
emsg = Util.get_msg(emsg_real)
#print "EMsg is", Util.lookup_enum(EMsg, emsg)
if emsg == EMsg.ClientLogOnResponse:
self.handle_client_logon(msg)
elif emsg == EMsg.ClientUpdateMachineAuth:
self.handle_update_machine_auth(msg)
elif emsg == EMsg.ClientSessionToken:
self.handle_session_token(msg)
elif emsg == EMsg.ClientServerList:
self.handle_server_list(msg)
for listener in self.listeners:
listener.handle_message(emsg_real, msg)
if emsg in self.message_constructors:
constructor = self.message_constructors[emsg]
if constructor[2]:
message = constructor[0](constructor[1], constructor[2])
else:
message = constructor[0](constructor[1])
message.parse(msg)
if self.message_events.get(emsg):
self.message_events[emsg].set(message)
self.message_events[emsg] = None
if self.message_job_events.get(message.header.target_jobid):
self.message_job_events[message.header.target_jobid].set(message)
self.message_job_events[message.header.target_jobid] = None
def handle_client_logon(self, msg):
message = msg_base.ProtobufMessage(steammessages_clientserver_pb2.CMsgClientLogonResponse)
message.parse(msg)
if message.body.steam2_ticket:
self.steam2_ticket = message.body.steam2_ticket
self.logon_event.set()
def handle_update_machine_auth(self, msg):
message = msg_base.ProtobufMessage(steammessages_clientserver_pb2.CMsgClientUpdateMachineAuth)
message.parse(msg)
sentryfile = message.body.bytes
        sentry_hash = Util.sha1_hash(sentryfile)
self.callback.store_sentry_file(self.username, sentryfile)
response = msg_base.ProtobufMessage(steammessages_clientserver_pb2.CMsgClientUpdateMachineAuthResponse, EMsg.ClientUpdateMachineAuthResponse)
response.header.target_jobid = message.header.source_jobid
response.body.cubwrote = message.body.cubtowrite
response.body.eresult = EResult.OK
response.body.filename = message.body.filename
response.body.filesize = message.body.cubtowrite
response.body.getlasterror = 0
response.body.offset = message.body.offset
        response.body.sha_file = sentry_hash
response.body.otp_identifier = message.body.otp_identifier
response.body.otp_type = message.body.otp_type
self.connection.send_message(response)
def handle_session_token(self, msg):
message = msg_base.ProtobufMessage(steammessages_clientserver_pb2.CMsgClientSessionToken)
message.parse(msg)
self.session_token = message.body.token
def handle_server_list(self, msg):
message = msg_base.ProtobufMessage(steammessages_clientserver_pb2.CMsgClientServerList)
message.parse(msg)
for server in message.body.servers:
            if server.server_type not in self.server_list:
self.server_list[server.server_type] = []
self.server_list[server.server_type].append((Util.long2ip(server.server_ip), server.server_port))
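# Editor's sketch of how this client is typically driven (hypothetical callback
# object, not part of pysteamkit): SteamClient calls back into it for
# connection bootstrap, sentry-file storage and disconnect handling, as seen in
# initialize(), login() and handle_update_machine_auth() above.
class _ExampleCallback(object):
    def try_initialize_connection(self, client):
        # e.g. perform an anonymous logon once the TCP connection is up
        return client.login_anonymous().eresult == EResult.OK
    def get_sentry_file(self, username):
        return None  # no sentry file stored yet
    def store_sentry_file(self, username, sentryfile):
        pass  # persist the sentry bytes for the next login
    def handle_disconnected(self, client, reason):
        return False  # not handled; let the client reset its state
    def handle_message(self, emsg, msg):
        pass
def _example_run():
    client = SteamClient(_ExampleCallback())
    return client.initialize()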
|
|
from datetime import datetime, timedelta, time
from django.contrib.auth.models import AbstractUser
from django.contrib.postgres.fields import ArrayField
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from imagekit.models import ImageSpecField
from pilkit.processors import SmartResize
@python_2_unicode_compatible
class User(AbstractUser):
""" Site User.
A user can either be an admin or a general user. Most users
can do most things. An admin user can be a site owner, add new
users, create and manage networks and shift users from active
to inactive. A general user creates and collaborates on content.
"""
# Made optional for users not pushing content to an org (freelancers)
organization = models.ForeignKey(
'Organization',
blank=True,
null=True,
)
ADMIN = 'Admin'
EDITOR = 'Editor'
STAFF = 'Staff'
OTHER = 'Other'
USER_TYPE_CHOICES = (
(ADMIN, 'Admin'),
(EDITOR, 'Editor'),
(STAFF, 'Staff'),
(OTHER, 'Other'),
)
user_type = models.CharField(
max_length=25,
choices=USER_TYPE_CHOICES,
help_text='Type of user.'
)
credit_name = models.CharField(
max_length=75,
help_text='Full name of user as listed as a credit on content.',
blank=True,
)
name_pronunciation = models.TextField(
help_text="Instruction on the proper pronunciation of the users name.",
blank=True,
)
pronoun = models.CharField(
max_length=50,
        help_text="User's preferred pronoun.",
blank=True,
)
title = models.CharField(
max_length=100,
help_text='Professional title.',
blank=True,
)
phone = models.CharField(
max_length=20,
blank=True,
)
bio = models.TextField(
help_text="Short bio.",
blank=True,
)
location = models.CharField(
max_length=255,
blank=True,
)
expertise = ArrayField(
models.CharField(max_length=255),
default=list,
help_text='Array of user skills and beats to filter/search by.',
blank=True,
)
notes = models.ManyToManyField(
'Note',
blank=True,
)
photo = models.ImageField(
upload_to='users',
blank=True,
)
display_photo = ImageSpecField(
source='photo',
processors=[SmartResize(500, 500)],
format='JPEG',
)
website = models.URLField(
max_length=250,
blank=True,
)
class Meta:
verbose_name = 'User'
verbose_name_plural = "Users"
ordering = ['credit_name']
def __str__(self):
return self.credit_name
def get_absolute_url(self):
return reverse('user_detail', kwargs={'pk': self.id})
def get_user_content(self):
"""Return list of all content user is associated with as
owner, editor or credit.
Results are used to display relevant content for a user on
their dashboard and user profile.
"""
user_content = []
projects_owner = self.project_owner.all()
projects_team = self.project_team_member.all()
story_owner = self.story_owner.all()
story_team = self.story_team_member.all()
facet_owner = self.facetowner.all()
# facet_team = self.team.all()
user_content.extend(projects_owner)
user_content.extend(projects_team)
user_content.extend(story_owner)
user_content.extend(story_team)
user_content.extend(facet_owner)
return user_content
def get_user_assets(self):
"""Return assets that a user is associated with."""
user_assets = []
images_owner = self.imageasset_set.all()
documents_owner = self.documentasset_set.all()
audio_owner = self.audioasset_set.all()
video_owner = self.videoasset_set.all()
user_assets.extend(images_owner)
user_assets.extend(documents_owner)
user_assets.extend(audio_owner)
user_assets.extend(video_owner)
return user_assets
def get_user_tasks(self):
"""Return all the tasks for a user."""
from . import Task
tasks = Task.objects.filter(Q(owner=self) | Q(assigned_to=self))
return tasks
def inbox_comments(self):
"""Return list of comments from discussions the user is a participant in.
Collects all relevant comments for a specific user to show in their
dashboard and inbox.
"""
from . import Comment
user_discussion_ids = self.comment_set.all().values('discussion_id')
return (Comment
.objects
.filter(discussion_id__in=user_discussion_ids)
.exclude(user_id=self.id)
.select_related('user', 'discussion')
)
def recent_comments(self):
"""Recent comments in users's discussions.
Return list of comments:
- from discussions the user is a participant in
- since the user's last login
- where the user isn't the author
For display on primary dashboard.
"""
        # FIXME: this appears to just be a subset of inbox_comments; can it reuse that?
from . import Comment
# Discussions user is involved in
user_discussion_ids = self.comment_set.all().values('discussion_id')
        # Comments in those discussions since the user's last login, excluding their own
return (Comment
.objects
.filter(discussion_id__in=user_discussion_ids,
date__gte=self.last_login)
.exclude(user_id=self.id)
)
# formerly get_user_contact_list
def get_user_contact_list_vocab(self):
""" Return queryset containing all users a specific user can contact.
This includes any user that's a member of an organization in network.
This vocab list populates to selection for messaging.
"""
organization = self.organization
org_collaborators = organization.get_org_collaborators_vocab()
contact_list = User.objects.filter(Q(Q(organization__in=org_collaborators) | Q(organization=organization)))
return contact_list
def private_messages_received(self):
""" Return all private messages a user is a recipient of.
Displayed in user inbox under 'inbox'.
"""
return self.private_message_recipient.all()
def private_messages_sent(self):
""" Return all private messages a user has sent.
Displayed in user inbox under 'sent'.
"""
return self.private_message_sender.all()
def get_user_searchable_content(self):
""" Return queryset of user specific content that is searchable.
A user can return their own notes in search results.
"""
return self.usernote_owner.all()
@property
def description(self):
org = self.organization.name if self.organization else "Contractor"
return "{user}, {title}, {org}".format(
user=self.credit_name,
title=self.title,
org=org,
)
@property
def search_title(self):
return self.credit_name
@property
def type(self):
return "User"
|
|
#! /usr/local/bin/python
import numpy as np
from . import downcut
from .avulsion_utils import lowest_cell_elev, sort_lowest_neighbors
def lowest_neighbor(n, sub):
"""Find lowest neighbor value around a point.
Parameters
----------
n : ndarray
Grid of elevations.
sub : tuple of int
Row/column subscript into elevation matrix.
Returns
-------
tuple of int
Subscripts into elevation matrix of the lowest neighbor.
"""
i, j = sub
if j == n.shape[1] - 1:
di, dj = np.array([0, 1, 1]), np.array([-1, -1, 0])
elif j == 0:
di, dj = np.array([1, 1, 0]), np.array([0, 1, 1])
else:
di, dj = np.array([0, 1, 1, 1, 0]), np.array([-1, -1, 0, 1, 1])
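    # di/dj enumerate the left, down-left, down, down-right and right
    # neighbors; rows only increase, so the search never steps back upstream,
    # and the two edge cases above drop the off-grid columns.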
lowest = np.argmin(n[i + di, j + dj])
return i + di[lowest], j + dj[lowest]
def lowest_neighbor_prograde(n, sub):
i, j = sub
if j == n.shape[1] - 1:
di, dj = np.array([0, 1, 1]), np.array([-1, -1, 0])
elif j == 0:
di, dj = np.array([1, 1, 0]), np.array([0, 1, 1])
else:
di, dj = np.array([0, 1, 1, 1, 0]), np.array([-1, -1, 0, 1, 1])
subaerial_low = min(x for x in (n[i + di, j + dj]) if x > 0)
lowest = np.where((n[i + di, j + dj]) == subaerial_low)[0][0]
return i + di[lowest], j + dj[lowest]
def below_sea_level(z, sea_level):
"""Check if an elevation is below sea level.
Parameters
----------
z : float
Elevation.
sea_level : float
Elevation of sea level.
Returns
-------
boolean
`True` if at or below sea level. Otherwise, `False`.
"""
return z <= sea_level
def at_river_mouth(z, sub, z0):
"""Check if a cell is at the river mouth.
Parameters
----------
z : ndarray
2D-array of elevations.
sub : tuple of int
Row and column subscript into *z*.
z0 : float
Elevation of sea level (or `None`).
Returns
-------
boolean
True if the cell at the given subscript is at the river mouth.
"""
try:
return sub[0] == z.shape[0] - 1 or below_sea_level(z[sub], z0)
except IndexError:
return True
def at_end_of_domain(z, sub):
"""Check if a cell a river mouth at the end of domain.
Parameters
----------
z : ndarray
2D-array of elevations.
sub : tuple of int
Row and column subscript into *z*.
Returns
-------
boolean
True if the cell at the given subscript is at the river mouth.
"""
try:
return sub[0] == z.shape[0] - 1
except IndexError:
return True
def riv_cell_at_sea_level(z, sub, z0):
"""Check if a river cell is at sea level.
Parameters
----------
z : ndarray
2D-array of elevations.
sub : tuple of int
Row and column subscript into *z*.
z0 : float
Elevation of sea level (or `None`).
Returns
-------
boolean
True if the cell at the given subscript is at the river mouth.
"""
try:
        return below_sea_level(z[sub], z0)
except IndexError:
return True
def find_course(z, riv_i, riv_j, SE_loc, channel_depth, sea_level=None):
"""Find the course of a river.
Given a river course as subscripts, (*i*, *j*), into an array of
elevations, (*z*), find the river path until the coast (*sea_level*) or
the end of the domain.
Parameters
----------
z : ndarray
Grid elevations.
    riv_i : array_like of int
        Row subscripts (into *z*) for river.
    riv_j : array_like of int
        Column subscripts (into *z*) for river.
    SE_loc : int
        Index into *riv_i*/*riv_j* at which the existing course is truncated
        before the new course is traced.
    channel_depth : float
        Channel depth added onto the levee surface along the current course.
    sea_level : float, optional
        The current sea level.
Returns
-------
tuple of array_like
Row and column indices of the new river path.
Examples
--------
>>> import numpy as np
>>> z = np.array([[4., 3., 4.],
... [2., 3., 3.],
... [2., 1., 2.]])
>>> riv_i, riv_j = np.zeros(9, dtype=int), np.zeros(9, dtype=int)
>>> riv_i[0], riv_j[0] = 0, 1
>>> find_course(z, riv_i[:1], riv_j[:1], 1)
(array([0, 1, 2]), array([1, 0, 1]))
>>> find_course(z, riv_i[:1], riv_j[:1], sea_level=2.)
(array([0, 1]), array([1, 0]))
>>> z = np.array([[4., 3., 4.],
... [2., 3., 3.],
... [2., 1., 2.],
... [2., 1.5, 2]])
>>> find_course(z, riv_i[:1], riv_j[:1], sea_level=0.)
(array([0, 1, 2, 3]), array([1, 0, 1, 1]))
"""
# function to find the steepest descent route
# note: this needs to be improved to remove potential bias that may occur
# if two or more cells have the steepest descent elevation
    old_course = list(zip(riv_i, riv_j))  # materialize; membership is tested repeatedly below
n_levee = np.copy(z)
n_levee[riv_i, riv_j] += channel_depth
riv_i = riv_i[:SE_loc]
riv_j = riv_j[:SE_loc]
assert riv_i.size > 0 and riv_j.size > 0
if sea_level is None:
sea_level = -np.finfo(float).max
for n in range(1, riv_i.size):
if at_end_of_domain(z, (riv_i[n - 1], riv_j[n - 1])):
return riv_i[:n], riv_j[:n]
# for n in range(1, riv_i.size):
# if riv_cell_at_sea_level(z, (riv_i[n - 1], riv_j[n - 1]), sea_level):
# return riv_i[:n-1], riv_j[:n-1]
    new_i = np.empty(z.size, dtype=int)
    new_j = np.empty(z.size, dtype=int)
new_i[: len(riv_j)] = riv_i[:]
new_j[: len(riv_i)] = riv_j[:]
pits = True
while pits:
for n in range(riv_i.size, new_i.size):
# if at_river_mouth(z, (new_i[n - 1], new_j[n - 1]), sea_level):
# pits = False
# break
if at_end_of_domain(z, (new_i[n - 1], new_j[n - 1])):
pits = False
break
sorted_n = sort_lowest_neighbors(n_levee, (new_i[n - 1], new_j[n - 1]))
if (sorted_n[0][0], sorted_n[1][0]) not in zip(
new_i[: n - 1], new_j[: n - 1]
):
downstream_ij = (sorted_n[0][0], sorted_n[1][0])
elif (sorted_n[0][1], sorted_n[1][1]) not in zip(
new_i[: n - 1], new_j[: n - 1]
):
downstream_ij = (sorted_n[0][1], sorted_n[1][1])
elif (sorted_n[0][2], sorted_n[1][2]) not in zip(
new_i[: n - 1], new_j[: n - 1]
):
downstream_ij = (sorted_n[0][2], sorted_n[1][2])
else:
raise RuntimeError("river course is going crazy!")
if downstream_ij not in old_course and below_sea_level(
z[downstream_ij], sea_level
):
pits = False
break
if downstream_ij in old_course:
new_i[n], new_j[n] = downstream_ij
n += 1
pits = False
break
if z[downstream_ij] > z[new_i[n - 1], new_j[n - 1]]:
new_i[n], new_j[n] = downstream_ij
z[new_i[n - 1], new_j[n - 1]] += 1e-6
else:
new_i[n], new_j[n] = downstream_ij
# new_i[n], new_j[n] = lowest_neighbor(z, (new_i[n - 1], new_j[n - 1]))
if n == 0:
raise RuntimeError("new river length is zero!")
return new_i[:n], new_j[:n]
def update_course(z, riv_i, riv_j, ch_depth, slope, sea_level=None, dx=1.0, dy=1.0):
if sea_level is None:
sea_level = -np.finfo(float).max
course_update = 0
last_elev = z[riv_i[-1], riv_j[-1]] + ch_depth - sea_level
max_cell_h = slope * dx
test_elev = np.copy(z)
test_elev -= sea_level
test_elev[riv_i, riv_j] += 2 * ch_depth
low_adj_cell = lowest_cell_elev(test_elev, (riv_i[-1], riv_j[-1]))
# check for coastal avulsion (happens if river is prograding too far alongshore)
if (
(
riv_i[-1] == riv_i[-2] == riv_i[-3] == riv_i[-4] == riv_i[-5]
) # if last 5 river cells flowing alongshore
and (z[riv_i[-2] + 1, riv_j[-2]] > sea_level)
# (z[riv_i[-1+1],riv_j[-1]] > sea_level) and # if land between river and ocean for last 3 cells
and (z[riv_i[-3] + 1, riv_j[-3]] > sea_level)
and (z[riv_i[-4] + 1, riv_j[-4]] > sea_level)
and (z[riv_i[-5] + 1, riv_j[-5]] > sea_level)
and (z[riv_i[-1] + 2, riv_j[-1]] <= sea_level)
and (z[riv_i[-2] + 2, riv_j[-2]] <= sea_level)
and (z[riv_i[-3] + 2, riv_j[-3]] <= sea_level)
and (z[riv_i[-4] + 2, riv_j[-4]] <= sea_level)
and (z[riv_i[-5] + 2, riv_j[-5]] <= sea_level)
):
# fill up old river mouth
z[riv_i[-1], riv_j[-1]] += ch_depth
# turn river towards ocean
riv_i[-1] = riv_i[-2] + 1
riv_j[-1] = riv_j[-2]
if (z[riv_i[-1], riv_j[-1]] - sea_level) < (0.001 * max_cell_h):
z[riv_i[-1], riv_j[-1]] = (0.001 * max_cell_h) + sea_level
# z[riv_i[-1],riv_j[-1]] -= ch_depth
downcut.cut_new(riv_i[-3:], riv_j[-3:], z, sea_level, ch_depth, dx=dx, dy=dy)
course_update = 7 # coastal avulsion
elif last_elev <= 0:
riv_i = riv_i[:-1]
riv_j = riv_j[:-1]
course_update = 4 # shortened course
# if river mouth surrounded by land
elif low_adj_cell > 0:
new_riv_i, new_riv_j = find_course(
z, riv_i, riv_j, len(riv_i), ch_depth, sea_level=sea_level
)
new_riv_length = new_riv_i.size - riv_i.size
if new_riv_length > 0:
riv_i = new_riv_i
riv_j = new_riv_j
if (z[riv_i[-1], riv_j[-1]] - sea_level) < (0.001 * max_cell_h):
z[riv_i[-1], riv_j[-1]] = (0.001 * max_cell_h) + sea_level
downcut.cut_new(
riv_i[-(new_riv_length + 2) :],
riv_j[-(new_riv_length + 2) :],
z,
sea_level,
ch_depth,
dx=dx,
dy=dy,
)
course_update = 6 # lengthened land-locked course
else:
riv_i = riv_i
riv_j = riv_j
# if river mouth needs to prograde
elif last_elev >= max_cell_h:
sorted_n = sort_lowest_neighbors(test_elev, (riv_i[-1], riv_j[-1]))
subaerial_loc = np.where(test_elev[sorted_n] > 0)
if len(subaerial_loc[0]):
subaerial_cells = sorted_n[0][subaerial_loc], sorted_n[1][subaerial_loc]
if (subaerial_cells[0][0], subaerial_cells[1][0]) not in zip(riv_i, riv_j):
riv_i = np.append(riv_i, subaerial_cells[0][0])
riv_j = np.append(riv_j, subaerial_cells[1][0])
if (z[riv_i[-1], riv_j[-1]] - sea_level) < (0.001 * max_cell_h):
z[riv_i[-1], riv_j[-1]] = (0.001 * max_cell_h) + sea_level
# line below not needed if downcutting
# z[riv_i[-1], riv_j[-1]] -= ch_depth
downcut.cut_new(
riv_i[-3:], riv_j[-3:], z, sea_level, ch_depth, dx=dx, dy=dy
)
course_update = 5
else:
riv_i = riv_i
riv_j = riv_j
else:
riv_i = riv_i
riv_j = riv_j
else:
riv_i = riv_i
riv_j = riv_j
return riv_i, riv_j, course_update
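# Editor's sketch (not part of the original module): a quick, importable check
# of the neighbor search on the 3x3 grid from the find_course docstring.
def _example_neighbor_search():
    """Verify one steepest-descent step and one sea-level test."""
    z = np.array([[4., 3., 4.],
                  [2., 3., 3.],
                  [2., 1., 2.]])
    # Neighbors of (0, 1) are 4, 2, 3, 3, 4, so steepest descent steps to (1, 0).
    assert tuple(int(v) for v in lowest_neighbor(z, (0, 1))) == (1, 0)
    # A cell at elevation 1.0 is below a sea level of 2.0.
    assert below_sea_level(z[2, 1], 2.0)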
|
|
import sys, os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(SCRIPT_DIR, '..'))
import json
import logging
import re
import sqlite3
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.websocket
from pysqrl import sqrl_conv
from pysqrl.server import SqrlHandler
#PORT = 8080
#SCHEME = 'http'
#if PORT == 443:
# SCHEME = 'https'
# URL = "%s://raiom.no" % (SCHEME)
#elif PORT == 80:
# URL = "%s://raiom.no" % (SCHEME)
#else:
# URL = "%s://raiom.no:%i" % (SCHEME, PORT)
PORT = 8080
SCHEME = 'http'
URL = "https://kanskje.de/sqrl"
sqrl_callback = None # TODO: Come up with a better approach than a global variable...?
class SqrlCallbackHandler(SqrlHandler):
def __init__(self):
self._websockets = []
self._conn = sqlite3.connect(os.path.join(SCRIPT_DIR, 'sample_server.sqlite'))
self._conn.row_factory = sqlite3.Row
self._db = self._conn.cursor()
try:
self._db.execute('select count(*) from sqrl_user')
except sqlite3.OperationalError:
self._db.execute("""create table sqrl_user (id INT, username TEXT, idk TEXT, suk TEXT, vuk TEXT)""")
self._sessions = {}
def close(self):
logging.info("closing db")
self._db.close()
self._conn.close()
def ident(self, session_id, idk, suk, vuk):
        if not self.id_found(idk):
            self._db.execute("insert into sqrl_user (idk, suk, vuk) values(?, ?, ?)", [str(idk), str(suk), str(vuk)])
            self._conn.commit()
if session_id in self._sessions:
# TODO: Reauthenticating a session?
logging.error("Ehhh, what?")
self._sessions[session_id] = idk
redirect_url = '%s/user?session_id=%s&msg=Session+authenticated' % (URL, session_id)
for ws in self._websockets:
if ws._session_id == session_id:
ws.redirect_socket_endpoint(redirect_url)
return redirect_url
def id_found(self, idk):
return self.get_user(idk) is not None
def get_user(self, idk):
return self._db.execute("select * from sqrl_user where idk = ?", [idk]).fetchone()
def update_user(self, idk, username):
self._db.execute("update sqrl_user set username = ? where idk = ?", [username, idk])
self._conn.commit()
def add_ws(self, ws):
self._websockets.append(ws)
def remove_ws(self, ws):
if ws in self._websockets:
self._websockets.remove(ws)
class SocketHandler(tornado.websocket.WebSocketHandler):
def __init__(self, application, request, **kwargs):
tornado.websocket.WebSocketHandler.__init__(self, application, request, **kwargs)
self._session_id = None
def check_origin(self, origin):
# http://tornadokevinlee.readthedocs.org/en/latest/websocket.html#tornado.websocket.WebSocketHandler.check_origin
return True
def open(self):
logging.info('opened')
def on_message(self, message):
logging.info("on_message: " + message)
data = json.loads(message)
self._session_id = data['session_id']
sqrl_callback.add_ws(self)
def on_close(self):
logging.info('closed')
sqrl_callback.remove_ws(self)
def redirect_socket_endpoint(self, url):
self.write_message('{"url": "%s"}' % url)
class SqrlRequestHandler(tornado.web.RequestHandler):
def post(self):
server = sqrl_callback.post(
self.get_argument('client', ""),
self.get_argument('server', ""),
self.get_argument('ids', ""))
self.write(sqrl_conv.base64_encode(server))
class HtmlHandler(tornado.web.RequestHandler):
def get_style_css(self):
self.writeln("@-webkit-keyframes fadeIt {")
self.writeln(" 0% { text-shadow: 0 0 25px red; }")
self.writeln("}")
self.writeln("@-moz-keyframes fadeIt {")
self.writeln(" 0% { text-shadow: 0 0 25px red; }")
self.writeln("}")
self.writeln("@-o-keyframes fadeIt {")
self.writeln(" 0% { text-shadow: 0 0 25px red; }")
self.writeln("}")
self.writeln("@keyframes fadeIt {")
self.writeln(" 0% { text-shadow: 0 0 25px red; }")
self.writeln("}")
self.writeln(".fadeShadow {")
self.writeln(" background-image:none !important;")
self.writeln(" -webkit-animation: fadeIt 3s linear;")
self.writeln(" -moz-animation: fadeIt 3s linear;")
self.writeln(" -o-animation: fadeIt 3s linear;")
self.writeln(" animation: fadeIt 3s linear;")
self.writeln("}")
self.set_header('Content-Type', 'text/css')
def get(self, path):
logging.debug("path: %r", path)
if path == 'style.css':
self.get_style_css()
elif path.startswith('user'):
self.get_user()
elif path.startswith('cancel'):
self.get_cancel()
elif path == '' or path == 'index.html':
self.get_index_html()
else:
self.send_error(404)
def get_index_html(self):
nut = sqrl_callback.get_nut()
ws_url = URL.replace('http', 'ws')
if URL.startswith('https'):
sqrl_url = URL.replace('https', 'sqrl')
else:
sqrl_url = URL.replace('http', 'qrl')
sqrl_url = '%s/sqrl?nut=%s' % (sqrl_url, nut)
#sqrl_url = '%s/sqrl?nut=%s&sfn=%s&can=%s' % (sqrl_url, nut,
# sqrl_conv.base64_encode("Fisken").decode(),
# sqrl_conv.base64_encode(URL + '/cancel').decode())
encoded_sqrl = sqrl_conv.base64_encode(sqrl_url).decode()
localhost_url = 'http://localhost:25519/' + encoded_sqrl
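        # The page below offers three sign-in paths: a plain sqrl:/qrl: link,
        # a direct link to the local client listener on localhost:25519, and a
        # JS "gifProbe" that keeps polling that listener with a throwaway .gif
        # request until a client answers, then redirects the browser to it
        # with the encoded SQRL URL.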
self.writeln("<html><head><title>Title goes here.</title></head>")
self.writeln("<body>")
self.writeln(" <p>Blipp fisken</p>")
self.writeln(" <a href='%s'>login</a>" % (sqrl_url))
self.writeln(" <a href='%s'>localhost</a>" % (localhost_url))
self.writeln(' <a href="%s" id="sqrl" onclick="sqrlLinkClick(\'%s\');" tabindex="-1">sqrl</a>' % (sqrl_url, encoded_sqrl))
self.writeln(' <script>')
self.writeln(' var ws = new WebSocket("%s/ws");' % (ws_url))
self.writeln(' ws.onopen = function(){')
self.writeln(' console.log("onopen");')
self.writeln(" ws.send('{\"session_id\": \"%s\"}');" % (nut))
self.writeln(' };')
self.writeln(' ws.onmessage = function(ev){')
self.writeln(' console.log("onmessage ev.data " + ev.data);')
self.writeln(' var json = JSON.parse(ev.data);')
self.writeln(' window.location.href = json.url;')
self.writeln(' };')
self.writeln(' ws.onclose = function(ev){')
self.writeln(' console.log("onclose");')
self.writeln(' };')
self.writeln(' ws.onerror = function(ev){')
self.writeln(' console.log("onerror");')
self.writeln(' };')
self.writeln(' </script>')
self.writeln(' <script>')
self.writeln(' var newSync, lastSync, encodedSqrlUrl = false;')
#self.writeln(' var syncQuery = window.XMLHttpRequest ? new window.XMLHttpRequest() : new ActiveXObject('MSXML2.XMLHTTP.3.0');
self.writeln(' var gifProbe = new Image(); // create an instance of a memory-based probe image')
self.writeln(' var localhostRoot = "http://localhost:25519/"; // the SQRL client listener')
self.writeln(' Date.now = Date.now || function() { return (+new Date()) }; // add old browser Date.now() support_)')
self.writeln('')
#self.writeln(' window.onload = function() {')
#self.writeln(' if ((navigator.userAgent.match(/linux/i)) && !(navigator.userAgent.match(/sqrl/i)) && !(navigator.userAgent.match(/android/i)))')
#self.writeln(' {')
#self.writeln(' document.getElementById("sqrl").onclick = function() { sqrlLinkClick(this); return false; };')
#self.writeln(' }')
#self.writeln(' }')
self.writeln('')
self.writeln(' gifProbe.onload = function() { // define our load-success function')
self.writeln(' console.log("gifProbe.onload");')
self.writeln(' document.location.href = localhostRoot + encodedSqrlUrl;')
self.writeln(' };')
self.writeln('')
self.writeln(' gifProbe.onerror = function() { // define our load-failure function')
self.writeln(' setTimeout( function(){ gifProbe.src = localhostRoot + Date.now() + ".gif"; }, 250 );')
self.writeln(' }')
self.writeln('')
self.writeln(' function sqrlLinkClick(encodedSqrlUrlIn) {')
self.writeln(' console.log("asdf");')
self.writeln(' console.log(encodedSqrlUrlIn);')
self.writeln(' encodedSqrlUrl = encodedSqrlUrlIn;')
self.writeln(' if ( encodedSqrlUrl ) { gifProbe.onerror(); }; // trigger the initial image probe query')
self.writeln(' }')
self.writeln(' </script>')
self.writeln(" <br/>")
self.writeln("</body></html>")
def get_user(self):
session_id = self.get_argument('session_id', None)
try:
idk = sqrl_callback._sessions[session_id]
except KeyError:
return self.redirect('/sqrl')
user = sqrl_callback.get_user(idk)
msg = self.get_argument('msg', None)
self.writeln("<html>")
self.writeln(" <head>")
self.writeln(" <title>Title goes here.</title></head>")
self.writeln(' <link href="/sqrl/style.css" rel="stylesheet" type="text/css"/>')
self.writeln(" </head>")
self.writeln("<body>")
self.writeln(" <p>Blipp fisken</p>")
self.writeln(" <p class='fadeShadow'>%s</p>" % msg)
self.writeln(" <p>Session: %s</p>" % (session_id))
self.writeln(" <p>idk: %s</p>" % (user['idk']))
self.writeln(" <p>suk: %s</p>" % (user['suk']))
self.writeln(" <p>vuk: %s</p>" % (user['vuk']))
self.writeln(" <form method='post'>")
self.writeln(" <input type='hidden' name='session_id' value='%s'>" % (session_id))
self.writeln(" <label for='blapp'>Display name:")
self.writeln(" <input type='text' name='blapp' value='%s'>" % (user['username'] if user['username'] else ''))
self.writeln(" </label>")
self.writeln(" <input type='submit' value='submit'>")
self.writeln(" </form>")
self.writeln("</body></html>")
def get_cancel(self):
self.writeln("<html>")
self.writeln(" <head>")
self.writeln(" <title>Title goes here.</title></head>")
self.writeln(" </head>")
self.writeln("<body>")
self.writeln(" <p>Authentication process was cancelled</p>")
self.writeln("</body></html>")
def post(self, path):
session_id = self.get_argument('session_id', None)
username = self.get_argument('blapp', None)
try:
idk = sqrl_callback._sessions[session_id]
except KeyError:
return self.redirect('/sqrl')
sqrl_callback.update_user(idk, username)
self.redirect('/sqrl/user?session_id=%s&msg=User+updated' % session_id)
def get_argument(self, key, default):
argument = tornado.web.RequestHandler.get_argument(self, key, default)
if re.match(r'^[A-Za-z0-9_ +-]*$', argument):
return argument
logging.error("Input did not match! %r", argument)
return default
def writeln(self, text):
self.write(text)
self.write('\n')
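# Editor's sketch (hypothetical helper, not used by the server): mirrors the
# argument whitelist in HtmlHandler.get_argument above. Values containing
# characters outside [A-Za-z0-9_ +-], such as '<' or '"', fall back to the
# default.
def _example_is_safe_argument(value):
    return re.match(r'^[A-Za-z0-9_ +-]*$', value) is not None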
def log_setup(verbose=False, logfilename=None, name=None):
formatter = logging.Formatter('%(asctime)s %(levelname)8s: %(message)s')
console = logging.StreamHandler()
console.setFormatter(formatter)
console.setLevel(logging.ERROR)
if verbose:
console.setLevel(logging.DEBUG)
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.addHandler(console)
if logfilename is None:
return
logfile = logging.FileHandler(logfilename)
logfile.setFormatter(formatter)
logfile.setLevel(logging.INFO)
if verbose:
logfile.setLevel(logging.DEBUG)
logger.addHandler(logfile)
if __name__ == "__main__":
log_setup(verbose=True)
sqrl_callback = SqrlCallbackHandler()
application = tornado.web.Application([
(r'/ws', SocketHandler),
(r"/sqrl", SqrlRequestHandler),
(r"/(.*)", HtmlHandler),
])
ssl_options = None
if SCHEME == 'https':
ssl_options = {
"certfile": os.path.join(__dir__, "ssl", "signed.crt"),
"keyfile": os.path.join(__dir__, "ssl", "domain.key"),
}
http_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options)
http_server.listen(PORT)
try:
tornado.ioloop.IOLoop.current().start()
except KeyboardInterrupt:
sqrl_callback.close()
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
import django.utils.timezone
from django.conf import settings
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Charge',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('stripe_id', models.CharField(unique=True, max_length=50)),
('card_last_4', models.CharField(max_length=4, blank=True)),
('card_kind', models.CharField(max_length=50, blank=True)),
('amount', models.DecimalField(null=True, max_digits=7, decimal_places=2)),
('amount_refunded', models.DecimalField(null=True, max_digits=7, decimal_places=2)),
('description', models.TextField(blank=True)),
('paid', models.NullBooleanField()),
('disputed', models.NullBooleanField()),
('refunded', models.NullBooleanField()),
('fee', models.DecimalField(null=True, max_digits=7, decimal_places=2)),
('receipt_sent', models.BooleanField(default=False)),
('charge_created', models.DateTimeField(null=True, blank=True)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CurrentSubscription',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('plan', models.CharField(max_length=100)),
('quantity', models.IntegerField()),
('start', models.DateTimeField()),
('status', models.CharField(max_length=25)),
('cancel_at_period_end', models.BooleanField(default=False)),
('canceled_at', models.DateTimeField(null=True, blank=True)),
('current_period_end', models.DateTimeField(null=True)),
('current_period_start', models.DateTimeField(null=True)),
('ended_at', models.DateTimeField(null=True, blank=True)),
('trial_end', models.DateTimeField(null=True, blank=True)),
('trial_start', models.DateTimeField(null=True, blank=True)),
('amount', models.DecimalField(max_digits=7, decimal_places=2)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('stripe_id', models.CharField(unique=True, max_length=50)),
('card_fingerprint', models.CharField(max_length=200, blank=True)),
('card_last_4', models.CharField(max_length=4, blank=True)),
('card_kind', models.CharField(max_length=50, blank=True)),
('date_purged', models.DateTimeField(null=True, editable=False)),
('user', models.OneToOneField(null=True, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('stripe_id', models.CharField(unique=True, max_length=50)),
('kind', models.CharField(max_length=250)),
('livemode', models.BooleanField(default=False)),
('webhook_message', jsonfield.fields.JSONField(default=dict)),
('validated_message', jsonfield.fields.JSONField(null=True)),
('valid', models.NullBooleanField()),
('processed', models.BooleanField(default=False)),
('customer', models.ForeignKey(to='djstripe.Customer', null=True, on_delete=models.CASCADE)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EventProcessingException',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('data', models.TextField()),
('message', models.CharField(max_length=500)),
('traceback', models.TextField()),
('event', models.ForeignKey(to='djstripe.Event', null=True, on_delete=models.CASCADE)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Invoice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('stripe_id', models.CharField(max_length=50)),
('attempted', models.NullBooleanField()),
('attempts', models.PositiveIntegerField(null=True)),
('closed', models.BooleanField(default=False)),
('paid', models.BooleanField(default=False)),
('period_end', models.DateTimeField()),
('period_start', models.DateTimeField()),
('subtotal', models.DecimalField(max_digits=7, decimal_places=2)),
('total', models.DecimalField(max_digits=7, decimal_places=2)),
('date', models.DateTimeField()),
('charge', models.CharField(max_length=50, blank=True)),
('customer', models.ForeignKey(related_name='invoices', to='djstripe.Customer', on_delete=models.CASCADE)),
],
options={
'ordering': ['-date'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='InvoiceItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('stripe_id', models.CharField(max_length=50)),
('amount', models.DecimalField(max_digits=7, decimal_places=2)),
('currency', models.CharField(max_length=10)),
('period_start', models.DateTimeField()),
('period_end', models.DateTimeField()),
('proration', models.BooleanField(default=False)),
('line_type', models.CharField(max_length=50)),
('description', models.CharField(max_length=200, blank=True)),
('plan', models.CharField(max_length=100, blank=True)),
('quantity', models.IntegerField(null=True)),
('invoice', models.ForeignKey(related_name='items', to='djstripe.Invoice', on_delete=models.CASCADE)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Plan',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('stripe_id', models.CharField(unique=True, max_length=50)),
('name', models.CharField(max_length=100)),
('currency', models.CharField(max_length=10, choices=[('usd', 'U.S. Dollars'), ('gbp', 'Pounds (GBP)'), ('eur', 'Euros')])),
('interval', models.CharField(max_length=10, verbose_name='Interval type', choices=[('week', 'Week'), ('month', 'Month'), ('year', 'Year')])),
('interval_count', models.IntegerField(default=1, null=True, verbose_name='Intervals between charges')),
('amount', models.DecimalField(verbose_name='Amount (per period)', max_digits=7, decimal_places=2)),
('trial_period_days', models.IntegerField(null=True)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Transfer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('stripe_id', models.CharField(unique=True, max_length=50)),
('amount', models.DecimalField(max_digits=7, decimal_places=2)),
('status', models.CharField(max_length=25)),
('date', models.DateTimeField()),
('description', models.TextField(null=True, blank=True)),
('adjustment_count', models.IntegerField()),
('adjustment_fees', models.DecimalField(max_digits=7, decimal_places=2)),
('adjustment_gross', models.DecimalField(max_digits=7, decimal_places=2)),
('charge_count', models.IntegerField()),
('charge_fees', models.DecimalField(max_digits=7, decimal_places=2)),
('charge_gross', models.DecimalField(max_digits=7, decimal_places=2)),
('collected_fee_count', models.IntegerField()),
('collected_fee_gross', models.DecimalField(max_digits=7, decimal_places=2)),
('net', models.DecimalField(max_digits=7, decimal_places=2)),
('refund_count', models.IntegerField()),
('refund_fees', models.DecimalField(max_digits=7, decimal_places=2)),
('refund_gross', models.DecimalField(max_digits=7, decimal_places=2)),
('validation_count', models.IntegerField()),
('validation_fees', models.DecimalField(max_digits=7, decimal_places=2)),
('event', models.ForeignKey(related_name='transfers', to='djstripe.Event', on_delete=models.CASCADE)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TransferChargeFee',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('amount', models.DecimalField(max_digits=7, decimal_places=2)),
('application', models.TextField(null=True, blank=True)),
('description', models.TextField(null=True, blank=True)),
('kind', models.CharField(max_length=150)),
('transfer', models.ForeignKey(related_name='charge_fee_details', to='djstripe.Transfer', on_delete=models.CASCADE)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.AddField(
model_name='currentsubscription',
name='customer',
field=models.OneToOneField(related_name='current_subscription', null=True, to='djstripe.Customer'),
preserve_default=True,
),
migrations.AddField(
model_name='charge',
name='customer',
field=models.ForeignKey(related_name='charges', to='djstripe.Customer', on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AddField(
model_name='charge',
name='invoice',
field=models.ForeignKey(related_name='charges', to='djstripe.Invoice', null=True, on_delete=models.CASCADE),
preserve_default=True,
),
]
|
|
import warnings
import unittest
import numpy as np
from scipy.spatial import ConvexHull
from pymatgen import Composition
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.analysis.phase_diagram import PhaseDiagram, \
GrandPotentialPhaseDiagram
from pymatgen.analysis.reaction_calculator import Reaction
from pymatgen.analysis.interface_reactions import InterfacialReactivity
class InterfaceReactionTest(unittest.TestCase):
def setUp(self):
self.entries = [ComputedEntry(Composition('Li'), 0),
ComputedEntry(Composition('Mn'), 0),
ComputedEntry(Composition('O2'), 0),
ComputedEntry(Composition('MnO2'), -10),
ComputedEntry(Composition('Mn2O4'), -60),
ComputedEntry(Composition('MnO3'), 20),
ComputedEntry(Composition('Li2O'), -10),
ComputedEntry(Composition('Li2O2'), -8),
ComputedEntry(Composition('LiMnO2'), -30)
]
self.pd = PhaseDiagram(self.entries)
chempots = {'Li': -3}
self.gpd = GrandPotentialPhaseDiagram(self.entries, chempots)
self.ir = []
# ir[0]
self.ir.append(
InterfacialReactivity(Composition('O2'), Composition('Mn'),
self.pd, norm=0, include_no_mixing_energy=0,
pd_non_grand=None, use_hull_energy=False))
# ir[1]
self.ir.append(
InterfacialReactivity(Composition('MnO2'), Composition('Mn'),
self.gpd, norm=0, include_no_mixing_energy=1,
pd_non_grand=self.pd, use_hull_energy=False))
# ir[2]
self.ir.append(
InterfacialReactivity(Composition('Mn'), Composition('O2'),
self.gpd, norm=1, include_no_mixing_energy=1,
pd_non_grand=self.pd, use_hull_energy=False))
# ir[3]
self.ir.append(
InterfacialReactivity(Composition('Li2O'), Composition('Mn'),
self.gpd, norm=0, include_no_mixing_energy=1,
pd_non_grand=self.pd, use_hull_energy=False))
# ir[4]
self.ir.append(
InterfacialReactivity(Composition('Mn'), Composition('O2'),
self.gpd, norm=1, include_no_mixing_energy=0,
pd_non_grand=self.pd, use_hull_energy=False))
# ir[5]
self.ir.append(
InterfacialReactivity(Composition('Mn'), Composition('Li2O'),
self.gpd, norm=1, include_no_mixing_energy=1,
pd_non_grand=self.pd, use_hull_energy=False))
# ir[6]
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('Li'),
self.pd, norm=0, include_no_mixing_energy=0,
pd_non_grand=None, use_hull_energy=True))
# ir[7]
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('Li'),
self.pd, norm=0, include_no_mixing_energy=0,
pd_non_grand=None, use_hull_energy=False))
# ir[8]
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('MnO2'),
self.gpd, norm=0, include_no_mixing_energy=0,
pd_non_grand=self.pd, use_hull_energy=True))
# ir[9]
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('MnO2'),
self.gpd, norm=0, include_no_mixing_energy=0,
pd_non_grand=self.pd, use_hull_energy=False))
# ir[10]
self.ir.append(
InterfacialReactivity(Composition('O2'), Composition('Mn'),
self.pd, norm=1, include_no_mixing_energy=0,
pd_non_grand=None, use_hull_energy=False))
# ir[11]
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('Li2O2'),
self.gpd, norm=1, include_no_mixing_energy=1,
pd_non_grand=self.pd, use_hull_energy=False))
# ir[12]
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('Li2O2'),
self.pd, norm=1, include_no_mixing_energy=0,
pd_non_grand=None, use_hull_energy=False))
with self.assertRaises(Exception) as context1:
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('Li'),
self.pd, norm=1,
include_no_mixing_energy=1,
pd_non_grand=None))
self.assertTrue(
'Please provide grand phase diagram '
'to compute no_mixing_energy!' == str(context1.exception))
with self.assertRaises(Exception) as context2:
self.ir.append(
InterfacialReactivity(Composition('O2'), Composition('Mn'),
self.gpd, norm=0,
include_no_mixing_energy=1,
pd_non_grand=None))
self.assertTrue(
'Please provide non-grand phase diagram '
'to compute no_mixing_energy!' == str(context2.exception))
def test_get_entry_energy(self):
# Test warning message.
comp = Composition('MnO3')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
energy = InterfacialReactivity._get_entry_energy(self.pd, comp)
self.assertTrue(len(w) == 1)
self.assertTrue("The reactant MnO3 has no matching entry with"
" negative formation energy, instead convex "
"hull energy for this composition will be used"
" for reaction energy calculation."
in str(w[-1].message))
test1 = np.isclose(energy, -30, atol=1e-03)
self.assertTrue(test1,
'_get_entry_energy: energy for {} is wrong!'.format(
comp.reduced_formula))
# Test normal functionality
comp = Composition('MnO2')
test2 = np.isclose(
InterfacialReactivity._get_entry_energy(self.pd, comp),
-30, atol=1e-03)
self.assertTrue(test2,
'_get_entry_energy: energy for {} is wrong!'.format(
comp.reduced_formula))
def test_get_grand_potential(self):
comp = Composition('LiMnO2')
# Test non-normalized case
test1 = np.isclose(self.ir[1]._get_grand_potential(comp), -27,
atol=1e-03)
self.assertTrue(test1,
'_get_grand_potential: '
'Non-normalized case gets error!')
# Test normalized case
test2 = np.isclose(self.ir[2]._get_grand_potential(comp), -9,
atol=1e-03)
self.assertTrue(test2,
'_get_grand_potential: '
'Normalized case gets error!')
comp2 = Composition('Li2O2')
# Test use_hull_energy option.
test3 = np.isclose(self.ir[8]._get_grand_potential(comp2), -4,
atol=1e-03)
self.assertTrue(test3,
'_get_grand_potential: '
'get hull energy gets error!')
test4 = np.isclose(self.ir[9]._get_grand_potential(comp2), -2,
atol=1e-03)
self.assertTrue(test4,
'_get_grand_potential: '
'gets error for {}!'.format(comp2.reduced_formula))
def test_get_energy(self):
test1 = (np.isclose(self.ir[0]._get_energy(0.5), -15, atol=1e-03))
self.assertTrue(test1, '_get_energy: phase diagram gets error!')
test2 = (
np.isclose(self.ir[3]._get_energy(0.6666666),
-7.333333, atol=1e-03))
self.assertTrue(test2,
'_get_energy: '
'grand canonical phase diagram gets error!')
test3 = (
np.isclose(self.ir[6]._get_energy(0.3333333),
-3.333333, atol=1e-03))
self.assertTrue(test3,
'_get_energy: convex hull energy gets error. ')
test4 = (
np.isclose(self.ir[7]._get_energy(0.3333333),
-4, atol=1e-03))
self.assertTrue(test4,
'_get_energy: gets error. ')
def test_get_reaction(self):
test1 = str(self.ir[0]._get_reaction(0.5)) == '0.5 O2 + 0.5 Mn -> ' \
'0.5 MnO2'
self.assertTrue(test1,
'_get_reaction: '
'reaction not involving chempots species gets error!')
test2 = str(self.ir[3]._get_reaction(0.666666)) \
== '0.5 Mn + 0.5 Li2O -> Li + 0.25 MnO2 + 0.25 Mn' \
or str(self.ir[3]._get_reaction(0.666666)) \
== '0.5 Mn + 0.5 Li2O -> Li + 0.25 Mn + 0.25 MnO2'
self.assertTrue(test2,
'_get_reaction: '
'reaction involving chempots species gets error!')
def test_get_get_elmt_amt_in_rxt(self):
rxt1 = Reaction(
[Composition('Mn'), Composition('O2'), Composition('Li')],
[Composition('LiMnO2')])
test1 = np.isclose(self.ir[2]._get_elmt_amt_in_rxt(rxt1), 3)
self.assertTrue(test1,
'_get_get_elmt_amt_in_rxt: '
'gpd elements amounts gets error!')
rxt2 = rxt1
rxt2.normalize_to(Composition('Li'), 0.5)
test2 = np.isclose(self.ir[2]._get_elmt_amt_in_rxt(rxt2), 1.5)
self.assertTrue(test2,
'_get_get_elmt_amt_in_rxt: '
'gpd elements amounts gets error!')
rxt3 = Reaction([Composition('O2'), Composition('Li')],
[Composition('Li2O')])
# Li is not counted
test3 = np.isclose(self.ir[2]._get_elmt_amt_in_rxt(rxt3), 1)
self.assertTrue(test3,
'_get_get_elmt_amt_in_rxt: '
'gpd elements amounts gets error!')
# Li is counted
test4 = np.isclose(self.ir[6]._get_elmt_amt_in_rxt(rxt3), 3)
self.assertTrue(test4,
'_get_get_elmt_amt_in_rxt: '
'pd elements amounts gets error!')
def test_convert(self):
test_array = [(0.5, 1, 3), (0.4, 2, 3), (0, 1, 9), (1, 2, 7)]
result = [InterfacialReactivity._convert(x, f1, f2)
for x, f1, f2 in test_array]
answer = [0.75, 0.5, 0, 1]
self.assertTrue(np.allclose(result, answer),
'_convert: conversion gets error! {0} expected,'
' but gets {1}'.format(answer, result))
def test_reverse_convert(self):
test_array = [(0.5, 1, 3), (0.4, 2, 3), (0, 1, 9), (1, 2, 7)]
result = [InterfacialReactivity._reverse_convert(x, f1, f2)
for x, f1, f2 in test_array]
answer = [0.25, 0.3076923, 0, 1]
self.assertTrue(np.allclose(result, answer),
'_convert: conversion gets error! {0} expected,'
' but gets {1}'.format(answer, result))
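    # Editor's note (formula inferred from the fixtures above, not from the
    # pymatgen docs): the two helpers are consistent with
    #     _convert(x, f1, f2)         == x*f2 / ((1 - x)*f1 + x*f2)
    #     _reverse_convert(x, f1, f2) == x*f1 / ((1 - x)*f2 + x*f1)
    # e.g. _convert(0.5, 1, 3) == 0.75 and _reverse_convert(0.5, 1, 3) == 0.25.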
def test_get_products(self):
test1 = sorted(self.ir[0].get_products()) == sorted(
['MnO2', 'O2', 'Mn'])
self.assertTrue(test1,
'get_products: decomposition products gets error '
'for reaction not involving chempots species!')
test2 = sorted(self.ir[3].get_products()) == sorted(
['Li', 'MnO2', 'Mn', 'Li2O'])
self.assertTrue(test2,
'get_decomp: decomposition products gets error '
'for reaction involving chempots species!')
def test_get_kinks(self):
def test_get_kinks_helper(ir, index_expect,
x_kink_expect, energy_kink_expect,
react_kink_expect,
energy_per_rxt_kink_expect):
lst = list(ir.get_kinks())
index = [i[0] for i in lst]
x_kink = [i[1] for i in lst]
energy_kink = [i[2] for i in lst]
react_kink = [str(i[3]) for i in lst]
energy_per_rxt_kink = [i[4] for i in lst]
test1 = index == index_expect
self.assertTrue(test1, 'get_kinks:index gets error!')
test2 = np.allclose(x_kink, x_kink_expect)
self.assertTrue(test2, 'get_kinks:x kinks gets error!')
test3 = np.allclose(energy_kink, energy_kink_expect)
self.assertTrue(test3, 'get_kinks:energy kinks gets error!')
# Testing reaction strings are hard,
# as species could be arranged in random order.
test4 = len(react_kink) == len(react_kink_expect)
self.assertTrue(test4,
'get_kinks: reaction kinks '
'gets error for {0} and {1} reaction!'.format(
ir.c1_original.reduced_formula,
ir.c2_original.reduced_formula))
test5 = np.allclose(energy_per_rxt_kink,
energy_per_rxt_kink_expect)
self.assertTrue(test5,
'get_kinks: energy_per_rxt_kinks gets error!')
test_get_kinks_helper(self.ir[0], [1, 2, 3], [0, 0.5, 1],
[0, -15, 0],
['Mn -> Mn', '0.5 O2 + 0.5 Mn -> 0.5 MnO2',
'O2 -> O2'],
[0,
-15 * InterfacialReactivity.EV_TO_KJ_PER_MOL,
0])
test_get_kinks_helper(self.ir[10], [1, 2, 3], [0, 0.66667, 1],
[0, -10, 0],
['Mn -> Mn', '0.5 O2 + 0.5 Mn -> 0.5 MnO2',
'O2 -> O2'],
[0,
-15 * InterfacialReactivity.EV_TO_KJ_PER_MOL,
0])
test_get_kinks_helper(self.ir[11], [1, 2], [0, 1], [-3, -3],
['Li2O2 + 2 Li -> 2 Li2O',
'Li2O2 + 2 Li -> 2 Li2O'],
[-6 * InterfacialReactivity.EV_TO_KJ_PER_MOL] *
2)
test_get_kinks_helper(self.ir[12], [1, 2], [0, 1], [-0.5, -0.5],
['Li2O2 -> Li2O + 0.5 O2',
'Li2O2 -> Li2O + 0.5 O2'],
[-2 * InterfacialReactivity.EV_TO_KJ_PER_MOL] *
2)
def test_convexity(self):
def test_convexity_helper(ir):
lst = list(ir.get_kinks())
x_kink = [i[1] for i in lst]
energy_kink = [i[2] for i in lst]
points = list(zip(x_kink, energy_kink))
if len(points) >= 3:
# To test convexity of the plot, construct convex hull from
# the kinks and make sure
# 1. all points are below the end points
# 2. all points are on the convex hull.
relative_vectors_1 = [(x - x_kink[0], e - energy_kink[0])
for x, e in points]
relative_vectors_2 = [(x - x_kink[-1], e - energy_kink[-1])
for x, e in points]
relative_vectors = zip(relative_vectors_1, relative_vectors_2)
positions = [np.cross(v1, v2) for v1, v2 in relative_vectors]
test1 = np.all(np.array(positions) <= 0)
hull = ConvexHull(points)
test2 = len(hull.vertices) == len(points)
self.assertTrue(test1 and test2,
'Error: Generating non-convex plot!')
        for ir in self.ir:
            test_convexity_helper(ir)
def test_get_original_composition_ratio(self):
# expected reaction1: 0.5 O2 + 0.5 Mn -> 0.5 MnO2
reaction1 = self.ir[0]._get_reaction(0.5)
test1 = np.isclose(self.ir[0]._get_original_composition_ratio(
reaction1), 0.5)
self.assertTrue(test1,
'_get_original_composition_ratio: '
'reaction not involving chempots species gets error!')
# expected reaction2: 0.5 Mn + 0.5 Li2O -> Li + 0.25 MnO2 + 0.25 Mn
reaction2 = self.ir[3]._get_reaction(0.666666)
test2 = np.isclose(self.ir[3]._get_original_composition_ratio(
reaction2), 0.5)
self.assertTrue(test2,
'_get_original_composition_ratio: '
'reaction involving chempots species gets error!')
def test_get_critical_original_kink_ratio(self):
test1 = np.allclose(self.ir[0].get_critical_original_kink_ratio(),
[0, 0.5, 1])
self.assertTrue(test1, 'get_critical_original_kink_ratio:'
' gets error!')
test2 = np.allclose(self.ir[10].get_critical_original_kink_ratio(),
[0, 0.5, 1])
self.assertTrue(test2, 'get_critical_original_kink_ratio:'
' gets error!')
test3 = np.allclose(self.ir[11].get_critical_original_kink_ratio(),
[0, 1])
self.assertTrue(test3, 'get_critical_original_kink_ratio:'
' gets error!')
test4 = np.allclose(self.ir[2].get_critical_original_kink_ratio(),
[0, 0.5, 1])
self.assertTrue(test4, 'get_critical_original_kink_ratio:'
' gets error!')
test5 = np.allclose(self.ir[3].get_critical_original_kink_ratio(),
[0, 0.66666, 1])
self.assertTrue(test5, 'get_critical_original_kink_ratio:'
' gets error!')
def test_labels(self):
ir = self.ir[0]
        labels = ir.labels()
        test1 = labels == {
            1: 'x= 0.0 energy in eV/atom = 0.0 Mn -> Mn',
            2: 'x= 0.5 energy in eV/atom = -15.0 0.5 O2 + 0.5 Mn -> 0.5 MnO2',
            3: 'x= 1.0 energy in eV/atom = 0.0 O2 -> O2'}
self.assertTrue(test1,
'labels:label does not match for interfacial system '
'with {0} and {1}.'.format(
ir.c1_original.reduced_formula,
ir.c2_original.reduced_formula))
def test_plot(self):
# Test plot is hard. Here just to call the plot function to see if any
# error occurs.
for i in self.ir:
i.plot()
def test_minimum(self):
answer = [
(0.5, -15),
(0, 0),
(0.3333333, -10),
(0.6666666, -7.333333),
(0.3333333, -7.333333),
(0.1428571, -7.333333),
(0.3333333, -3.333333),
(0.3333333, -4.0),
]
for i, j in zip(self.ir, answer):
self.assertTrue(np.allclose(i.minimum(), j),
'minimum: the system with {0} and {1} '
'gets error!{2} expected, but gets {3}'.format(
i.c1_original.reduced_formula,
i.c2_original.reduced_formula, str(j),
str(i.minimum())))
def test_get_no_mixing_energy(self):
with self.assertRaises(Exception) as context1:
self.ir[0].get_no_mixing_energy()
self.assertTrue(
'Please provide grand potential phase diagram'
' for computing no_mixing_energy!' == str(context1.exception))
answer = [
[(u'MnO2 (eV/f.u.)', 0.0), (u'Mn (eV/f.u.)', 0.0)],
[(u'Mn (eV/atom)', 0.0), (u'O2 (eV/atom)', -4.0)],
[(u'Li2O (eV/f.u.)', 0.0), (u'Mn (eV/f.u.)', 0.0)],
[(u'Mn (eV/atom)', 0.0), (u'O2 (eV/atom)', -4.0)],
[(u'Mn (eV/atom)', 0.0), (u'Li2O (eV/atom)', 0.0)]
]
def name_lst(lst):
return (lst[0][0], lst[1][0])
def energy_lst(lst):
return (lst[0][1], lst[1][1])
result_info = [i.get_no_mixing_energy() for i in self.ir if i.grand]
for i, j in zip(result_info, answer):
self.assertTrue(name_lst(i) == name_lst(j),
'get_no_mixing_energy: names get error,'
' {0} expected but gets {1}'.format(
name_lst(j), name_lst(i)))
self.assertTrue(np.allclose(energy_lst(i), energy_lst(j)),
'get_no_mixing_energy: '
'no_mixing energies get error, '
'{0} expected but gets {1}'.format(
energy_lst(j), energy_lst(i)))
def test_get_chempot_correction(self):
# test data from fig. 6 in ref:
# Prediction of A2BX4 metal-chalcogenide compounds via
# first-principles thermodynamics, PHYSICAL REVIEW B 86, 014109 (2012)
# test pressure effect.
actual = InterfacialReactivity.get_chempot_correction("O", 298.15,
100E5)
expect = 0.05916
self.assertTrue(np.isclose(actual, expect, atol=1E-2),
"get_chempot_correction gets "
"error, {0} expected but gets {1}".format(expect,
actual))
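        # Editorial cross-check (not part of the original test): for an ideal
        # gas the shift per O atom should be about k_B * T * ln(p / p0) / 2,
        # i.e. 8.617e-5 eV/K * 298.15 K * ln(100e5 / 1e5) / 2 ~= 0.0592 eV,
        # consistent with the 0.05916 reference above.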
# test temperature effect.
actual_2 = InterfacialReactivity.get_chempot_correction("O", 1000,
1E5)
expect_2 = -0.82352
self.assertTrue(np.isclose(actual_2, expect_2, atol=1E-2),
"get_chempot_correction gets "
"error, {0} expected but gets {1}".format(expect_2,
actual_2))
actual_3 = InterfacialReactivity.get_chempot_correction("O", 500,
1E5)
expect_3 = -0.223
self.assertTrue(np.isclose(actual_3, expect_3, atol=1E-2),
"get_chempot_correction gets "
"error, {0} expected but gets {1}".format(expect_3,
actual_3))
# test mixed effect.
actual_4 = InterfacialReactivity.get_chempot_correction("O", 1000,
1E-25)
expect_4 = -3.800
self.assertTrue(np.isclose(actual_4, expect_4, atol=1E-2),
"get_chempot_correction gets "
"error, {0} expected but gets {1}".format(expect_4,
actual_4))
actual_5 = InterfacialReactivity.get_chempot_correction("O", 1250,
1E-25)
expect_5 = -4.86
self.assertTrue(np.isclose(actual_5, expect_5, atol=1E-2),
"get_chempot_correction gets "
"error, {0} expected but gets {1}".format(expect_5,
actual_5))
actual_6 = InterfacialReactivity.get_chempot_correction("O", 1500,
1E-25)
expect_6 = -5.928
self.assertTrue(np.isclose(actual_6, expect_6, atol=1E-2),
"get_chempot_correction gets "
"error, {0} expected but gets {1}".format(expect_6,
actual_6))
actual_7 = InterfacialReactivity.get_chempot_correction("O", 1000,
1E-15)
expect_7 = -2.808
self.assertTrue(np.isclose(actual_7, expect_7, atol=1E-2),
"get_chempot_correction gets "
"error, {0} expected but gets {1}".format(expect_7,
actual_7))
# test non-gas phase.
actual_8 = InterfacialReactivity.get_chempot_correction("Li", 1000,
1E15)
expect_8 = 0
self.assertTrue(np.isclose(actual_8, expect_8, atol=1E-5),
"get_chempot_correction gets "
"error, {0} expected but gets {1}".format(expect_8,
actual_8))
if __name__ == '__main__':
unittest.main()
|
|
# Authors: Alexandre Barachant <[email protected]>
# Asish Panda <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from scipy import linalg
from .. import EvokedArray, Evoked
from ..cov import Covariance, _regularized_covariance
from ..decoding import TransformerMixin, BaseEstimator
from ..epochs import BaseEpochs
from ..io import BaseRaw
from ..io.pick import _pick_data_channels, pick_info
from ..utils import logger
from ..externals.six import iteritems, itervalues
def _construct_signal_from_epochs(epochs, events, sfreq, tmin):
"""Reconstruct pseudo continuous signal from epochs."""
n_epochs, n_channels, n_times = epochs.shape
tmax = tmin + n_times / float(sfreq)
start = (np.min(events[:, 0]) + int(tmin * sfreq))
stop = (np.max(events[:, 0]) + int(tmax * sfreq) + 1)
n_samples = stop - start
n_epochs, n_channels, n_times = epochs.shape
events_pos = events[:, 0] - events[0, 0]
raw = np.zeros((n_channels, n_samples))
for idx in range(n_epochs):
onset = events_pos[idx]
offset = onset + n_times
raw[:, onset:offset] = epochs[idx]
return raw
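# Illustrative shape sketch (editorial, not part of the original module; the
# synthetic arrays below are assumptions):
# rng = np.random.RandomState(0)
# epochs = rng.randn(3, 2, 10)                 # 3 epochs, 2 channels, 10 samples
# events = np.array([[0, 0, 1], [10, 0, 1], [20, 0, 2]])
# _construct_signal_from_epochs(epochs, events, sfreq=1., tmin=0.).shape
# # -> (2, 31): each epoch is written back at its event-onset sample.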
def _least_square_evoked(epochs_data, events, tmin, sfreq):
"""Least square estimation of evoked response from epochs data.
Parameters
----------
    epochs_data : array, shape (n_epochs, n_channels, n_times)
The epochs data to estimate evoked.
events : array, shape (n_events, 3)
The events typically returned by the read_events function.
If some events don't match the events of interest as specified
by event_id, they will be ignored.
tmin : float
Start time before event.
sfreq : float
Sampling frequency.
Returns
-------
evokeds : array, shape (n_class, n_components, n_times)
        A concatenated array of evoked data for each event type.
toeplitz : array, shape (n_class * n_components, n_channels)
        A concatenated array of Toeplitz matrices for each event type.
"""
n_epochs, n_channels, n_times = epochs_data.shape
tmax = tmin + n_times / float(sfreq)
# Deal with shuffled epochs
events = events.copy()
events[:, 0] -= events[0, 0] + int(tmin * sfreq)
# Construct raw signal
raw = _construct_signal_from_epochs(epochs_data, events, sfreq, tmin)
# Compute the independent evoked responses per condition, while correcting
# for event overlaps.
n_min, n_max = int(tmin * sfreq), int(tmax * sfreq)
window = n_max - n_min
n_samples = raw.shape[1]
toeplitz = list()
classes = np.unique(events[:, 2])
for ii, this_class in enumerate(classes):
# select events by type
sel = events[:, 2] == this_class
# build toeplitz matrix
trig = np.zeros((n_samples, 1))
ix_trig = (events[sel, 0]) + n_min
trig[ix_trig] = 1
toeplitz.append(linalg.toeplitz(trig[0:window], trig))
# Concatenate toeplitz
toeplitz = np.array(toeplitz)
X = np.concatenate(toeplitz)
# least square estimation
predictor = np.dot(linalg.pinv(np.dot(X, X.T)), X)
evokeds = np.dot(predictor, raw.T)
evokeds = np.transpose(np.vsplit(evokeds, len(classes)), (0, 2, 1))
return evokeds, toeplitz
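# Editorial sketch of the estimator above: with the per-class Toeplitz designs
# stacked into X, the evoked responses solve the least-squares problem
#   evokeds = (X X^T)^+ X raw^T,
# which disentangles temporally overlapping events instead of plain averaging.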
def _fit_xdawn(epochs_data, y, n_components, reg=None, signal_cov=None,
events=None, tmin=0., sfreq=1., method_params=None, info=None):
"""Fit filters and coefs using Xdawn Algorithm.
Xdawn is a spatial filtering method designed to improve the signal
to signal + noise ratio (SSNR) of the event related responses. Xdawn was
originally designed for P300 evoked potential by enhancing the target
response with respect to the non-target response. This implementation is a
generalization to any type of event related response.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The epochs data.
y : array, shape (n_epochs)
The epochs class.
n_components : int (default 2)
        The number of components to decompose the signals.
reg : float | str | None (default None)
If not None (same as ``'empirical'``, default), allow
regularization for covariance estimation.
If float, shrinkage is used (0 <= shrinkage <= 1).
For str options, ``reg`` will be passed as ``method`` to
:func:`mne.compute_covariance`.
signal_cov : None | Covariance | array, shape (n_channels, n_channels)
The signal covariance used for whitening of the data.
if None, the covariance is estimated from the epochs signal.
events : array, shape (n_epochs, 3)
The epochs events, used to correct for epochs overlap.
tmin : float
Epochs starting time. Only used if events is passed to correct for
epochs overlap.
sfreq : float
Sampling frequency. Only used if events is passed to correct for
epochs overlap.
Returns
-------
filters : array, shape (n_channels, n_channels)
The Xdawn components used to decompose the data for each event type.
patterns : array, shape (n_channels, n_channels)
The Xdawn patterns used to restore the signals for each event type.
evokeds : array, shape (n_class, n_components, n_times)
The independent evoked responses per condition.
"""
n_epochs, n_channels, n_times = epochs_data.shape
classes = np.unique(y)
# Retrieve or compute whitening covariance
if signal_cov is None:
signal_cov = _regularized_covariance(
np.hstack(epochs_data), reg, method_params, info)
elif isinstance(signal_cov, Covariance):
signal_cov = signal_cov.data
if not isinstance(signal_cov, np.ndarray) or (
not np.array_equal(signal_cov.shape,
np.tile(epochs_data.shape[1], 2))):
raise ValueError('signal_cov must be None, a covariance instance, '
'or an array of shape (n_chans, n_chans)')
# Get prototype events
if events is not None:
evokeds, toeplitzs = _least_square_evoked(
epochs_data, events, tmin, sfreq)
else:
evokeds, toeplitzs = list(), list()
for c in classes:
# Prototyped response for each class
evokeds.append(np.mean(epochs_data[y == c, :, :], axis=0))
toeplitzs.append(1.)
filters = list()
patterns = list()
for evo, toeplitz in zip(evokeds, toeplitzs):
# Estimate covariance matrix of the prototype response
evo = np.dot(evo, toeplitz)
evo_cov = _regularized_covariance(evo, reg, method_params, info)
# Fit spatial filters
try:
evals, evecs = linalg.eigh(evo_cov, signal_cov)
except np.linalg.LinAlgError as exp:
raise ValueError('Could not compute eigenvalues, ensure '
'proper regularization (%s)' % (exp,))
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
_patterns = np.linalg.pinv(evecs.T)
filters.append(evecs[:, :n_components].T)
patterns.append(_patterns[:, :n_components].T)
filters = np.concatenate(filters, axis=0)
patterns = np.concatenate(patterns, axis=0)
evokeds = np.array(evokeds)
return filters, patterns, evokeds
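# Editorial sketch of the criterion solved above: for each class the spatial
# filters w maximize the Rayleigh quotient (w^T C_evoked w) / (w^T C_signal w),
# obtained from the generalized eigendecomposition
#   C_evoked w = lambda * C_signal w   (scipy.linalg.eigh(evo_cov, signal_cov)),
# keeping the eigenvectors with the largest eigenvalues.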
class _XdawnTransformer(BaseEstimator, TransformerMixin):
"""Implementation of the Xdawn Algorithm compatible with scikit-learn.
Xdawn is a spatial filtering method designed to improve the signal
to signal + noise ratio (SSNR) of the event related responses. Xdawn was
originally designed for P300 evoked potential by enhancing the target
response with respect to the non-target response. This implementation is a
generalization to any type of event related response.
.. note:: _XdawnTransformer does not correct for epochs overlap. To correct
overlaps see ``Xdawn``.
Parameters
----------
n_components : int (default 2)
The number of components to decompose the signals.
reg : float | str | None (default None)
If not None (same as ``'empirical'``, default), allow
regularization for covariance estimation.
If float, shrinkage is used (0 <= shrinkage <= 1).
        For str options, ``reg`` will be passed as ``method`` to
:func:`mne.compute_covariance`.
signal_cov : None | Covariance | array, shape (n_channels, n_channels)
The signal covariance used for whitening of the data.
if None, the covariance is estimated from the epochs signal.
method_params : dict | None
Parameters to pass to :func:`mne.compute_covariance`.
.. versionadded:: 0.16
Attributes
----------
classes_ : array, shape (n_classes)
The event indices of the classes.
filters_ : array, shape (n_channels, n_channels)
The Xdawn components used to decompose the data for each event type.
patterns_ : array, shape (n_channels, n_channels)
The Xdawn patterns used to restore the signals for each event type.
"""
def __init__(self, n_components=2, reg=None, signal_cov=None,
method_params=None):
"""Init."""
self.n_components = n_components
self.signal_cov = signal_cov
self.reg = reg
self.method_params = method_params
def fit(self, X, y=None):
"""Fit Xdawn spatial filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_samples)
The target data.
y : array, shape (n_epochs,) | None
The target labels. If None, Xdawn fit on the average evoked.
Returns
-------
self : Xdawn instance
The Xdawn instance.
"""
X, y = self._check_Xy(X, y)
# Main function
self.classes_ = np.unique(y)
self.filters_, self.patterns_, _ = _fit_xdawn(
X, y, n_components=self.n_components, reg=self.reg,
signal_cov=self.signal_cov, method_params=self.method_params)
return self
def transform(self, X):
"""Transform data with spatial filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_samples)
The target data.
Returns
-------
X : array, shape (n_epochs, n_components * n_classes, n_samples)
The transformed data.
"""
X, _ = self._check_Xy(X)
# Check size
if self.filters_.shape[1] != X.shape[1]:
raise ValueError('X must have %i channels, got %i instead.' % (
self.filters_.shape[1], X.shape[1]))
# Transform
X = np.dot(self.filters_, X)
X = X.transpose((1, 0, 2))
return X
def inverse_transform(self, X):
"""Remove selected components from the signal.
Given the unmixing matrix, transform data, zero out components,
and inverse transform the data. This procedure will reconstruct
the signals from which the dynamics described by the excluded
components is subtracted.
Parameters
----------
X : array, shape (n_epochs, n_components * n_classes, n_times)
The transformed data.
Returns
-------
        X : array, shape (n_epochs, n_channels, n_times)
            The inverse transformed data.
"""
# Check size
X, _ = self._check_Xy(X)
n_components, n_channels = self.patterns_.shape
n_epochs, n_comp, n_times = X.shape
if n_comp != (self.n_components * len(self.classes_)):
raise ValueError('X must have %i components, got %i instead' % (
self.n_components * len(self.classes_), n_comp))
# Transform
return np.dot(self.patterns_.T, X).transpose(1, 0, 2)
def _check_Xy(self, X, y=None):
"""Check X and y types and dimensions."""
# Check data
if not isinstance(X, np.ndarray) or X.ndim != 3:
raise ValueError('X must be an array of shape (n_epochs, '
'n_channels, n_samples).')
if y is None:
y = np.ones(len(X))
y = np.asarray(y)
if len(X) != len(y):
raise ValueError('X and y must have the same length')
return X, y
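# Illustrative usage sketch (editorial; the synthetic data below are
# assumptions, not part of the original module):
# rng = np.random.RandomState(42)
# X = rng.randn(20, 8, 50)                  # 20 epochs, 8 channels, 50 samples
# y = rng.randint(0, 2, 20)                 # two classes
# xd = _XdawnTransformer(n_components=2).fit(X, y)
# xd.transform(X).shape                     # -> (20, 2 * n_classes, 50)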
class Xdawn(_XdawnTransformer):
"""Implementation of the Xdawn Algorithm.
Xdawn [1]_ [2]_ is a spatial filtering method designed to improve the
signal to signal + noise ratio (SSNR) of the ERP responses. Xdawn was
originally designed for P300 evoked potential by enhancing the target
response with respect to the non-target response. This implementation
is a generalization to any type of ERP.
Parameters
----------
n_components : int (default 2)
The number of components to decompose the signals.
signal_cov : None | Covariance | ndarray, shape (n_channels, n_channels)
(default None). The signal covariance used for whitening of the data.
if None, the covariance is estimated from the epochs signal.
correct_overlap : 'auto' or bool (default 'auto')
Compute the independent evoked responses per condition, while
correcting for event overlaps if any. If 'auto', then
        overlap correction is enabled if the events do overlap.
reg : float | str | None (default None)
If not None (same as ``'empirical'``, default), allow
regularization for covariance estimation.
If float, shrinkage is used (0 <= shrinkage <= 1).
For str options, ``reg`` will be passed as ``method`` to
:func:`mne.compute_covariance`.
Attributes
----------
filters_ : dict of ndarray
If fit, the Xdawn components used to decompose the data for each event
type, else empty.
patterns_ : dict of ndarray
If fit, the Xdawn patterns used to restore the signals for each event
type, else empty.
evokeds_ : dict of evoked instance
If fit, the evoked response for each event type.
event_id_ : dict of event id
The event id.
correct_overlap_ : bool
Whether overlap correction was applied.
Notes
-----
.. versionadded:: 0.10
See Also
--------
mne.decoding.CSP, mne.decoding.SPoC
References
----------
.. [1] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN
algorithm to enhance evoked potentials: application to
brain-computer interface. Biomedical Engineering, IEEE Transactions
on, 56(8), 2035-2043.
.. [2] Rivet, B., Cecotti, H., Souloumiac, A., Maby, E., & Mattout, J.
(2011, August). Theoretical analysis of xDAWN algorithm:
application to an efficient sensor selection in a P300 BCI. In
Signal Processing Conference, 2011 19th European (pp. 1382-1386).
IEEE.
"""
def __init__(self, n_components=2, signal_cov=None, correct_overlap='auto',
reg=None):
"""Init."""
super(Xdawn, self).__init__(n_components=n_components,
signal_cov=signal_cov, reg=reg)
if correct_overlap not in ['auto', True, False]:
raise ValueError('correct_overlap must be a bool or "auto"')
self.correct_overlap = correct_overlap
def fit(self, epochs, y=None):
"""Fit Xdawn from epochs.
Parameters
----------
epochs : Epochs object
An instance of Epoch on which Xdawn filters will be fitted.
y : ndarray | None (default None)
If None, used epochs.events[:, 2].
Returns
-------
self : Xdawn instance
The Xdawn instance.
"""
# Check data
if not isinstance(epochs, BaseEpochs):
raise ValueError('epochs must be an Epochs object.')
picks = _pick_data_channels(epochs.info)
use_info = pick_info(epochs.info, picks)
X = epochs.get_data()[:, picks, :]
y = epochs.events[:, 2] if y is None else y
self.event_id_ = epochs.event_id
# Check that no baseline was applied with correct overlap
correct_overlap = self.correct_overlap
if correct_overlap == 'auto':
# Events are overlapped if the minimal inter-stimulus
# interval is smaller than the time window.
isi = np.diff(np.sort(epochs.events[:, 0]))
window = int((epochs.tmax - epochs.tmin) * epochs.info['sfreq'])
correct_overlap = isi.min() < window
if epochs.baseline and correct_overlap:
raise ValueError('Cannot apply correct_overlap if epochs'
' were baselined.')
events, tmin, sfreq = None, 0., 1.
if correct_overlap:
events = epochs.events
tmin = epochs.tmin
sfreq = epochs.info['sfreq']
self.correct_overlap_ = correct_overlap
# Note: In this original version of Xdawn we compute and keep all
# components. The selection comes at transform().
n_components = X.shape[1]
# Main fitting function
filters, patterns, evokeds = _fit_xdawn(
X, y, n_components=n_components, reg=self.reg,
signal_cov=self.signal_cov, events=events, tmin=tmin, sfreq=sfreq,
method_params=self.method_params, info=use_info)
# Re-order filters and patterns according to event_id
filters = filters.reshape(-1, n_components, filters.shape[-1])
patterns = patterns.reshape(-1, n_components, patterns.shape[-1])
self.filters_, self.patterns_, self.evokeds_ = dict(), dict(), dict()
idx = np.argsort([value for _, value in iteritems(epochs.event_id)])
for eid, this_filter, this_pattern, this_evo in zip(
epochs.event_id, filters[idx], patterns[idx], evokeds[idx]):
self.filters_[eid] = this_filter.T
self.patterns_[eid] = this_pattern.T
n_events = len(epochs[eid])
evoked = EvokedArray(this_evo, use_info, tmin=epochs.tmin,
comment=eid, nave=n_events)
self.evokeds_[eid] = evoked
return self
def transform(self, inst):
"""Apply Xdawn dim reduction.
Parameters
----------
inst : Epochs | Evoked | ndarray, shape ([n_epochs, ]n_channels, n_times)
Data on which Xdawn filters will be applied.
Returns
-------
X : ndarray, shape ([n_epochs, ]n_components * n_event_types, n_times)
Spatially filtered signals.
""" # noqa: E501
if isinstance(inst, BaseEpochs):
X = inst.get_data()
elif isinstance(inst, Evoked):
X = inst.data
elif isinstance(inst, np.ndarray):
X = inst
if X.ndim not in (2, 3):
raise ValueError('X must be 2D or 3D, got %s' % (X.ndim,))
else:
raise ValueError('Data input must be of Epoch type or numpy array')
filters = [filt[:self.n_components]
for filt in itervalues(self.filters_)]
filters = np.concatenate(filters, axis=0)
X = np.dot(filters, X)
if X.ndim == 3:
X = X.transpose((1, 0, 2))
return X
def apply(self, inst, event_id=None, include=None, exclude=None):
"""Remove selected components from the signal.
Given the unmixing matrix, transform data,
zero out components, and inverse transform the data.
This procedure will reconstruct the signals from which
the dynamics described by the excluded components is subtracted.
Parameters
----------
inst : instance of Raw | Epochs | Evoked
The data to be processed.
event_id : dict | list of str | None (default None)
            The kind of event to apply. If None, a dict of instances will be
            returned, one for each event type Xdawn has been fitted on.
include : array_like of int | None (default None)
            The indices referring to columns in the unmixing matrix. The
components to be kept. If None, the first n_components (as defined
in the Xdawn constructor) will be kept.
exclude : array_like of int | None (default None)
            The indices referring to columns in the unmixing matrix. The
components to be zeroed out. If None, all the components except the
            first n_components will be excluded.
Returns
-------
out : dict of instance
A dict of instance (from the same type as inst input) for each
event type in event_id.
"""
if event_id is None:
event_id = self.event_id_
if not isinstance(inst, (BaseRaw, BaseEpochs, Evoked)):
raise ValueError('Data input must be Raw, Epochs or Evoked type')
picks = _pick_data_channels(inst.info)
# Define the components to keep
default_exclude = list(range(self.n_components, len(inst.ch_names)))
if exclude is None:
exclude = default_exclude
else:
exclude = list(set(list(default_exclude) + list(exclude)))
if isinstance(inst, BaseRaw):
out = self._apply_raw(raw=inst, include=include, exclude=exclude,
event_id=event_id, picks=picks)
elif isinstance(inst, BaseEpochs):
out = self._apply_epochs(epochs=inst, include=include, picks=picks,
exclude=exclude, event_id=event_id)
elif isinstance(inst, Evoked):
out = self._apply_evoked(evoked=inst, include=include, picks=picks,
exclude=exclude, event_id=event_id)
return out
def _apply_raw(self, raw, include, exclude, event_id, picks):
"""Aux method."""
if not raw.preload:
raise ValueError('Raw data must be preloaded to apply Xdawn')
raws = dict()
for eid in event_id:
data = raw[picks, :][0]
data = self._pick_sources(data, include, exclude, eid)
raw_r = raw.copy()
raw_r[picks, :] = data
raws[eid] = raw_r
return raws
def _apply_epochs(self, epochs, include, exclude, event_id, picks):
"""Aux method."""
if not epochs.preload:
raise ValueError('Epochs must be preloaded to apply Xdawn')
# special case where epochs come picked but fit was 'unpicked'.
epochs_dict = dict()
data = np.hstack(epochs.get_data()[:, picks])
for eid in event_id:
data_r = self._pick_sources(data, include, exclude, eid)
data_r = np.array(np.split(data_r, len(epochs.events), 1))
epochs_r = epochs.copy().load_data()
epochs_r._data[:, picks, :] = data_r
epochs_dict[eid] = epochs_r
return epochs_dict
def _apply_evoked(self, evoked, include, exclude, event_id, picks):
"""Aux method."""
data = evoked.data[picks]
evokeds = dict()
for eid in event_id:
data_r = self._pick_sources(data, include, exclude, eid)
evokeds[eid] = evoked.copy()
# restore evoked
evokeds[eid].data[picks] = data_r
return evokeds
def _pick_sources(self, data, include, exclude, eid):
"""Aux method."""
logger.info('Transforming to Xdawn space')
# Apply unmixing
sources = np.dot(self.filters_[eid].T, data)
if include not in (None, list()):
            mask = np.ones(len(sources), dtype=bool)
mask[np.unique(include)] = False
sources[mask] = 0.
logger.info('Zeroing out %i Xdawn components' % mask.sum())
elif exclude not in (None, list()):
exclude_ = np.unique(exclude)
sources[exclude_] = 0.
logger.info('Zeroing out %i Xdawn components' % len(exclude_))
logger.info('Inverse transforming to sensor space')
data = np.dot(self.patterns_[eid], sources)
return data
def inverse_transform(self):
"""Not implemented, see Xdawn.apply() instead."""
# Exists because of _XdawnTransformer
raise NotImplementedError('See Xdawn.apply()')
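# Illustrative usage sketch (editorial; ``epochs`` is assumed to be an
# existing, preloaded mne.Epochs instance):
# xd = Xdawn(n_components=2)
# xd.fit(epochs)
# sources = xd.transform(epochs)        # spatially filtered data
# denoised = xd.apply(epochs)           # dict of Epochs, one entry per event type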
|
|
# -*- coding:utf-8 -*-
# This demo is based on O'Reilly's illustrated introduction to the t-SNE algorithm
__author__ = "JOHNKYON"
# numpy and scipy imports
import numpy as np
from numpy import linalg
from numpy.linalg import norm
from scipy.spatial.distance import squareform, pdist
# sklearn(Everyone is using this)
import sklearn
from sklearn.manifold import TSNE
from sklearn.datasets import load_digits
from sklearn.preprocessing import scale
# Code in sklearn 0.15.2
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.manifold.t_sne import (_joint_probabilities, _kl_divergence)
from sklearn.utils.extmath import _ravel
# Random state
RS = 20160510
# matplotlib for graphics
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
import matplotlib
# Import seaborn to make nice plots
import seaborn as sns
sns.set_style('darkgrid')
sns.set_palette('muted')
sns.set_context('notebook', font_scale=1.5, rc={"lines.linewidth": 2.5})
# Generate an animation with matplotkib and moviepy
from moviepy.video.io.bindings import mplfig_to_npimage
import moviepy.editor as mpy
# load digits
digits = load_digits()
digits.data.shape
# print(digits['DESCR'])
nrows, ncols = 2, 5
plt.figure(figsize=(6, 3))
plt.gray()
for i in range(ncols * nrows):
ax = plt.subplot(nrows, ncols, i + 1)
ax.matshow(digits.images[i, ...])
    plt.xticks([])
    plt.yticks([])
plt.title(digits.target[i])
plt.savefig('images/digits_generated.png', dpi=150)
# reorder the data points according to labels
X = np.vstack([digits.data[digits.target == i]
for i in range(10)])
y = np.hstack([digits.target[digits.target == i]
for i in range(10)])
# digits_proj = TSNE(random_state=RS).fit_transform(X)
def scatter(x, colors):
# color palette with seaborn
palette = np.array(sns.color_palette("hls", 10))
# Create a scatter plot
f = plt.figure(figsize=(8, 8))
ax = plt.subplot(aspect='equal')
sc = ax.scatter(x[:, 0], x[:, 1], lw=0, s=40,
                    c=palette[colors.astype(int)])
plt.xlim(-25, 25)
plt.ylim(-25, 25)
ax.axis('off')
ax.axis('tight')
# Add labels for each digit
txts = []
for i in range(10):
# Position of each label
xtext, ytext = np.median(x[colors == i, :], axis=0)
txt = ax.text(xtext, ytext, str(i), fontsize=24)
txt.set_path_effects([
PathEffects.Stroke(linewidth=5, foreground="w"),
PathEffects.Normal()
])
txts.append(txt)
return f, ax, sc, txts
# scatter(digits_proj, y)
# plt.savefig('images/digits_tsne-generated.png', dpi=120)
# compute similarity matrix
def _joint_probabilities_constant_sigma(D, sigma):
# print D
P = np.exp(-D ** 2 / 2 * sigma ** 2)
P /= np.sum(P, axis=1)
return P
# # Pairwise distances between all data points
# D = pairwise_distances(X, squared=True)
# # Similarity with constant sigma
# P_constant = _joint_probabilities_constant_sigma(D, .002)
# # Similarity with variable sigma
# P_binary = _joint_probabilities(D, 30., False)
# # The output of this function needs to be reshaped to a square matrix
# P_binary_s = squareform(P_binary)
#
# plt.figure(figsize=(12, 4))
# pal = sns.light_palette("blue", as_cmap=True)
#
# plt.subplot(131)
# plt.imshow(D[::10, ::10], interpolation='none', cmap=pal)
# plt.axis('off')
# plt.title("Distance matrix", fontdict={'fontsize': 16})
#
# plt.subplot(133)
# plt.imshow(P_binary_s[::10, ::10], interpolation='none', cmap=pal)
# plt.axis('off')
# plt.title('$p_{j|i}$ (variable $\sigma$)', fontdict={'fontsize': 16})
# plt.savefig('images/similarity-generated.png', dpi=120)
# This list will contain the positions of the map points at every iteration
positions = []
def _gradient_descent(objective, p0, it, n_iter, n_iter_check,
kwargs, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=[],objective_error=None):
# The documentation of this function can be found in scikit-learn's code.
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
    error = np.finfo(float).max
    best_error = np.finfo(float).max
best_iter = 0
for i in range(it, n_iter):
# We save the current position.
positions.append(p.copy())
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
break
if min_grad_norm >= grad_norm:
break
if min_error_diff >= error_diff:
break
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
        np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
return p, error, i
sklearn.manifold.t_sne._gradient_descent = _gradient_descent
X_proj = TSNE(random_state=RS).fit_transform(X)
X_iter = np.dstack([position.reshape(-1, 2) for position in positions])
f, ax, sc, txts = scatter(X_iter[..., -1], y)
def make_frame_mpl(t):
i = int(t*40)
x = X_iter[..., i]
sc.set_offsets(x)
for j, txt in zip(range(10), txts):
xtext, ytext = np.median(x[y == j, :], axis=0)
txt.set_x(xtext)
txt.set_y(ytext)
return mplfig_to_npimage(f)
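# Editorial sketch: the frame function above can be rendered with the moviepy
# imports at the top of this script (output filename is illustrative):
# animation = mpy.VideoClip(make_frame_mpl, duration=X_iter.shape[2] / 40.)
# animation.write_gif("images/animation-generated.gif", fps=20)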
npoints = 1000
plt.figure(figsize=(15, 4))
for i, D in enumerate((2, 5, 10)):
# Normally distributed points.
u = np.random.randn(npoints, D)
# Now on the sphere.
u /= norm(u, axis=1)[:, None]
# Uniform radius.
r = np.random.rand(npoints, 1)
# Uniformly within the ball.
points = u * r**(1./D)
# Plot.
ax = plt.subplot(1, 3, i+1)
ax.set_xlabel('Ball radius')
if i == 0:
ax.set_ylabel('Distance from origin')
ax.hist(norm(points, axis=1),
bins=np.linspace(0., 1., 50))
ax.set_title('D=%d' % D, loc='left')
plt.savefig('images/spheres-generated.png', dpi=100, bbox_inches='tight')
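# Editorial note: the histograms above illustrate concentration of measure --
# for points uniform in a D-ball the radius has density proportional to
# r**(D - 1), so as D grows most points lie near the surface and pairwise
# distances become nearly indistinguishable in high-dimensional input spaces.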
|
|
# This script is licensed as public domain.
bl_info = {
"name": "Export Inter-Quake Model (.iqm/.iqe)",
"author": "Lee Salzman",
"version": (2019, 4, 24),
"blender": (2, 80, 0),
"location": "File > Export > Inter-Quake Model",
"description": "Export to the Inter-Quake Model format (.iqm/.iqe)",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Import-Export"}
import os, struct, math
import mathutils
import bpy
import bpy_extras.io_utils
IQM_POSITION = 0
IQM_TEXCOORD = 1
IQM_NORMAL = 2
IQM_TANGENT = 3
IQM_BLENDINDEXES = 4
IQM_BLENDWEIGHTS = 5
IQM_COLOR = 6
IQM_CUSTOM = 0x10
IQM_BYTE = 0
IQM_UBYTE = 1
IQM_SHORT = 2
IQM_USHORT = 3
IQM_INT = 4
IQM_UINT = 5
IQM_HALF = 6
IQM_FLOAT = 7
IQM_DOUBLE = 8
IQM_LOOP = 1
IQM_HEADER = struct.Struct('<16s27I')
IQM_MESH = struct.Struct('<6I')
IQM_TRIANGLE = struct.Struct('<3I')
IQM_JOINT = struct.Struct('<Ii10f')
IQM_POSE = struct.Struct('<iI20f')
IQM_ANIMATION = struct.Struct('<3IfI')
IQM_VERTEXARRAY = struct.Struct('<5I')
IQM_BOUNDS = struct.Struct('<8f')
MAXVCACHE = 32
class Vertex:
def __init__(self, index, coord, normal, uv, weights, color):
self.index = index
self.coord = coord
self.normal = normal
self.uv = uv
self.weights = weights
self.color = color
def normalizeWeights(self):
# renormalizes all weights such that they add up to 255
# the list is chopped/padded to exactly 4 weights if necessary
if not self.weights:
self.weights = [ (0, 0), (0, 0), (0, 0), (0, 0) ]
return
self.weights.sort(key = lambda weight: weight[0], reverse=True)
if len(self.weights) > 4:
del self.weights[4:]
totalweight = sum([ weight for (weight, bone) in self.weights])
if totalweight > 0:
self.weights = [ (int(round(weight * 255.0 / totalweight)), bone) for (weight, bone) in self.weights]
while len(self.weights) > 1 and self.weights[-1][0] <= 0:
self.weights.pop()
else:
totalweight = len(self.weights)
self.weights = [ (int(round(255.0 / totalweight)), bone) for (weight, bone) in self.weights]
totalweight = sum([ weight for (weight, bone) in self.weights])
while totalweight != 255:
for i, (weight, bone) in enumerate(self.weights):
if totalweight > 255 and weight > 0:
self.weights[i] = (weight - 1, bone)
totalweight -= 1
elif totalweight < 255 and weight < 255:
self.weights[i] = (weight + 1, bone)
totalweight += 1
while len(self.weights) < 4:
self.weights.append((0, self.weights[-1][1]))
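    # Editorial example: weights [(0.6, 3), (0.3, 1), (0.1, 7)] become roughly
    # [(153, 3), (76, 1), (26, 7), (0, 7)] -- scaled so the bytes sum to 255,
    # rounding drift corrected one unit at a time, then padded to 4 entries.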
def calcScore(self):
if self.uses:
self.score = 2.0 * pow(len(self.uses), -0.5)
if self.cacherank >= 3:
self.score += pow(1.0 - float(self.cacherank - 3)/MAXVCACHE, 1.5)
elif self.cacherank >= 0:
self.score += 0.75
else:
self.score = -1.0
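    # Editorial note: this is Forsyth's scoring heuristic -- a valence term
    # 2 * len(uses) ** -0.5 that favours nearly exhausted vertices, plus a
    # cache bonus of 0.75 for the three most recent entries and a decaying
    # bonus for deeper entries of the simulated MAXVCACHE-entry LRU cache.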
def neighborKey(self, other):
if self.coord < other.coord:
return (self.coord.x, self.coord.y, self.coord.z, other.coord.x, other.coord.y, other.coord.z, tuple(self.weights), tuple(other.weights))
else:
return (other.coord.x, other.coord.y, other.coord.z, self.coord.x, self.coord.y, self.coord.z, tuple(other.weights), tuple(self.weights))
def __hash__(self):
return self.index
def __eq__(self, v):
return self.coord == v.coord and self.normal == v.normal and self.uv == v.uv and self.weights == v.weights and self.color == v.color
class Mesh:
def __init__(self, name, material, verts):
self.name = name
self.material = material
self.verts = [ None for v in verts ]
self.vertmap = {}
self.tris = []
def calcTangents(self):
# See "Tangent Space Calculation" at http://www.terathon.com/code/tangent.html
for v in self.verts:
v.tangent = mathutils.Vector((0.0, 0.0, 0.0))
v.bitangent = mathutils.Vector((0.0, 0.0, 0.0))
for (v0, v1, v2) in self.tris:
dco1 = v1.coord - v0.coord
dco2 = v2.coord - v0.coord
duv1 = v1.uv - v0.uv
duv2 = v2.uv - v0.uv
tangent = dco2*duv1.y - dco1*duv2.y
bitangent = dco2*duv1.x - dco1*duv2.x
if dco2.cross(dco1).dot(bitangent.cross(tangent)) < 0:
tangent.negate()
bitangent.negate()
v0.tangent += tangent
v1.tangent += tangent
v2.tangent += tangent
v0.bitangent += bitangent
v1.bitangent += bitangent
v2.bitangent += bitangent
for v in self.verts:
v.tangent = v.tangent - v.normal*v.tangent.dot(v.normal)
v.tangent.normalize()
if v.normal.cross(v.tangent).dot(v.bitangent) < 0:
v.bitangent = -1.0
else:
v.bitangent = 1.0
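    # Editorial note: after this pass ``bitangent`` holds only the handedness
    # sign (+1.0 or -1.0), which writeVerts() packs as the fourth component of
    # the tangent vertex array.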
def optimize(self):
# Linear-speed vertex cache optimization algorithm by Tom Forsyth
for v in self.verts:
if v:
v.index = -1
v.uses = []
v.cacherank = -1
for i, (v0, v1, v2) in enumerate(self.tris):
v0.uses.append(i)
v1.uses.append(i)
v2.uses.append(i)
for v in self.verts:
if v:
v.calcScore()
besttri = -1
bestscore = -42.0
scores = []
for i, (v0, v1, v2) in enumerate(self.tris):
scores.append(v0.score + v1.score + v2.score)
if scores[i] > bestscore:
besttri = i
bestscore = scores[i]
vertloads = 0 # debug info
vertschedule = []
trischedule = []
vcache = []
while besttri >= 0:
tri = self.tris[besttri]
scores[besttri] = -666.0
trischedule.append(tri)
for v in tri:
if v.cacherank < 0: # debug info
vertloads += 1 # debug info
if v.index < 0:
v.index = len(vertschedule)
vertschedule.append(v)
v.uses.remove(besttri)
v.cacherank = -1
v.score = -1.0
vcache = [ v for v in tri if v.uses ] + [ v for v in vcache if v.cacherank >= 0 ]
for i, v in enumerate(vcache):
v.cacherank = i
v.calcScore()
besttri = -1
bestscore = -42.0
for v in vcache:
for i in v.uses:
v0, v1, v2 = self.tris[i]
scores[i] = v0.score + v1.score + v2.score
if scores[i] > bestscore:
besttri = i
bestscore = scores[i]
while len(vcache) > MAXVCACHE:
vcache.pop().cacherank = -1
if besttri < 0:
for i, score in enumerate(scores):
if score > bestscore:
besttri = i
bestscore = score
print('%s: %d verts optimized to %d/%d loads for %d entry LRU cache' % (self.name, len(self.verts), vertloads, len(vertschedule), MAXVCACHE))
#print('%s: %d verts scheduled to %d' % (self.name, len(self.verts), len(vertschedule)))
self.verts = vertschedule
# print('%s: %d tris scheduled to %d' % (self.name, len(self.tris), len(trischedule)))
self.tris = trischedule
def meshData(self, iqm):
return [ iqm.addText(self.name), iqm.addText(self.material), self.firstvert, len(self.verts), self.firsttri, len(self.tris) ]
class Bone:
def __init__(self, name, origname, index, parent, matrix):
self.name = name
self.origname = origname
self.index = index
self.parent = parent
self.matrix = matrix
self.localmatrix = matrix
if self.parent:
self.localmatrix = parent.matrix.inverted() @ self.localmatrix
self.numchannels = 0
self.channelmask = 0
self.channeloffsets = [ 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10 ]
self.channelscales = [ -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10 ]
def jointData(self, iqm):
if self.parent:
parent = self.parent.index
else:
parent = -1
pos = self.localmatrix.to_translation()
orient = self.localmatrix.to_quaternion()
orient.normalize()
if orient.w > 0:
orient.negate()
scale = self.localmatrix.to_scale()
scale.x = round(scale.x*0x10000)/0x10000
scale.y = round(scale.y*0x10000)/0x10000
scale.z = round(scale.z*0x10000)/0x10000
return [ iqm.addText(self.name), parent, pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w, scale.x, scale.y, scale.z ]
def poseData(self, iqm):
if self.parent:
parent = self.parent.index
else:
parent = -1
return [ parent, self.channelmask ] + self.channeloffsets + self.channelscales
def calcChannelMask(self):
for i in range(0, 10):
self.channelscales[i] -= self.channeloffsets[i]
if self.channelscales[i] >= 1.0e-10:
self.numchannels += 1
self.channelmask |= 1 << i
self.channelscales[i] /= 0xFFFF
else:
self.channelscales[i] = 0.0
return self.numchannels
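    # Editorial note: the 10 channels are tx, ty, tz, qx, qy, qz, qw, sx, sy,
    # sz; a channel is kept only if its range is non-trivial, and its values
    # are later quantized to unsigned 16-bit integers using the offset/scale
    # pairs written out by poseData().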
class Animation:
def __init__(self, name, frames, fps = 0.0, flags = 0):
self.name = name
self.frames = frames
self.fps = fps
self.flags = flags
def calcFrameLimits(self, bones):
for frame in self.frames:
for i, bone in enumerate(bones):
loc, quat, scale, mat = frame[i]
bone.channeloffsets[0] = min(bone.channeloffsets[0], loc.x)
bone.channeloffsets[1] = min(bone.channeloffsets[1], loc.y)
bone.channeloffsets[2] = min(bone.channeloffsets[2], loc.z)
bone.channeloffsets[3] = min(bone.channeloffsets[3], quat.x)
bone.channeloffsets[4] = min(bone.channeloffsets[4], quat.y)
bone.channeloffsets[5] = min(bone.channeloffsets[5], quat.z)
bone.channeloffsets[6] = min(bone.channeloffsets[6], quat.w)
bone.channeloffsets[7] = min(bone.channeloffsets[7], scale.x)
bone.channeloffsets[8] = min(bone.channeloffsets[8], scale.y)
bone.channeloffsets[9] = min(bone.channeloffsets[9], scale.z)
bone.channelscales[0] = max(bone.channelscales[0], loc.x)
bone.channelscales[1] = max(bone.channelscales[1], loc.y)
bone.channelscales[2] = max(bone.channelscales[2], loc.z)
bone.channelscales[3] = max(bone.channelscales[3], quat.x)
bone.channelscales[4] = max(bone.channelscales[4], quat.y)
bone.channelscales[5] = max(bone.channelscales[5], quat.z)
bone.channelscales[6] = max(bone.channelscales[6], quat.w)
bone.channelscales[7] = max(bone.channelscales[7], scale.x)
bone.channelscales[8] = max(bone.channelscales[8], scale.y)
bone.channelscales[9] = max(bone.channelscales[9], scale.z)
def animData(self, iqm):
return [ iqm.addText(self.name), self.firstframe, len(self.frames), self.fps, self.flags ]
def frameData(self, bones):
data = b''
for frame in self.frames:
for i, bone in enumerate(bones):
loc, quat, scale, mat = frame[i]
if (bone.channelmask&0x7F) == 0x7F:
lx = int(round((loc.x - bone.channeloffsets[0]) / bone.channelscales[0]))
ly = int(round((loc.y - bone.channeloffsets[1]) / bone.channelscales[1]))
lz = int(round((loc.z - bone.channeloffsets[2]) / bone.channelscales[2]))
qx = int(round((quat.x - bone.channeloffsets[3]) / bone.channelscales[3]))
qy = int(round((quat.y - bone.channeloffsets[4]) / bone.channelscales[4]))
qz = int(round((quat.z - bone.channeloffsets[5]) / bone.channelscales[5]))
qw = int(round((quat.w - bone.channeloffsets[6]) / bone.channelscales[6]))
data += struct.pack('<7H', lx, ly, lz, qx, qy, qz, qw)
else:
if bone.channelmask & 1:
data += struct.pack('<H', int(round((loc.x - bone.channeloffsets[0]) / bone.channelscales[0])))
if bone.channelmask & 2:
data += struct.pack('<H', int(round((loc.y - bone.channeloffsets[1]) / bone.channelscales[1])))
if bone.channelmask & 4:
data += struct.pack('<H', int(round((loc.z - bone.channeloffsets[2]) / bone.channelscales[2])))
if bone.channelmask & 8:
data += struct.pack('<H', int(round((quat.x - bone.channeloffsets[3]) / bone.channelscales[3])))
if bone.channelmask & 16:
data += struct.pack('<H', int(round((quat.y - bone.channeloffsets[4]) / bone.channelscales[4])))
if bone.channelmask & 32:
data += struct.pack('<H', int(round((quat.z - bone.channeloffsets[5]) / bone.channelscales[5])))
if bone.channelmask & 64:
data += struct.pack('<H', int(round((quat.w - bone.channeloffsets[6]) / bone.channelscales[6])))
if bone.channelmask & 128:
data += struct.pack('<H', int(round((scale.x - bone.channeloffsets[7]) / bone.channelscales[7])))
if bone.channelmask & 256:
data += struct.pack('<H', int(round((scale.y - bone.channeloffsets[8]) / bone.channelscales[8])))
if bone.channelmask & 512:
data += struct.pack('<H', int(round((scale.z - bone.channeloffsets[9]) / bone.channelscales[9])))
return data
def frameBoundsData(self, bones, meshes, frame, invbase):
bbmin = bbmax = None
xyradius = 0.0
radius = 0.0
transforms = []
for i, bone in enumerate(bones):
loc, quat, scale, mat = frame[i]
if bone.parent:
mat = transforms[bone.parent.index] @ mat
transforms.append(mat)
for i, mat in enumerate(transforms):
transforms[i] = mat @ invbase[i]
for mesh in meshes:
for v in mesh.verts:
pos = mathutils.Vector((0.0, 0.0, 0.0))
for (weight, bone) in v.weights:
if weight > 0:
pos += (transforms[bone] @ v.coord) * (weight / 255.0)
if bbmin:
bbmin.x = min(bbmin.x, pos.x)
bbmin.y = min(bbmin.y, pos.y)
bbmin.z = min(bbmin.z, pos.z)
bbmax.x = max(bbmax.x, pos.x)
bbmax.y = max(bbmax.y, pos.y)
bbmax.z = max(bbmax.z, pos.z)
else:
bbmin = pos.copy()
bbmax = pos.copy()
pradius = pos.x*pos.x + pos.y*pos.y
if pradius > xyradius:
xyradius = pradius
pradius += pos.z*pos.z
if pradius > radius:
radius = pradius
if bbmin:
xyradius = math.sqrt(xyradius)
radius = math.sqrt(radius)
else:
bbmin = bbmax = mathutils.Vector((0.0, 0.0, 0.0))
return IQM_BOUNDS.pack(bbmin.x, bbmin.y, bbmin.z, bbmax.x, bbmax.y, bbmax.z, xyradius, radius)
def boundsData(self, bones, meshes):
invbase = []
for bone in bones:
invbase.append(bone.matrix.inverted())
data = b''
for i, frame in enumerate(self.frames):
print('Calculating bounding box for %s:%d' % (self.name, i))
data += self.frameBoundsData(bones, meshes, frame, invbase)
return data
class IQMFile:
def __init__(self):
self.textoffsets = {}
self.textdata = b''
self.meshes = []
self.meshdata = []
self.numverts = 0
self.numtris = 0
self.joints = []
self.jointdata = []
self.numframes = 0
self.framesize = 0
self.anims = []
self.posedata = []
self.animdata = []
self.framedata = []
self.vertdata = []
def addText(self, str):
if not self.textdata:
self.textdata += b'\x00'
self.textoffsets[''] = 0
try:
return self.textoffsets[str]
except:
offset = len(self.textdata)
self.textoffsets[str] = offset
self.textdata += bytes(str, encoding="utf8") + b'\x00'
return offset
def addJoints(self, bones):
for bone in bones:
self.joints.append(bone)
if self.meshes:
self.jointdata.append(bone.jointData(self))
def addMeshes(self, meshes):
self.meshes += meshes
for mesh in meshes:
mesh.firstvert = self.numverts
mesh.firsttri = self.numtris
self.meshdata.append(mesh.meshData(self))
self.numverts += len(mesh.verts)
self.numtris += len(mesh.tris)
def addAnims(self, anims):
self.anims += anims
for anim in anims:
anim.firstframe = self.numframes
self.animdata.append(anim.animData(self))
self.numframes += len(anim.frames)
def calcFrameSize(self):
for anim in self.anims:
anim.calcFrameLimits(self.joints)
self.framesize = 0
for joint in self.joints:
self.framesize += joint.calcChannelMask()
for joint in self.joints:
if self.anims:
self.posedata.append(joint.poseData(self))
print('Exporting %d frames of size %d' % (self.numframes, self.framesize))
def writeVerts(self, file, offset):
if self.numverts <= 0:
return
file.write(IQM_VERTEXARRAY.pack(IQM_POSITION, 0, IQM_FLOAT, 3, offset))
offset += self.numverts * struct.calcsize('<3f')
file.write(IQM_VERTEXARRAY.pack(IQM_TEXCOORD, 0, IQM_FLOAT, 2, offset))
offset += self.numverts * struct.calcsize('<2f')
file.write(IQM_VERTEXARRAY.pack(IQM_NORMAL, 0, IQM_FLOAT, 3, offset))
offset += self.numverts * struct.calcsize('<3f')
file.write(IQM_VERTEXARRAY.pack(IQM_TANGENT, 0, IQM_FLOAT, 4, offset))
offset += self.numverts * struct.calcsize('<4f')
if self.joints:
file.write(IQM_VERTEXARRAY.pack(IQM_BLENDINDEXES, 0, IQM_UBYTE, 4, offset))
offset += self.numverts * struct.calcsize('<4B')
file.write(IQM_VERTEXARRAY.pack(IQM_BLENDWEIGHTS, 0, IQM_UBYTE, 4, offset))
offset += self.numverts * struct.calcsize('<4B')
hascolors = any(mesh.verts and mesh.verts[0].color for mesh in self.meshes)
if hascolors:
file.write(IQM_VERTEXARRAY.pack(IQM_COLOR, 0, IQM_UBYTE, 4, offset))
offset += self.numverts * struct.calcsize('<4B')
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<3f', *v.coord))
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<2f', *v.uv))
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<3f', *v.normal))
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<4f', v.tangent.x, v.tangent.y, v.tangent.z, v.bitangent))
if self.joints:
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<4B', v.weights[0][1], v.weights[1][1], v.weights[2][1], v.weights[3][1]))
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<4B', v.weights[0][0], v.weights[1][0], v.weights[2][0], v.weights[3][0]))
if hascolors:
for mesh in self.meshes:
for v in mesh.verts:
if v.color:
file.write(struct.pack('<4B', v.color[0], v.color[1], v.color[2], v.color[3]))
else:
file.write(struct.pack('<4B', 0, 0, 0, 255))
def calcNeighbors(self):
edges = {}
for mesh in self.meshes:
for i, (v0, v1, v2) in enumerate(mesh.tris):
e0 = v0.neighborKey(v1)
e1 = v1.neighborKey(v2)
e2 = v2.neighborKey(v0)
tri = mesh.firsttri + i
try: edges[e0].append(tri)
except: edges[e0] = [tri]
try: edges[e1].append(tri)
except: edges[e1] = [tri]
try: edges[e2].append(tri)
except: edges[e2] = [tri]
neighbors = []
for mesh in self.meshes:
for i, (v0, v1, v2) in enumerate(mesh.tris):
e0 = edges[v0.neighborKey(v1)]
e1 = edges[v1.neighborKey(v2)]
e2 = edges[v2.neighborKey(v0)]
tri = mesh.firsttri + i
match0 = match1 = match2 = -1
if len(e0) == 2: match0 = e0[e0.index(tri)^1]
if len(e1) == 2: match1 = e1[e1.index(tri)^1]
if len(e2) == 2: match2 = e2[e2.index(tri)^1]
neighbors.append((match0, match1, match2))
self.neighbors = neighbors
def writeTris(self, file):
for mesh in self.meshes:
for (v0, v1, v2) in mesh.tris:
file.write(struct.pack('<3I', v0.index + mesh.firstvert, v1.index + mesh.firstvert, v2.index + mesh.firstvert))
for (n0, n1, n2) in self.neighbors:
if n0 < 0: n0 = 0xFFFFFFFF
if n1 < 0: n1 = 0xFFFFFFFF
if n2 < 0: n2 = 0xFFFFFFFF
file.write(struct.pack('<3I', n0, n1, n2))
def export(self, file, usebbox = True):
self.filesize = IQM_HEADER.size
if self.textdata:
while len(self.textdata) % 4:
self.textdata += b'\x00'
ofs_text = self.filesize
self.filesize += len(self.textdata)
else:
ofs_text = 0
if self.meshdata:
ofs_meshes = self.filesize
self.filesize += len(self.meshdata) * IQM_MESH.size
else:
ofs_meshes = 0
if self.numverts > 0:
ofs_vertexarrays = self.filesize
num_vertexarrays = 4
if self.joints:
num_vertexarrays += 2
hascolors = any(mesh.verts and mesh.verts[0].color for mesh in self.meshes)
if hascolors:
num_vertexarrays += 1
self.filesize += num_vertexarrays * IQM_VERTEXARRAY.size
ofs_vdata = self.filesize
self.filesize += self.numverts * struct.calcsize('<3f2f3f4f')
if self.joints:
self.filesize += self.numverts * struct.calcsize('<4B4B')
if hascolors:
self.filesize += self.numverts * struct.calcsize('<4B')
else:
ofs_vertexarrays = 0
num_vertexarrays = 0
ofs_vdata = 0
if self.numtris > 0:
ofs_triangles = self.filesize
self.filesize += self.numtris * IQM_TRIANGLE.size
ofs_neighbors = self.filesize
self.filesize += self.numtris * IQM_TRIANGLE.size
else:
ofs_triangles = 0
ofs_neighbors = 0
if self.jointdata:
ofs_joints = self.filesize
self.filesize += len(self.jointdata) * IQM_JOINT.size
else:
ofs_joints = 0
if self.posedata:
ofs_poses = self.filesize
self.filesize += len(self.posedata) * IQM_POSE.size
else:
ofs_poses = 0
if self.animdata:
ofs_anims = self.filesize
self.filesize += len(self.animdata) * IQM_ANIMATION.size
else:
ofs_anims = 0
falign = 0
if self.framesize * self.numframes > 0:
ofs_frames = self.filesize
self.filesize += self.framesize * self.numframes * struct.calcsize('<H')
falign = (4 - (self.filesize % 4)) % 4
self.filesize += falign
else:
ofs_frames = 0
if usebbox and self.numverts > 0 and self.numframes > 0:
ofs_bounds = self.filesize
self.filesize += self.numframes * IQM_BOUNDS.size
else:
ofs_bounds = 0
file.write(IQM_HEADER.pack('INTERQUAKEMODEL'.encode('ascii'), 2, self.filesize, 0, len(self.textdata), ofs_text, len(self.meshdata), ofs_meshes, num_vertexarrays, self.numverts, ofs_vertexarrays, self.numtris, ofs_triangles, ofs_neighbors, len(self.jointdata), ofs_joints, len(self.posedata), ofs_poses, len(self.animdata), ofs_anims, self.numframes, self.framesize, ofs_frames, ofs_bounds, 0, 0, 0, 0))
file.write(self.textdata)
for mesh in self.meshdata:
file.write(IQM_MESH.pack(*mesh))
self.writeVerts(file, ofs_vdata)
self.writeTris(file)
for joint in self.jointdata:
file.write(IQM_JOINT.pack(*joint))
for pose in self.posedata:
file.write(IQM_POSE.pack(*pose))
for anim in self.animdata:
file.write(IQM_ANIMATION.pack(*anim))
for anim in self.anims:
file.write(anim.frameData(self.joints))
file.write(b'\x00' * falign)
if usebbox and self.numverts > 0 and self.numframes > 0:
for anim in self.anims:
file.write(anim.boundsData(self.joints, self.meshes))
def findArmature(context):
armature = None
for obj in context.selected_objects:
if obj.type == 'ARMATURE':
armature = obj
break
if not armature:
for obj in context.selected_objects:
if obj.type == 'MESH':
armature = obj.find_armature()
if armature:
break
return armature
def derigifyBones(context, armature, scale):
data = armature.data
defnames = []
orgbones = {}
defbones = {}
org2defs = {}
def2org = {}
defparent = {}
defchildren = {}
for bone in data.bones.values():
if bone.name.startswith('ORG-'):
orgbones[bone.name[4:]] = bone
org2defs[bone.name[4:]] = []
elif bone.name.startswith('DEF-'):
defnames.append(bone.name[4:])
defbones[bone.name[4:]] = bone
defchildren[bone.name[4:]] = []
for name, bone in defbones.items():
orgname = name
orgbone = orgbones.get(orgname)
splitname = -1
if not orgbone:
splitname = name.rfind('.')
suffix = ''
if splitname >= 0 and name[splitname+1:] in [ 'l', 'r', 'L', 'R' ]:
suffix = name[splitname:]
splitname = name.rfind('.', 0, splitname)
if splitname >= 0 and name[splitname+1:splitname+2].isdigit():
orgname = name[:splitname] + suffix
orgbone = orgbones.get(orgname)
org2defs[orgname].append(name)
def2org[name] = orgname
for defs in org2defs.values():
defs.sort()
for name in defnames:
bone = defbones[name]
orgname = def2org[name]
orgbone = orgbones.get(orgname)
defs = org2defs[orgname]
if orgbone:
i = defs.index(name)
if i == 0:
orgparent = orgbone.parent
if orgparent and orgparent.name.startswith('ORG-'):
orgpname = orgparent.name[4:]
defparent[name] = org2defs[orgpname][-1]
else:
defparent[name] = defs[i-1]
if name in defparent:
defchildren[defparent[name]].append(name)
bones = {}
worldmatrix = armature.matrix_world
worklist = [ bone for bone in defnames if bone not in defparent ]
for index, bname in enumerate(worklist):
bone = defbones[bname]
bonematrix = worldmatrix @ bone.matrix_local
if scale != 1.0:
bonematrix.translation *= scale
bones[bone.name] = Bone(bname, bone.name, index, bname in defparent and bones.get(defbones[defparent[bname]].name), bonematrix)
worklist.extend(defchildren[bname])
print('De-rigified %d bones' % len(worklist))
return bones
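# Editorial note on the fallback above: a numbered DEF bone such as
# "DEF-upper_arm.01.L" with no exact ORG counterpart is matched to
# "ORG-upper_arm.L" by stripping the numeric segment while keeping the
# .L/.R suffix.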
def collectBones(context, armature, scale):
data = armature.data
bones = {}
worldmatrix = armature.matrix_world
worklist = [ bone for bone in data.bones.values() if not bone.parent ]
for index, bone in enumerate(worklist):
bonematrix = worldmatrix @ bone.matrix_local
if scale != 1.0:
bonematrix.translation *= scale
bones[bone.name] = Bone(bone.name, bone.name, index, bone.parent and bones.get(bone.parent.name), bonematrix)
for child in bone.children:
if child not in worklist:
worklist.append(child)
print('Collected %d bones' % len(worklist))
return bones
def collectAnim(context, armature, scale, bones, action, startframe = None, endframe = None):
if not startframe or not endframe:
startframe, endframe = action.frame_range
startframe = int(startframe)
endframe = int(endframe)
print('Exporting action "%s" frames %d-%d' % (action.name, startframe, endframe))
scene = context.scene
worldmatrix = armature.matrix_world
armature.animation_data.action = action
outdata = []
for time in range(startframe, endframe+1):
scene.frame_set(time)
pose = armature.pose
outframe = []
for bone in bones:
posematrix = pose.bones[bone.origname].matrix
if bone.parent:
posematrix = pose.bones[bone.parent.origname].matrix.inverted() @ posematrix
else:
posematrix = worldmatrix @ posematrix
if scale != 1.0:
posematrix.translation *= scale
loc = posematrix.to_translation()
quat = posematrix.to_3x3().inverted().transposed().to_quaternion()
quat.normalize()
if quat.w > 0:
quat.negate()
pscale = posematrix.to_scale()
pscale.x = round(pscale.x*0x10000)/0x10000
pscale.y = round(pscale.y*0x10000)/0x10000
pscale.z = round(pscale.z*0x10000)/0x10000
outframe.append((loc, quat, pscale, posematrix))
outdata.append(outframe)
return outdata
def collectAnims(context, armature, scale, bones, animspecs, allAnimations):
if not armature.animation_data:
print('Armature has no animation data')
return []
actions = bpy.data.actions
animspecs = [ spec.strip() for spec in animspecs.split(',') ]
anims = []
scene = context.scene
oldaction = armature.animation_data.action
oldframe = scene.frame_current
print('Exporting animations')
if allAnimations:
for action in actions:
fps = float(scene.render.fps)
framedata = collectAnim(context, armature, scale, bones, action, None, None)
anims.append(Animation(action.name, framedata, fps, 0))
print('Exported action "%s"' % action.name)
else:
for animspec in animspecs:
animspec = [ arg.strip() for arg in animspec.split(':') ]
animname = animspec[0]
if animname not in actions:
print('Action "%s" not found in current armature' % animname)
continue
try:
startframe = int(animspec[1])
except:
startframe = None
try:
endframe = int(animspec[2])
except:
endframe = None
try:
fps = float(animspec[3])
except:
fps = float(scene.render.fps)
try:
flags = int(animspec[4])
except:
flags = 0
framedata = collectAnim(context, armature, scale, bones, actions[animname], startframe, endframe)
anims.append(Animation(animname, framedata, fps, flags))
armature.animation_data.action = oldaction
scene.frame_set(oldframe)
return anims
def collectMeshes(context, bones, scale, matfun, useskel = True, usecol = False, usemods = False, filetype = 'IQM'):
vertwarn = []
objs = context.selected_objects #context.scene.objects
meshes = []
for obj in objs:
if obj.type == 'MESH':
data = obj.evaluated_get(context.evaluated_depsgraph_get()).to_mesh() if usemods else obj.original.to_mesh()
if not data.polygons:
continue
data.calc_normals_split()
coordmatrix = obj.matrix_world
normalmatrix = coordmatrix.inverted().transposed()
if scale != 1.0:
coordmatrix = mathutils.Matrix.Scale(scale, 4) @ coordmatrix
materials = {}
matnames = {}
groups = obj.vertex_groups
uvlayer = data.uv_layers.active and data.uv_layers.active.data
colors = None
alpha = None
if usecol:
if data.vertex_colors.active:
if data.vertex_colors.active.name.startswith('alpha'):
alpha = data.vertex_colors.active.data
else:
colors = data.vertex_colors.active.data
for layer in data.vertex_colors:
if layer.name.startswith('alpha'):
if not alpha:
alpha = layer.data
elif not colors:
colors = layer.data
if data.materials:
for idx, mat in enumerate(data.materials):
matprefix = mat.name or ''
matimage = ''
if mat.node_tree:
for n in mat.node_tree.nodes:
if n.type == 'TEX_IMAGE' and n.image:
matimage = os.path.basename(n.image.filepath)
break
matnames[idx] = matfun(matprefix, matimage)
for face in data.polygons:
if len(face.vertices) < 3:
continue
if all([ data.vertices[i].co == data.vertices[face.vertices[0]].co for i in face.vertices[1:] ]):
continue
matindex = face.material_index
try:
mesh = materials[obj.name, matindex]
except KeyError:
matname = matnames.get(matindex, '')
mesh = Mesh(obj.name, matname, data.vertices)
meshes.append(mesh)
materials[obj.name, matindex] = mesh
verts = mesh.verts
vertmap = mesh.vertmap
faceverts = []
for loopidx in face.loop_indices:
loop = data.loops[loopidx]
v = data.vertices[loop.vertex_index]
vertco = coordmatrix @ v.co
if not face.use_smooth:
vertno = mathutils.Vector(face.normal)
else:
vertno = mathutils.Vector(loop.normal)
vertno = normalmatrix @ vertno
vertno.normalize()
# flip V axis of texture space
if uvlayer:
uv = uvlayer[loopidx].uv
vertuv = mathutils.Vector((uv[0], 1.0 - uv[1]))
else:
vertuv = mathutils.Vector((0.0, 0.0))
if colors:
vertcol = colors[loopidx].color
vertcol = (int(round(vertcol[0] * 255.0)), int(round(vertcol[1] * 255.0)), int(round(vertcol[2] * 255.0)), 255)
else:
vertcol = None
if alpha:
vertalpha = alpha[loopidx].color
if vertcol:
vertcol = (vertcol[0], vertcol[1], vertcol[2], int(round(vertalpha[0] * 255.0)))
else:
vertcol = (255, 255, 255, int(round(vertalpha[0] * 255.0)))
vertweights = []
if useskel:
for g in v.groups:
try:
vertweights.append((g.weight, bones[groups[g.group].name].index))
except (KeyError, IndexError):
if (groups[g.group].name, mesh.name) not in vertwarn:
vertwarn.append((groups[g.group].name, mesh.name))
print('Vertex depends on non-existent bone: %s in mesh: %s' % (groups[g.group].name, mesh.name))
if not face.use_smooth:
vertindex = len(verts)
vertkey = Vertex(vertindex, vertco, vertno, vertuv, vertweights, vertcol)
if filetype == 'IQM':
vertkey.normalizeWeights()
mesh.verts.append(vertkey)
faceverts.append(vertkey)
continue
vertkey = Vertex(v.index, vertco, vertno, vertuv, vertweights, vertcol)
if filetype == 'IQM':
vertkey.normalizeWeights()
if not verts[v.index]:
verts[v.index] = vertkey
faceverts.append(vertkey)
elif verts[v.index] == vertkey:
faceverts.append(verts[v.index])
else:
try:
vertindex = vertmap[vertkey]
faceverts.append(verts[vertindex])
except KeyError:
vertindex = len(verts)
vertmap[vertkey] = vertindex
verts.append(vertkey)
faceverts.append(vertkey)
# Quake winding is reversed
for i in range(2, len(faceverts)):
mesh.tris.append((faceverts[0], faceverts[i], faceverts[i-1]))
for mesh in meshes:
mesh.optimize()
if filetype == 'IQM':
mesh.calcTangents()
print('%s %s: generated %d triangles' % (mesh.name, mesh.material, len(mesh.tris)))
return meshes
def exportIQE(file, meshes, bones, anims):
file.write('# Inter-Quake Export\n\n')
for bone in bones:
if bone.parent:
parent = bone.parent.index
else:
parent = -1
file.write('joint "%s" %d\n' % (bone.name, parent))
if meshes:
pos = bone.localmatrix.to_translation()
orient = bone.localmatrix.to_quaternion()
orient.normalize()
if orient.w > 0:
orient.negate()
scale = bone.localmatrix.to_scale()
scale.x = round(scale.x*0x10000)/0x10000
scale.y = round(scale.y*0x10000)/0x10000
scale.z = round(scale.z*0x10000)/0x10000
if scale.x == 1.0 and scale.y == 1.0 and scale.z == 1.0:
file.write('\tpq %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w))
else:
file.write('\tpq %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w, scale.x, scale.y, scale.z))
hascolors = any(mesh.verts and mesh.verts[0].color for mesh in meshes)
for mesh in meshes:
file.write('\nmesh "%s"\n\tmaterial "%s"\n\n' % (mesh.name, mesh.material))
for v in mesh.verts:
file.write('vp %.8f %.8f %.8f\n\tvt %.8f %.8f\n\tvn %.8f %.8f %.8f\n' % (v.coord.x, v.coord.y, v.coord.z, v.uv.x, v.uv.y, v.normal.x, v.normal.y, v.normal.z))
if bones:
weights = '\tvb'
for weight in v.weights:
weights += ' %d %.8f' % (weight[1], weight[0])
file.write(weights + '\n')
if hascolors:
if v.color:
file.write('\tvc %.8f %.8f %.8f %.8f\n' % (v.color[0] / 255.0, v.color[1] / 255.0, v.color[2] / 255.0, v.color[3] / 255.0))
else:
file.write('\tvc 0 0 0 1\n')
file.write('\n')
for (v0, v1, v2) in mesh.tris:
file.write('fm %d %d %d\n' % (v0.index, v1.index, v2.index))
for anim in anims:
file.write('\nanimation "%s"\n\tframerate %.8f\n' % (anim.name, anim.fps))
if anim.flags&IQM_LOOP:
file.write('\tloop\n')
for frame in anim.frames:
file.write('\nframe\n')
for (pos, orient, scale, mat) in frame:
if scale.x == 1.0 and scale.y == 1.0 and scale.z == 1.0:
file.write('pq %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w))
else:
file.write('pq %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w, scale.x, scale.y, scale.z))
file.write('\n')
def exportIQM(context, filename, usemesh = True, usemods = False, useskel = True, usebbox = True, usecol = False, scale = 1.0, animspecs = None, matfun = (lambda prefix, image: image), derigify = False, boneorder = None, allAnimations = True):
armature = findArmature(context)
if useskel and not armature:
print('No armature selected')
return
if filename.lower().endswith('.iqm'):
filetype = 'IQM'
elif filename.lower().endswith('.iqe'):
filetype = 'IQE'
else:
print('Unknown file type: %s' % filename)
return
if useskel:
if derigify:
bones = derigifyBones(context, armature, scale)
else:
bones = collectBones(context, armature, scale)
else:
bones = {}
if boneorder:
try:
f = open(bpy_extras.io_utils.path_reference(boneorder, os.path.dirname(bpy.data.filepath), os.path.dirname(filename)), "r", encoding = "utf-8")
names = [line.strip() for line in f.readlines()]
f.close()
names = [name for name in names if name in [bone.name for bone in bones.values()]]
if len(names) != len(bones):
print('Bone order (%d) does not match skeleton (%d)' % (len(names), len(bones)))
return
print('Reordering bones')
for bone in bones.values():
bone.index = names.index(bone.name)
except:
print('Failed opening bone order: %s' % boneorder)
return
bonelist = sorted(bones.values(), key = lambda bone: bone.index)
if usemesh:
meshes = collectMeshes(context, bones, scale, matfun, useskel, usecol, usemods, filetype)
else:
meshes = []
if useskel and animspecs or allAnimations:
anims = collectAnims(context, armature, scale, bonelist, animspecs, allAnimations)
else:
anims = []
if filetype == 'IQM':
iqm = IQMFile()
iqm.addMeshes(meshes)
iqm.addJoints(bonelist)
iqm.addAnims(anims)
iqm.calcFrameSize()
iqm.calcNeighbors()
if filename:
try:
if filetype == 'IQM':
file = open(filename, 'wb')
else:
file = open(filename, 'w')
except IOError:
print('Failed writing to %s' % filename)
return
if filetype == 'IQM':
iqm.export(file, usebbox)
elif filetype == 'IQE':
exportIQE(file, meshes, bonelist, anims)
file.close()
print('Saved %s file to %s' % (filetype, filename))
else:
print('No %s file was generated' % (filetype))
class ExportIQM(bpy.types.Operator, bpy_extras.io_utils.ExportHelper):
'''Export an Inter-Quake Model IQM or IQE file'''
bl_idname = "export.iqm"
bl_label = 'Export IQM'
filename_ext = ".iqm"
# Blender 2.80+ expects operator properties to be declared as class annotations.
allAnimations: bpy.props.BoolProperty(name="All animations", description="Export All animations", default=True)
animspec: bpy.props.StringProperty(name="Animations", description="Animations to export", maxlen=1024, default="")
usemesh: bpy.props.BoolProperty(name="Meshes", description="Generate meshes", default=True)
usemods: bpy.props.BoolProperty(name="Modifiers", description="Apply modifiers", default=True)
useskel: bpy.props.BoolProperty(name="Skeleton", description="Generate skeleton", default=True)
usebbox: bpy.props.BoolProperty(name="Bounding boxes", description="Generate bounding boxes", default=True)
usecol: bpy.props.BoolProperty(name="Vertex colors", description="Export vertex colors", default=False)
usescale: bpy.props.FloatProperty(name="Scale", description="Scale of exported model", default=1.0, min=0.0, step=50, precision=2)
#usetrans = bpy.props.FloatVectorProperty(name="Translate", description="Translate position of exported model", step=50, precision=2, size=3)
matfmt: bpy.props.EnumProperty(name="Materials", description="Material name format", items=[("m+i-e", "material+image-ext", ""), ("m", "material", ""), ("i", "image", "")], default="m+i-e")
derigify: bpy.props.BoolProperty(name="De-rigify", description="Export only deformation bones from rigify", default=False)
boneorder: bpy.props.StringProperty(name="Bone order", description="Override ordering of bones", subtype="FILE_NAME", default="")
def execute(self, context):
if self.properties.matfmt == "m+i-e":
matfun = lambda prefix, image: prefix + os.path.splitext(image)[0]
elif self.properties.matfmt == "m":
matfun = lambda prefix, image: prefix
else:
matfun = lambda prefix, image: image
exportIQM(context, self.properties.filepath, self.properties.usemesh, self.properties.usemods, self.properties.useskel, self.properties.usebbox, self.properties.usecol, self.properties.usescale, self.properties.animspec, matfun, self.properties.derigify, self.properties.boneorder, self.properties.allAnimations)
return {'FINISHED'}
def check(self, context):
filepath = bpy.path.ensure_ext(self.filepath, '.iqm')
filepathalt = bpy.path.ensure_ext(self.filepath, '.iqe')
if filepath != self.filepath and filepathalt != self.filepath:
self.filepath = filepath
return True
return False
def menu_func(self, context):
self.layout.operator(ExportIQM.bl_idname, text="Inter-Quake Model (.iqm, .iqe)")
def register():
bpy.utils.register_class(ExportIQM)
bpy.types.TOPBAR_MT_file_export.append(menu_func)
def unregister():
bpy.utils.unregister_class(ExportIQM)
bpy.types.TOPBAR_MT_file_export.remove(menu_func)
if __name__ == "__main__":
register()
|
|
# Monary - Copyright 2011-2014 David J. C. Beach
# Please see the included LICENSE.TXT and NOTICE.TXT for licensing information.
import atexit
import copy
import ctypes
import os
import platform
import sys
PY3 = sys.version_info[0] >= 3
if PY3:
# Python 3.
bytes_type = bytes
string_type = str
from urllib.parse import urlencode
else:
# Python 2.6/2.7.
bytes_type = basestring
string_type = basestring
from urllib import urlencode
try:
# if we are using Python 2.7+.
from collections import OrderedDict
except ImportError:
# for Python 2.6 and earlier.
from .ordereddict import OrderedDict
import numpy
import bson
from .write_concern import WriteConcern
cmonary = None
ERROR_LEN = 504
ERROR_ARR = ctypes.c_char * ERROR_LEN
class bson_error_t(ctypes.Structure):
_fields_ = [
("domain", ctypes.c_uint),
("code", ctypes.c_uint),
("message", ERROR_ARR)
]
def get_empty_bson_error():
return bson_error_t(0, 0, "".encode("utf-8"))
class MonaryError(Exception):
pass
def _load_cmonary_lib():
"""Loads the cmonary CDLL library (from the directory containing
this module).
"""
global cmonary
moduledir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if platform.system() == 'Windows':
cmonary_fname = "libcmonary.pyd"
else:
cmonary_fname = "libcmonary.so"
cmonaryfile = None
for root, dirs, files in os.walk(moduledir):
for basename in files:
if basename == cmonary_fname:
cmonaryfile = os.path.join(root, basename)
break
if cmonaryfile is None:
raise RuntimeError("Unable to find cmonary shared library: ",
cmonary_fname)
cmonary = ctypes.CDLL(cmonaryfile)
_load_cmonary_lib()
CTYPE_CODES = {
"P": ctypes.c_void_p, # Pointer
"S": ctypes.c_char_p, # String
"I": ctypes.c_int, # Int
"U": ctypes.c_uint, # Unsigned int
"L": ctypes.c_long, # Long
"B": ctypes.c_bool, # Bool
"0": None, # None/void
}
# List of C function definitions from the cmonary library.
FUNCDEFS = [
# Format: "func_name:arg_types:return_type".
"monary_init::0",
"monary_cleanup::0",
"monary_connect:SP:P",
"monary_disconnect:P:0",
"monary_use_collection:PSS:P",
"monary_destroy_collection:P:0",
"monary_alloc_column_data:UU:P",
"monary_free_column_data:P:I",
"monary_set_column_item:PUSUUPPP:I",
"monary_query_count:PPP:L",
"monary_init_query:PUUPPIP:P",
"monary_init_aggregate:PPPP:P",
"monary_load_query:PP:I",
"monary_close_query:P:0",
"monary_create_write_concern:IIBBS:P",
"monary_destroy_write_concern:P:0",
"monary_insert:PPPPPP:0"
]
MAX_COLUMNS = 1024
MAX_STRING_LENGTH = 1024
def _decorate_cmonary_functions():
"""Decorates each of the cmonary functions with their argument and
result types.
"""
for funcdef in FUNCDEFS:
name, argtypes, restype = funcdef.split(":")
func = getattr(cmonary, name)
func.argtypes = [CTYPE_CODES[c] for c in argtypes]
func.restype = CTYPE_CODES[restype]
_decorate_cmonary_functions()
# Initialize Monary and register the cleanup function.
cmonary.monary_init()
atexit.register(cmonary.monary_cleanup)
# Table of type names and conversions between cmonary and numpy types.
MONARY_TYPES = {
# "common_name": (cmonary_type_code, numpy_type_object)
"id": (1, numpy.dtype("<V12")),
"bool": (2, numpy.bool),
"int8": (3, numpy.int8),
"int16": (4, numpy.int16),
"int32": (5, numpy.int32),
"int64": (6, numpy.int64),
"uint8": (7, numpy.uint8),
"uint16": (8, numpy.uint16),
"uint32": (9, numpy.uint32),
"uint64": (10, numpy.uint64),
"float32": (11, numpy.float32),
"float64": (12, numpy.float64),
"date": (13, numpy.int64),
"timestamp": (14, numpy.uint64),
# Note, numpy strings do not need the null character.
"string": (15, "S"),
# Raw data (void pointer).
"binary": (16, "<V"),
"bson": (17, "<V"),
"type": (18, numpy.uint8),
"size": (19, numpy.uint32),
"length": (20, numpy.uint32),
}
def get_monary_numpy_type(orig_typename):
"""Given a common typename, find the corresponding cmonary type number,
type argument, and numpy type object (or code).
The input typename must be one of the keys found in the ``MONARY_TYPES``
dictionary. These are common BSON type names such as ``id``, ``bool``,
``int32``, ``float64``, ``date``, or ``string``.
If the type is ``string``, ``binary``, or ``bson``, its name must be
followed by a ``:size`` suffix indicating the maximum number of bytes
that will be used to store the representation.
:param str orig_typename: a common type name with optional argument
(for fields with a size)
:returns: (type_num, type_arg, numpy_type)
:rtype: tuple
"""
# Process any type_arg that might be included.
if ':' in orig_typename:
vals = orig_typename.split(':', 2)
if len(vals) > 2:
raise ValueError("Too many parts in type: %r" % orig_typename)
type_name, arg = vals
try:
type_arg = int(arg)
except ValueError:
raise ValueError("Unable to parse type argument in: %r"
% orig_typename)
else:
type_arg = 0
type_name = orig_typename
if type_name not in MONARY_TYPES:
raise ValueError("Unknown typename: %r" % type_name)
if type_name in ("string", "binary", "bson"):
if type_arg == 0:
raise ValueError("%r must have an explicit typearg with nonzero "
"length (use 'string:20', for example)"
% type_name)
type_num, numpy_type_code = MONARY_TYPES[type_name]
numpy_type = numpy.dtype("%s%i" % (numpy_type_code, type_arg))
else:
type_num, numpy_type = MONARY_TYPES[type_name]
return type_num, type_arg, numpy_type
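# Illustrative sketch (added example, not part of the original module): how
# the "type[:size]" names documented above are parsed. It uses only
# get_monary_numpy_type and the numpy import from this module.
def _example_get_monary_numpy_type():
    # A plain numeric type has no size argument, so type_arg is 0.
    assert get_monary_numpy_type("float64") == (12, 0, numpy.float64)
    # A sized type such as "string:20" yields a 20-byte "S20" dtype.
    type_num, type_arg, numpy_type = get_monary_numpy_type("string:20")
    assert (type_num, type_arg) == (15, 20)
    assert numpy_type == numpy.dtype("S20")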
def make_bson(obj):
"""Given a Python (JSON compatible) dictionary, returns a BSON string.
(This hijacks the Python -> BSON conversion code from pymongo,
which is needed for converting queries. Perhaps this dependency
can be removed in a later version.)
:param obj: object to be encoded as BSON (dict, string, or None)
:returns: BSON encoded representation (byte string)
:rtype: str
"""
if obj is None:
obj = {}
if not isinstance(obj, bytes_type):
obj = bson.BSON.encode(obj)
return obj
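# Illustrative sketch (added example, not part of the original module): a
# None query encodes to the empty BSON document, and byte strings that are
# already BSON pass through unchanged.
def _example_make_bson():
    empty = make_bson(None)
    assert empty == b'\x05\x00\x00\x00\x00'   # BSON for {} is 5 bytes
    assert make_bson(empty) is empty          # bytes input is returned as-is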
def mvoid_to_bson_id(mvoid):
"""Converts a numpy mvoid value to a BSON ObjectId.
:param mvoid: numpy.ma.core.mvoid returned from Monary
:returns: the _id as a bson ObjectId
:rtype: bson.objectid.ObjectId
"""
if PY3:
# Python 3.
string = str(mvoid)
string_list = ''.join(filter(lambda y: y not in '[]', string)).split()
ints = map(int, string_list)
uints = [x & 0xff for x in ints]
id_bytes = bytes(uints)
return bson.ObjectId(id_bytes)
else:
# Python 2.6 / 2.7.
return bson.ObjectId(str(mvoid))
def validate_insert_fields(fields):
"""Validate field names for insertion.
:param fields: a list of field names
:returns: None
"""
for f in fields:
if f.endswith('.'):
raise ValueError("invalid fieldname: %r, must not end in '.'" % f)
if '$' in f:
raise ValueError("invalid fieldname: %r, must not contain '$'" % f)
if len(fields) != len(set(fields)):
raise ValueError("field names must all be unique")
for f1 in fields:
for f2 in fields:
if f1 != f2 and f1.startswith(f2) and f1[len(f2)] == '.':
raise ValueError("fieldname %r conflicts with nested-document "
"fieldname %r" % (f2, f1))
def get_ordering_dict(obj):
"""Converts a field/direction specification to an OrderedDict, suitable
for BSON encoding.
:param obj: single field name or list of (field, direction) pairs
:returns: mapping representing the field/direction list
:rtype: OrderedDict
"""
if obj is None:
return OrderedDict()
elif isinstance(obj, string_type):
return OrderedDict([(obj, 1)])
elif isinstance(obj, list):
return OrderedDict(obj)
else:
raise ValueError("Invalid ordering: should be str or list of "
"(column, direction) pairs")
def get_plain_query(query):
"""Composes a plain query from the given query object.
:param dict query: query dictionary (or None)
:returns: BSON encoded query (byte string)
:rtype: str
"""
if query is None:
query = {}
return make_bson(query)
def get_full_query(query, sort=None, hint=None):
"""Composes a full query from the given query object, and sort and hint
clauses, if provided.
:param dict query: query dictionary (or None)
:param sort: (optional) single field name or list of (field, direction)
pairs
:param hint: (optional) single field name or list of (field, direction)
pairs
:returns: BSON encoded query (byte string)
:rtype: str
"""
if query is None:
query = {}
if sort or hint:
query = OrderedDict([("$query", query)])
if sort:
query["$orderby"] = get_ordering_dict(sort)
if hint:
query["$hint"] = get_ordering_dict(hint)
return make_bson(query)
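# Illustrative sketch (added example, not part of the original module): with a
# sort clause the filter is wrapped under "$query" and the ordering is added
# under "$orderby" before BSON encoding.
def _example_get_full_query():
    expected = OrderedDict([("$query", {"sold": True})])
    expected["$orderby"] = OrderedDict([("sell_price", -1)])
    assert (get_full_query({"sold": True}, sort=[("sell_price", -1)]) ==
            make_bson(expected))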
def get_pipeline(pipeline):
"""Manipulates the input pipeline into a usable form."""
if isinstance(pipeline, list):
pipeline = {"pipeline": pipeline}
elif isinstance(pipeline, dict):
if "pipeline" not in pipeline:
pipeline = {"pipeline": [pipeline]}
else:
raise TypeError("Pipeline must be a dict or a list")
return pipeline
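# Illustrative sketch (added example, not part of the original module): both a
# bare list of stages and a single stage dict are normalized to the
# {"pipeline": [...]} form used by cmonary.
def _example_get_pipeline():
    stage = {"$match": {"state": "WA"}}
    assert get_pipeline([stage]) == {"pipeline": [stage]}
    assert get_pipeline(stage) == {"pipeline": [stage]}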
class Monary(object):
"""Represents a 'monary' connection to a particular MongoDB server."""
def __init__(self, host="localhost", port=27017, username=None,
password=None, database=None, pem_file=None,
pem_pwd=None, ca_file=None, ca_dir=None, crl_file=None,
weak_cert_validation=True, options=None):
"""
An example of initializing monary with a port and hostname:
>>> m = Monary("localhost", 27017)
An example of initializing monary with a URI and SSL parameters:
>>> m = Monary("mongodb://localhost:27017/?ssl=true",
... pem_file='client.pem', ca_file='ca.pem',
... crl_file='crl.pem')
:param host: either host name (or IP) to connect to, or full URI
:param port: port number of running MongoDB service on host
:param username: An optional username for authentication.
:param password: An optional password for authentication.
:param database: The database to authenticate to if the URI
specifies a username and password. If this is not specified but
credentials exist, this defaults to the "admin" database. See
mongoc_uri(7).
:param pem_file: SSL certificate and key file
:param pem_pwd: Passphrase for encrypted key file
:param ca_file: Certificate authority file
:param ca_dir: Directory for certificate authority files
:param crl_file: Certificate revocation list file
:param weak_cert_validation: bypass validation
:param options: Connection-specific options as a dict.
"""
self._cmonary = cmonary
self._connection = None
self.connect(host, port, username, password, database,
pem_file, pem_pwd, ca_file, ca_dir, crl_file,
weak_cert_validation, options)
def connect(self, host="localhost", port=27017, username=None,
password=None, database=None, p_file=None,
pem_pwd=None, ca_file=None, ca_dir=None, c_file=None,
weak_cert_validation=False, options=None):
"""Connects to the given host and port.
:param host: either host name (or IP) to connect to, or full URI
:param port: port number of running MongoDB service on host
:param username: An optional username for authentication.
:param password: An optional password for authentication.
:param database: The database to authenticate to if the URI
specifies a username and password. If this is not specified but
credentials exist, this defaults to the "admin" database. See
mongoc_uri(7).
:param p_file: SSL certificate and key file
:param pem_pwd: Passphrase for encrypted key file
:param ca_file: Certificate authority file
:param ca_dir: Directory for certificate authority files
:param c_file: Certificate revocation list file
:param weak_cert_validation: bypass validation
:param options: Connection-specific options as a dict.
:raises MonaryError: if the connection cannot be established.
"""
if self._connection is not None:
self.close()
if host.startswith("mongodb://"):
uri = host
else:
# Build up the URI string.
uri = ["mongodb://"]
if username is not None:
if password is None:
uri.append("%s@" % username)
else:
uri.append("%s:%s@" % (username, password))
elif password is not None:
raise ValueError("You cannot have a password with no"
" username.")
uri.append("%s:%d" % (host, port))
if database is not None:
uri.append("/%s" % database)
if options is not None:
uri.append("?%s" % urlencode(options))
uri = "".join(uri)
if PY3:
p_file = bytes(p_file, "ascii") if p_file is not None else None
pem_pwd = bytes(pem_pwd, "ascii") if pem_pwd is not None else None
ca_file = bytes(ca_file, "ascii") if ca_file is not None else None
ca_dir = bytes(ca_dir, "ascii") if ca_dir is not None else None
c_file = bytes(c_file, "ascii") if c_file is not None else None
# Attempt the connection.
err = get_empty_bson_error()
self._connection = cmonary.monary_connect(
uri.encode('ascii'),
ctypes.c_char_p(p_file),
ctypes.c_char_p(pem_pwd),
ctypes.c_char_p(ca_file),
ctypes.c_char_p(ca_dir),
ctypes.c_char_p(c_file),
ctypes.c_bool(weak_cert_validation),
ctypes.byref(err))
if self._connection is None:
raise MonaryError(err.message)
def _make_column_data(self, fields, types, count):
"""Builds the 'column data' structure used by the underlying cmonary
code to populate the arrays. This code must allocate the array
objects, and provide their corresponding storage pointers and sizes
to cmonary.
:param fields: list of field names
:param types: list of Monary type names
:param count: size of storage to be allocated
:returns: (coldata, colarrays) where coldata is the cmonary
column data storage structure, and colarrays is a list of
numpy.ndarray instances
:rtype: tuple
"""
err = get_empty_bson_error()
numcols = len(fields)
if numcols != len(types):
raise ValueError("Number of fields and types do not match")
if numcols > MAX_COLUMNS:
raise ValueError("Number of fields exceeds maximum of %d"
% MAX_COLUMNS)
coldata = cmonary.monary_alloc_column_data(numcols, count)
if coldata is None:
raise MonaryError("Unable to allocate column data")
colarrays = []
for i, (field, typename) in enumerate(zip(fields, types)):
if len(field) > MAX_STRING_LENGTH:
raise ValueError("Length of field name %s exceeds "
"maximum of %d" % (field, MAX_COLUMNS))
c_type, c_type_arg, numpy_type = get_monary_numpy_type(typename)
data = numpy.zeros([count], dtype=numpy_type)
mask = numpy.ones([count], dtype=bool)
storage = numpy.ma.masked_array(data, mask)
colarrays.append(storage)
data_p = data.ctypes.data_as(ctypes.c_void_p)
mask_p = mask.ctypes.data_as(ctypes.c_void_p)
if cmonary.monary_set_column_item(
coldata,
i,
field.encode('ascii'),
c_type,
c_type_arg,
data_p,
mask_p,
ctypes.byref(err)) < 0:
raise MonaryError(err.message)
return coldata, colarrays
def _get_collection(self, db, collection):
"""Returns the specified collection to query against.
:param db: name of database
:param collection: name of collection
:returns: the collection
:rtype: cmonary mongoc_collection_t*
"""
if self._connection is not None:
return cmonary.monary_use_collection(self._connection,
db.encode('ascii'),
collection.encode('ascii'))
else:
raise MonaryError("Unable to get the collection %s.%s - "
"not connected" % (db, collection))
def count(self, db, coll, query=None):
"""Count the number of records returned by the given query.
:param db: name of database
:param coll: name of the collection to be queried
:param query: (optional) dictionary of Mongo query parameters
:returns: the number of records
:rtype: int
"""
collection = None
err = get_empty_bson_error()
try:
collection = self._get_collection(db, coll)
if collection is None:
raise MonaryError("Unable to get the collection %s.%s" %
(db, coll))
query = make_bson(query)
count = cmonary.monary_query_count(collection,
query,
ctypes.byref(err))
finally:
if collection is not None:
cmonary.monary_destroy_collection(collection)
if count < 0:
raise MonaryError(err.message)
return count
def query(self, db, coll, query, fields, types,
sort=None, hint=None,
limit=0, offset=0,
do_count=True, select_fields=False):
"""Performs an array query.
:param db: name of database
:param coll: name of the collection to be queried
:param query: dictionary of Mongo query parameters
:param fields: list of fields to be extracted from each record
:param types: corresponding list of field types
:param sort: (optional) single field name or list of
(field, direction) pairs
:param hint: (optional) single field name or list of
(field, direction) pairs
:param limit: (optional) limit number of records (and size
of arrays)
:param offset: (optional) skip this many records before gathering
results
:param bool do_count: count items before allocating arrays
(otherwise, array size is set to limit)
:param bool select_fields: select exact fields from database
(performance/bandwidth tradeoff)
:returns: list of numpy.ndarray, corresponding to the requested
fields and types
:rtype: list
"""
plain_query = get_plain_query(query)
full_query = get_full_query(query, sort, hint)
if not do_count and limit > 0:
count = limit
else:
# count() doesn't like $query/$orderby/$hint, so need plain query.
count = self.count(db, coll, plain_query)
if count > limit > 0:
count = limit
coldata = None
collection = None
err = get_empty_bson_error()
try:
coldata, colarrays = self._make_column_data(fields, types, count)
cursor = None
try:
collection = self._get_collection(db, coll)
if collection is None:
raise MonaryError("Unable to get the collection")
cursor = cmonary.monary_init_query(
collection,
offset,
limit,
full_query,
coldata,
select_fields,
ctypes.byref(err))
if cursor is None:
raise MonaryError(err.message)
if cmonary.monary_load_query(cursor, ctypes.byref(err)) < 0:
raise MonaryError(err.message)
finally:
if cursor is not None:
cmonary.monary_close_query(cursor)
if collection is not None:
cmonary.monary_destroy_collection(collection)
finally:
if coldata is not None:
cmonary.monary_free_column_data(coldata)
return colarrays
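# Usage sketch (hypothetical database, collection and field names; requires a
# running MongoDB server). Added as an example, not part of the original
# module:
#
#     with Monary("localhost", 27017) as m:
#         heights, weights = m.query("test", "people", {},
#                                    ["height", "weight"],
#                                    ["float64", "float64"])
#
# Each returned entry is a numpy masked array; entries the query did not fill
# remain masked.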
def block_query(self, db, coll, query, fields, types,
sort=None, hint=None,
block_size=8192, limit=0, offset=0,
select_fields=False):
"""Performs a block query.
:param db: name of database
:param coll: name of the collection to be queried
:param query: dictionary of Mongo query parameters
:param fields: list of fields to be extracted from each record
:param types: corresponding list of field types
:param sort: (optional) single field name or list of
(field, direction) pairs
:param hint: (optional) single field name or list of
(field, direction) pairs
:param block_size: (optional) size in number of rows of each
yielded list
:param limit: (optional) limit number of records (and size of
arrays)
:param offset: (optional) skip this many records before gathering
results
:param bool select_fields: select exact fields from database
(performance/bandwidth tradeoff)
:returns: list of numpy.ndarray, corresponding to the requested
fields and types
:rtype: list
A block query is a query whose results are returned in
blocks of a given size. Instead of returning a list of arrays,
this generator yields portions of each array in multiple blocks,
where each block may contain up to *block_size* elements.
An example::
cumulative_gain = 0.0
for buy_price_block, sell_price_block in (
monary.block_query("finance", "assets", {"sold": True},
["buy_price", "sell_price"],
["float64", "float64"],
block_size=1024)):
# Vector subtraction.
gain = sell_price_block - buy_price_block
cumulative_gain += numpy.sum(gain)
.. note:: Memory for each block is reused between iterations.
If the caller wishes to retain the values from a given
iteration, it should copy the data.
"""
if block_size < 1:
block_size = 1
full_query = get_full_query(query, sort, hint)
coldata = None
collection = None
try:
coldata, colarrays = self._make_column_data(fields,
types,
block_size)
cursor = None
try:
collection = self._get_collection(db, coll)
if collection is None:
raise MonaryError("Unable to get the collection")
err = get_empty_bson_error()
cursor = cmonary.monary_init_query(
collection,
offset,
limit,
full_query,
coldata,
select_fields,
ctypes.byref(err))
if cursor is None:
raise MonaryError(err.message)
while True:
num_rows = cmonary.monary_load_query(cursor,
ctypes.byref(err))
if num_rows < 0:
raise MonaryError(err.message)
if num_rows == block_size:
yield colarrays
elif num_rows > 0:
yield [arr[:num_rows] for arr in colarrays]
break
else:
break
finally:
if cursor is not None:
cmonary.monary_close_query(cursor)
if collection is not None:
cmonary.monary_destroy_collection(collection)
finally:
if coldata is not None:
cmonary.monary_free_column_data(coldata)
def insert(self, db, coll, params, write_concern=None):
"""Performs an insertion of data from arrays.
:param db: name of database
:param coll: name of the collection to insert into
:param params: list of MonaryParams to be inserted
:param write_concern: (optional) a WriteConcern object.
:returns: A numpy array of the inserted documents ObjectIds. Masked
values indicate documents that failed to be inserted.
:rtype: numpy.ma.core.MaskedArray
.. note:: Params will be sorted by field before insertion. To ensure
that _id is the first field in all generated documents
and that nested keys are consecutive, all keys will be
sorted alphabetically before the insertions are performed.
The corresponding types and data will be sorted the same
way to maintain the original correspondence.
"""
err = get_empty_bson_error()
if len(params) == 0:
raise ValueError("cannot do an empty insert")
validate_insert_fields(list(map(lambda p: p.field, params)))
# To ensure that _id is the first key, the string "_id" is mapped
# to chr(0). This will put "_id" in front of any other field.
params = sorted(
params, key=lambda p: p.field if p.field != "_id" else chr(0))
if params[0].field == "_id" and params[0].array.mask.any():
raise ValueError("the _id array must not have any masked values")
if len(set(len(p) for p in params)) != 1:
raise ValueError("all given arrays must be of the same length")
collection = None
coldata = None
id_data = None
try:
coldata = cmonary.monary_alloc_column_data(len(params),
len(params[0]))
for i, param in enumerate(params):
data_p = param.array.data.ctypes.data_as(ctypes.c_void_p)
mask_p = param.array.mask.ctypes.data_as(ctypes.c_void_p)
if cmonary.monary_set_column_item(
coldata,
i,
param.field.encode("utf-8"),
param.cmonary_type,
param.cmonary_type_arg,
data_p,
mask_p,
ctypes.byref(err)) < 0:
raise MonaryError(err.message)
# Create a new column for the ids to be returned.
id_data = cmonary.monary_alloc_column_data(1, len(params[0]))
if params[0].field == "_id":
# If the user specifies "_id", it will be sorted to the front.
# Copy as a masked array so that both .data and .mask stay available below.
ids = params[0].array.copy()
c_type = params[0].cmonary_type
c_type_arg = params[0].cmonary_type_arg
else:
# Allocate a single column to return the generated ObjectIds.
c_type, c_type_arg, numpy_type = get_monary_numpy_type("id")
ids = numpy.zeros(len(params[0]), dtype=numpy_type)
mask = numpy.ones(len(params[0]))
ids = numpy.ma.masked_array(ids, mask)
if cmonary.monary_set_column_item(
id_data,
0,
"_id".encode("utf-8"),
c_type,
c_type_arg,
ids.data.ctypes.data_as(ctypes.c_void_p),
ids.mask.ctypes.data_as(ctypes.c_void_p),
ctypes.byref(err)) < 0:
raise MonaryError(err.message)
collection = self._get_collection(db, coll)
if collection is None:
raise ValueError("unable to get the collection")
if write_concern is None:
write_concern = WriteConcern()
cmonary.monary_insert(
collection,
coldata,
id_data,
self._connection,
write_concern.get_c_write_concern(),
ctypes.byref(err))
return ids
finally:
if write_concern is not None:
write_concern.destroy_c_write_concern()
if coldata is not None:
cmonary.monary_free_column_data(coldata)
if id_data is not None:
cmonary.monary_free_column_data(id_data)
if collection is not None:
cmonary.monary_destroy_collection(collection)
def aggregate(self, db, coll, pipeline, fields, types, limit=0,
do_count=True):
"""Performs an aggregation operation.
:param: db: name of database
:param coll: name of collection on which to perform the aggregation
:param pipeline: a list of pipeline stages
:param fields: list of fields to be extracted from the result
:param types: corresponding list of field types
:returns: list of numpy.ndarray, corresponding to the requested
fields and types
:rtype: list
"""
# Convert the pipeline to a usable form.
pipeline = get_pipeline(pipeline)
# Determine sizing for array allocation.
if not do_count and limit > 0:
# Limit ourselves to only the first ``count`` records.
count = limit
else:
# Use the aggregation pipeline to count the result size.
count_stage = {"$group": {"_id": 1, "count": {"$sum": 1}}}
pipe_copy = copy.deepcopy(pipeline)
pipe_copy["pipeline"].append(count_stage)
# Extract the count.
result, = self.aggregate(db, coll, pipe_copy, ["count"], ["int64"],
limit=1, do_count=False)
result = result.compressed()
if len(result) == 0:
# The count returned was masked.
raise MonaryError("Failed to count the aggregation size")
else:
count = result[0]
if count > limit > 0:
count = limit
encoded_pipeline = get_plain_query(pipeline)
coldata = None
collection = None
try:
coldata, colarrays = self._make_column_data(fields, types, count)
cursor = None
try:
collection = self._get_collection(db, coll)
if collection is None:
raise MonaryError("Unable to get the collection")
err = get_empty_bson_error()
cursor = cmonary.monary_init_aggregate(collection,
encoded_pipeline,
coldata,
ctypes.byref(err))
if cursor is None:
raise MonaryError(err.message)
if cmonary.monary_load_query(cursor, ctypes.byref(err)) < 0:
raise MonaryError(err.message)
finally:
if cursor is not None:
cmonary.monary_close_query(cursor)
if collection is not None:
cmonary.monary_destroy_collection(collection)
finally:
if coldata is not None:
cmonary.monary_free_column_data(coldata)
return colarrays
def block_aggregate(self, db, coll, pipeline, fields, types,
block_size=8192, limit=0):
"""Performs an aggregation operation.
Perform an aggregation operation on a collection, returning the
results in blocks of size ``block_size``.
"""
if block_size < 1:
block_size = 1
pipeline = get_pipeline(pipeline)
encoded_pipeline = get_plain_query(pipeline)
coldata = None
collection = None
try:
coldata, colarrays = self._make_column_data(fields,
types,
block_size)
cursor = None
try:
collection = self._get_collection(db, coll)
if collection is None:
raise MonaryError("Unable to get the collection")
err = get_empty_bson_error()
cursor = cmonary.monary_init_aggregate(collection,
encoded_pipeline,
coldata,
ctypes.byref(err))
if cursor is None:
raise MonaryError(err.message)
err = get_empty_bson_error()
while True:
num_rows = cmonary.monary_load_query(cursor,
ctypes.byref(err))
if num_rows < 0:
raise MonaryError(err.message)
if num_rows == block_size:
yield colarrays
elif num_rows > 0:
yield [arr[:num_rows] for arr in colarrays]
break
else:
break
finally:
if cursor is not None:
cmonary.monary_close_query(cursor)
if collection is not None:
cmonary.monary_destroy_collection(collection)
finally:
if coldata is not None:
cmonary.monary_free_column_data(coldata)
def close(self):
"""Closes the current connection, if any."""
if self._connection is not None:
cmonary.monary_disconnect(self._connection)
self._connection = None
def __enter__(self):
"""Monary connections meet the ContextManager protocol."""
return self
def __exit__(self, *args):
"""Monary connections meet the ContextManager protocol."""
self.close()
def __del__(self):
"""Closes the Monary connection and cleans up resources."""
self.close()
self._cmonary = None
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from migrate.changeset import UniqueConstraint
import sqlalchemy
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import log as logging
from keystone.openstack.common import timeutils
LOG = logging.getLogger(__name__)
_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")
def sanitize_db_url(url):
match = _DBURL_REGEX.match(url)
if match:
return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):])
return url
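# Illustrative sketch (added example, not part of the original module):
# credentials embedded in a DB URL are masked, other URLs pass through.
def _example_sanitize_db_url():
    assert (sanitize_db_url("mysql://user:secret@localhost/keystone") ==
            "mysql://****:****@localhost/keystone")
    assert sanitize_db_url("sqlite:///keystone.db") == "sqlite:///keystone.db"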
class InvalidSortKey(Exception):
message = _("Sort key supplied was not valid.")
# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
sort_dir=None, sort_dirs=None):
"""Returns a query with sorting / pagination criteria added.
Pagination works by requiring a unique sort_key, specified by sort_keys.
(If sort_keys is not unique, then we risk looping through values.)
We use the last row in the previous page as the 'marker' for pagination.
So we must return values that follow the passed marker in the order.
With a single-valued sort_key, this would be easy: sort_key > X.
With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
the lexicographical ordering:
(k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
We also have to cope with different sort_directions.
Typically, the id of the last row is used as the client-facing pagination
marker, then the actual marker object must be fetched from the db and
passed in to us as marker.
:param query: the query object to which we should add paging/sorting
:param model: the ORM model class
:param limit: maximum number of items to return
:param sort_keys: array of attributes by which results should be sorted
:param marker: the last item of the previous page; we return the next
results after this value.
:param sort_dir: direction in which results should be sorted (asc, desc)
:param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
:rtype: sqlalchemy.orm.query.Query
:return: The query with sorting/pagination added.
"""
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id
LOG.warning(_('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
# Default the sort direction to ascending
if sort_dirs is None and sort_dir is None:
sort_dir = 'asc'
# Ensure a per-column sort direction
if sort_dirs is None:
sort_dirs = [sort_dir for _sort_key in sort_keys]
assert(len(sort_dirs) == len(sort_keys))
# Add sorting
for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
try:
sort_dir_func = {
'asc': sqlalchemy.asc,
'desc': sqlalchemy.desc,
}[current_sort_dir]
except KeyError:
raise ValueError(_("Unknown sort direction, "
"must be 'desc' or 'asc'"))
try:
sort_key_attr = getattr(model, current_sort_key)
except AttributeError:
raise InvalidSortKey()
query = query.order_by(sort_dir_func(sort_key_attr))
# Add pagination
if marker is not None:
marker_values = []
for sort_key in sort_keys:
v = getattr(marker, sort_key)
marker_values.append(v)
# Build up an array of sort criteria as in the docstring
criteria_list = []
for i in range(len(sort_keys)):
crit_attrs = []
for j in range(i):
model_attr = getattr(model, sort_keys[j])
crit_attrs.append((model_attr == marker_values[j]))
model_attr = getattr(model, sort_keys[i])
if sort_dirs[i] == 'desc':
crit_attrs.append((model_attr < marker_values[i]))
else:
crit_attrs.append((model_attr > marker_values[i]))
criteria = sqlalchemy.sql.and_(*crit_attrs)
criteria_list.append(criteria)
f = sqlalchemy.sql.or_(*criteria_list)
query = query.filter(f)
if limit is not None:
query = query.limit(limit)
return query
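# Worked example (hypothetical column names, added for illustration): with
# sort_keys=('created_at', 'id'), sort_dirs=('asc', 'asc') and a marker row M,
# the filter built above is equivalent to
#
#     (created_at > M.created_at) OR
#     (created_at = M.created_at AND id > M.id)
#
# i.e. it resumes the lexicographic (created_at, id) ordering just after M.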
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data.
"""
metadata = MetaData()
metadata.bind = engine
return Table(name, metadata, autoload=True)
class InsertFromSelect(UpdateBase):
"""Form the base for `INSERT INTO table (SELECT ... )` statement."""
def __init__(self, table, select):
self.table = table
self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
"""Form the `INSERT INTO table (SELECT ... )` statement."""
return "INSERT INTO %s %s" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.select))
class ColumnError(Exception):
"""Error raised when no column or an invalid column is found."""
def _get_not_supported_column(col_name_col_instance, column_name):
try:
column = col_name_col_instance[column_name]
except KeyError:
msg = _("Please specify column %s in col_name_col_instance "
"param. It is required because column has unsupported "
"type by sqlite).")
raise ColumnError(msg % column_name)
if not isinstance(column, Column):
msg = _("col_name_col_instance param has wrong type of "
"column instance for column %s It should be instance "
"of sqlalchemy.Column.")
raise ColumnError(msg % column_name)
return column
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
**col_name_col_instance):
"""Drop unique constraint from table.
This method drops the unique constraint from the table and works for
mysql, postgresql and sqlite. In mysql and postgresql we are able to use
the "alter table" construction.
Sqlalchemy doesn't support some sqlite column types and replaces their
type with NullType in metadata. We process these columns and replace
NullType with the correct column type.
:param migrate_engine: sqlalchemy engine
:param table_name: name of table that contains the unique constraint.
:param uc_name: name of the unique constraint that will be dropped.
:param columns: columns that are in uniq constraint.
:param col_name_col_instance: contains pair column_name=column_instance.
column_instance is instance of Column. These params
are required only for columns that have unsupported
types by sqlite. For example BigInteger.
"""
meta = MetaData()
meta.bind = migrate_engine
t = Table(table_name, meta, autoload=True)
if migrate_engine.name == "sqlite":
override_cols = [
_get_not_supported_column(col_name_col_instance, col.name)
for col in t.columns
if isinstance(col.type, NullType)
]
for col in override_cols:
t.columns.replace(col)
uc = UniqueConstraint(*columns, table=t, name=uc_name)
uc.drop()
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
use_soft_delete, *uc_column_names):
"""Drop all old rows having the same values for columns in uc_columns.
This method drops (or marks as `deleted` if use_soft_delete is True) old
duplicate rows from the table with name `table_name`.
:param migrate_engine: Sqlalchemy engine
:param table_name: Table with duplicates
:param use_soft_delete: If True - values will be marked as `deleted`,
if False - values will be removed from table
:param uc_column_names: Unique constraint columns
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
columns_for_group_by = [table.c[name] for name in uc_column_names]
columns_for_select = [func.max(table.c.id)]
columns_for_select.extend(columns_for_group_by)
duplicated_rows_select = select(columns_for_select,
group_by=columns_for_group_by,
having=func.count(table.c.id) > 1)
for row in migrate_engine.execute(duplicated_rows_select):
# NOTE(boris-42): Do not remove row that has the biggest ID.
delete_condition = table.c.id != row[0]
is_none = None # workaround for pyflakes
delete_condition &= table.c.deleted_at == is_none
for name in uc_column_names:
delete_condition &= table.c[name] == row[name]
rows_to_delete_select = select([table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_("Deleting duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete:
delete_statement = table.update().\
where(delete_condition).\
values({
'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()
})
else:
delete_statement = table.delete().where(delete_condition)
migrate_engine.execute(delete_statement)
def _get_default_deleted_value(table):
if isinstance(table.c.id.type, Integer):
return 0
if isinstance(table.c.id.type, String):
return ""
raise ColumnError(_("Unsupported id columns type"))
def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
table = get_table(migrate_engine, table_name)
insp = reflection.Inspector.from_engine(migrate_engine)
real_indexes = insp.get_indexes(table_name)
existing_index_names = dict(
[(index['name'], index['column_names']) for index in real_indexes])
# NOTE(boris-42): Restore indexes on `deleted` column
for index in indexes:
if 'deleted' not in index['column_names']:
continue
name = index['name']
if name in existing_index_names:
column_names = [table.c[c] for c in existing_index_names[name]]
old_index = Index(name, *column_names, unique=index["unique"])
old_index.drop(migrate_engine)
column_names = [table.c[c] for c in index['column_names']]
new_index = Index(index["name"], *column_names, unique=index["unique"])
new_index.create(migrate_engine)
def change_deleted_column_type_to_boolean(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_boolean_sqlite(
migrate_engine, table_name, **col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
old_deleted = Column('old_deleted', Boolean, default=False)
old_deleted.create(table, populate_default=False)
table.update().\
where(table.c.deleted == table.c.id).\
values(old_deleted=True).\
execute()
table.c.deleted.drop()
table.c.old_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
**col_name_col_instance):
insp = reflection.Inspector.from_engine(migrate_engine)
table = get_table(migrate_engine, table_name)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', Boolean, default=0)
columns.append(column_copy)
constraints = [constraint.copy() for constraint in table.constraints]
meta = table.metadata
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
c_select = []
for c in table.c:
if c.name != "deleted":
c_select.append(c)
else:
c_select.append(table.c.deleted == table.c.id)
ins = InsertFromSelect(new_table, select(c_select))
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
new_table.update().\
where(new_table.c.deleted == new_table.c.id).\
values(deleted=True).\
execute()
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_id_type_sqlite(
migrate_engine, table_name, **col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
new_deleted = Column('new_deleted', table.c.id.type,
default=_get_default_deleted_value(table))
new_deleted.create(table, populate_default=True)
deleted = True # workaround for pyflakes
table.update().\
where(table.c.deleted == deleted).\
values(new_deleted=table.c.id).\
execute()
table.c.deleted.drop()
table.c.new_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
**col_name_col_instance):
# NOTE(boris-42): sqlalchemy-migrate can't drop column with check
# constraints in sqlite DB and our `deleted` column has
# 2 check constraints. So there is only one way to remove
# these constraints:
# 1) Create new table with the same columns, constraints
# and indexes. (except deleted column).
# 2) Copy all data from old to new table.
# 3) Drop old table.
# 4) Rename new table to old table name.
insp = reflection.Inspector.from_engine(migrate_engine)
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', table.c.id.type,
default=default_deleted_value)
columns.append(column_copy)
def is_deleted_column_constraint(constraint):
# NOTE(boris-42): There is no other way to check whether a
# CheckConstraint is associated with the deleted column.
if not isinstance(constraint, CheckConstraint):
return False
sqltext = str(constraint.sqltext)
return (sqltext.endswith("deleted in (0, 1)") or
sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))
constraints = []
for constraint in table.constraints:
if not is_deleted_column_constraint(constraint):
constraints.append(constraint.copy())
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
deleted = True # workaround for pyflakes
new_table.update().\
where(new_table.c.deleted == deleted).\
values(deleted=new_table.c.id).\
execute()
# NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
deleted = False # workaround for pyflakes
new_table.update().\
where(new_table.c.deleted == deleted).\
values(deleted=default_deleted_value).\
execute()
|
|
import numpy as np
import pandas as pd
from pandas import DataFrame
from collections import defaultdict
STATS_COLUMNS = ['_expanded_income', 'c00100', '_standard', 'c04470', 'c04600', 'c04800', 'c05200',
'c62100','c09600', 'c05800', 'c09200', '_refund', 'c07100',
'_ospctax','s006']
# each entry in this array corresponds to the same entry in the array
# TABLE_LABELS below. this allows us to use TABLE_LABELS to map a
# label to the correct column in our distribution table
TABLE_COLUMNS = ['s006','c00100', 'num_returns_StandardDed', '_standard',
'num_returns_ItemDed', 'c04470', 'c04600', 'c04800', 'c05200',
'c62100','num_returns_AMT', 'c09600', 'c05800', 'c07100','c09200',
'_refund','_ospctax']
TABLE_LABELS = ['Returns', 'AGI', 'Standard Deduction Filers',
'Standard Deduction', 'Itemizers',
'Itemized Deduction', 'Personal Exemption',
'Taxable Income', 'Regular Tax', 'AMTI', 'AMT Filers', 'AMT',
'Tax before Credits', 'Non-refundable Credits',
'Tax before Refundable Credits', 'Refundable Credits',
'Revenue']
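# For example, zipping the two lists pairs each label with its column:
# dict(zip(TABLE_LABELS, TABLE_COLUMNS))["AGI"] == "c00100" and
# dict(zip(TABLE_LABELS, TABLE_COLUMNS))["Revenue"] == "_ospctax".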
# used in our difference table to label the columns
DIFF_TABLE_LABELS = ["Tax Units with Tax Cut", "Tax Units with Tax Increase",
"Count", "Average Tax Change", "Total Tax Difference",
"Percent with Tax Increase", "Percent with Tax Decrease",
"Share of Overall Change"]
LARGE_INCOME_BINS = [-1e14, 0, 9999, 19999, 29999, 39999, 49999, 74999, 99999,
200000, 1e14]
SMALL_INCOME_BINS = [-1e14, 0, 4999, 9999, 14999, 19999, 24999, 29999, 39999,
49999, 74999, 99999, 199999, 499999, 999999, 1499999,
1999999, 4999999, 9999999, 1e14]
WEBAPP_INCOME_BINS = [-1e14, 0, 9999, 19999, 29999, 39999, 49999, 74999, 99999,
199999, 499999, 1000000, 1e14]
def extract_array(f):
"""
A sanity check decorator. When combined with numba.vectorize
or guvectorize, it provides the same capability as dataframe_vectorize
or dataframe_guvectorize
"""
def wrapper(*args, **kwargs):
arrays = [arg.values for arg in args]
return f(*arrays)
return wrapper
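# Illustrative sketch (added example with a hypothetical function name, not
# part of the original module): the decorator hands the underlying numpy
# arrays (.values) of DataFrame columns to the wrapped function.
def _example_extract_array():
    @extract_array
    def _first_plus_second(a, b):
        # a and b arrive here as plain numpy arrays
        return a + b
    df = DataFrame({"x": [1.0, 2.0], "y": [10.0, 20.0]})
    result = _first_plus_second(df["x"], df["y"])
    assert isinstance(result, np.ndarray)
    assert list(result) == [11.0, 22.0]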
def expand_1D(x, inflate, inflation_rates, num_years):
"""
Expand the given data to account for the given number of budget years.
If necessary, pad out additional years by increasing the last given
year at the provided inflation rate.
"""
assert len(inflation_rates) == num_years
if isinstance(x, np.ndarray):
if len(x) >= num_years:
return x
else:
ans = np.zeros(num_years, dtype='f8')
ans[:len(x)] = x
if inflate:
extra = []
cur = x[-1]
for i in range(1, num_years - len(x) + 1):
inf_idx = i + len(x) - 1
cur *= (1. + inflation_rates[inf_idx])
extra.append(cur)
else:
extra = [float(x[-1]) for i in
range(1, num_years - len(x) + 1)]
ans[len(x):] = extra
return ans.astype(x.dtype, casting='unsafe')
return expand_1D(np.array([x]), inflate, inflation_rates, num_years)
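# Illustrative sketch (added example, not part of the original module): two
# known years padded out to four budget years, inflating the last known value
# by each later year's rate.
def _example_expand_1D():
    rates = [0.02, 0.02, 0.03, 0.04]
    ans = expand_1D(np.array([10.0, 20.0]), inflate=True,
                    inflation_rates=rates, num_years=4)
    # Year 3 grows 20.0 by rates[2]; year 4 grows that result by rates[3].
    assert np.allclose(ans, [10.0, 20.0, 20.0 * 1.03, 20.0 * 1.03 * 1.04])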
def expand_2D(x, inflate, inflation_rates, num_years):
"""
Expand the given data to account for the given number of budget years.
For 2D arrays, we expand out the number of rows until we have num_years
number of rows. For each expanded row, we inflate by the given inflation
rate.
"""
if isinstance(x, np.ndarray):
# Look for -1s and create masks if present
last_good_row = -1
keep_user_data_mask = []
keep_calc_data_mask = []
has_nones = False
for row in x:
keep_user_data_mask.append([1 if i != -1 else 0 for i in row])
keep_calc_data_mask.append([0 if i != -1 else 1 for i in row])
if not np.any(row == -1):
last_good_row += 1
else:
has_nones = True
if x.shape[0] >= num_years and not has_nones:
return x
else:
if has_nones:
c = x[:last_good_row+1]
keep_user_data_mask = np.array(keep_user_data_mask)
keep_calc_data_mask = np.array(keep_calc_data_mask)
else:
c = x
ans = np.zeros((num_years, c.shape[1]))
ans[:len(c), :] = c
if inflate:
extra = []
cur = c[-1]
for i in range(0, num_years - len(c)):
inf_idx = i + len(c) - 1
cur = np.array(cur*(1. + inflation_rates[inf_idx]))
extra.append(cur)
else:
extra = [c[-1, :] for i in
range(1, num_years - len(c) + 1)]
ans[len(c):, :] = extra
if has_nones:
# Use masks to "mask in" provided data and "mask out"
# data we don't need (produced in rows with a None value)
ans = ans * keep_calc_data_mask
user_vals = x * keep_user_data_mask
ans = ans + user_vals
return ans.astype(c.dtype, casting='unsafe')
return expand_2D(np.array(x), inflate, inflation_rates, num_years)
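# Illustrative sketch (assumed values): expand_2D grows the last known row to
# fill the remaining budget years.
def _example_expand_2D():
    vals = np.array([[1000.0, 2000.0],
                     [1020.0, 2040.0]])
    rates = [0.02, 0.05, 0.05]
    # The third row becomes the last given row inflated once:
    # [1020*1.05, 2040*1.05] == [1071.0, 2142.0]
    return expand_2D(vals, inflate=True, inflation_rates=rates, num_years=3)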
def strip_Nones(x):
"""
Takes a list of scalar values or a list of lists.
If it is a list of scalar values, when None is encountered, we
return everything encountered before. If a list of lists, we
replace None with -1 and return
Parameters
----------
x: list
Returns
-------
list
"""
accum = []
for val in x:
if val is None:
return accum
if not isinstance(val, list):
accum.append(val)
else:
for i, v in enumerate(val):
if v is None:
val[i] = -1
accum.append(val)
return accum
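# Illustrative sketch of the two behaviours of strip_Nones, with assumed inputs.
def _example_strip_Nones():
    flat = strip_Nones([1, 2, None, 4])        # -> [1, 2]
    nested = strip_Nones([[1, None], [3, 4]])  # -> [[1, -1], [3, 4]]
    return flat, nested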
def expand_array(x, inflate, inflation_rates, num_years):
"""
    Dispatch to either expand_1D or expand_2D depending on the dimension of x.
Parameters
----------
x : value to expand
inflate: Boolean
As we expand, inflate values if this is True, otherwise, just copy
    inflation_rates: list of float
        Yearly inflation rates, one per budget year
num_years: int
Number of budget years to expand
Returns
-------
expanded numpy array
"""
x = np.array(strip_Nones(x))
try:
if len(x.shape) == 1:
return expand_1D(x, inflate, inflation_rates, num_years)
elif len(x.shape) == 2:
return expand_2D(x, inflate, inflation_rates, num_years)
else:
raise ValueError("Need a 1D or 2D array")
except AttributeError:
raise ValueError("Must pass a numpy array")
def count_gt_zero(agg):
return sum([1 for a in agg if a > 0])
def count_lt_zero(agg):
return sum([1 for a in agg if a < 0])
def weighted_count_lt_zero(agg, col_name, tolerance=-0.001):
return agg[agg[col_name] < tolerance]['s006'].sum()
def weighted_count_gt_zero(agg, col_name, tolerance=0.001):
return agg[agg[col_name] > tolerance]['s006'].sum()
def weighted_count(agg):
return agg['s006'].sum()
def weighted_mean(agg, col_name):
return float((agg[col_name]*agg['s006']).sum()) / float(agg['s006'].sum())
def weighted_sum(agg, col_name):
return (agg[col_name]*agg['s006']).sum()
def weighted_perc_inc(agg, col_name):
return (float(weighted_count_gt_zero(agg, col_name)) /
float(weighted_count(agg)))
def weighted_perc_dec(agg, col_name):
return (float(weighted_count_lt_zero(agg, col_name)) /
float(weighted_count(agg)))
def weighted_share_of_total(agg, col_name, total):
return float(weighted_sum(agg, col_name)) / float(total)
def add_weighted_decile_bins(df, income_measure='_expanded_income'):
"""
Add a column of income bins based on each 10% of the income_measure,
weighted by s006.
    The default income_measure is `_expanded_income`, but `c00100` also works.
    This function will serve as a "grouper" later on.
"""
# First, sort by income_measure
df.sort(income_measure, inplace=True)
# Next, do a cumulative sum by the weights
df['cumsum_weights'] = np.cumsum(df['s006'].values)
# Max value of cum sum of weights
max_ = df['cumsum_weights'].values[-1]
# Create 10 bins and labels based on this cumulative weight
bins = [0] + list(np.arange(1, 11)*(max_/10.0))
    labels = list(range(1, 11))
    # Group by weighted deciles
    df['bins'] = pd.cut(df['cumsum_weights'], bins, labels=labels)
return df
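# Minimal usage sketch (assumed data; relies on the legacy DataFrame.sort API
# used above):
#   df = DataFrame({'_expanded_income': [5e3, 2e4, 8e4], 's006': [1.0, 1.0, 1.0]})
#   df = add_weighted_decile_bins(df)
#   # df now carries 'cumsum_weights' and a 'bins' column used later as a grouper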
def add_income_bins(df, compare_with="soi", bins=None, right=True, income_measure='_expanded_income'):
"""
Add a column of income bins of income_measure using pandas 'cut'.
This will serve as a "grouper" later on.
Parameters
----------
df: DataFrame object
the object to which we are adding bins
compare_with: String, optional
options for input: 'tpc', 'soi', 'webapp'
determines which types of bins will be added
default: 'soi'
bins: iterable of scalars, optional income breakpoints.
Follows pandas convention. The breakpoint is inclusive if
right=True. This argument overrides any choice of compare_with.
right : bool, optional
Indicates whether the bins include the rightmost edge or not.
If right == True (the default), then the bins [1,2,3,4]
indicate (1,2], (2,3], (3,4].
Returns
-------
df: DataFrame object
the original input that bins have been added to
"""
if not bins:
if compare_with == "tpc":
bins = LARGE_INCOME_BINS
elif compare_with == "soi":
bins = SMALL_INCOME_BINS
elif compare_with == "webapp":
bins = WEBAPP_INCOME_BINS
else:
msg = "Unknown compare_with arg {0}".format(compare_with)
raise ValueError(msg)
# Groupby income_measure bins
df['bins'] = pd.cut(df[income_measure], bins, right=right)
return df
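# Illustrative sketch with assumed data: tag each record with a SOI-style income
# bin so later aggregations can group on the 'bins' column.
def _example_add_income_bins():
    df = DataFrame({'_expanded_income': [4500.0, 52000.0, 250000.0],
                    's006': [1.0, 1.0, 1.0]})
    # compare_with="soi" is the default, so SMALL_INCOME_BINS is used
    return add_income_bins(df)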
def means_and_comparisons(df, col_name, gp, weighted_total):
"""
Using grouped values, perform aggregate operations
to populate
df: DataFrame for full results of calculation
col_name: the column name to calculate against
gp: grouped DataFrame
"""
# Who has a tax cut, and who has a tax increase
diffs = gp.apply(weighted_count_lt_zero, col_name)
diffs = DataFrame(data=diffs, columns=['tax_cut'])
diffs['tax_inc'] = gp.apply(weighted_count_gt_zero, col_name)
diffs['count'] = gp.apply(weighted_count)
diffs['mean'] = gp.apply(weighted_mean, col_name)
diffs['tot_change'] = gp.apply(weighted_sum, col_name)
diffs['perc_inc'] = gp.apply(weighted_perc_inc, col_name)
diffs['perc_cut'] = gp.apply(weighted_perc_dec, col_name)
diffs['share_of_change'] = gp.apply(weighted_share_of_total,
col_name, weighted_total)
return diffs
def weighted(df, X):
agg = df
for colname in X:
if not colname.startswith('s006'):
agg[colname] = df[colname]*df['s006']
return agg
def get_sums(df, na=False):
"""
Gets the unweighted sum of each column, saving the col name
and the corresponding sum
Returns
-------
pandas.Series
"""
sums = defaultdict(lambda: 0)
for col in df.columns.tolist():
if col != 'bins':
if na:
sums[col] = 'n/a'
else:
sums[col] = (df[col]).sum()
return pd.Series(sums, name='sums')
def results(c):
"""
Gets the results from the tax calculator and organizes them into a table
Parameters
----------
c : Calculator object
Returns
-------
DataFrame object
"""
outputs = []
for col in STATS_COLUMNS:
if hasattr(c, 'records') and hasattr(c, 'params'):
if hasattr(c.params, col):
outputs.append(getattr(c.params, col))
else:
outputs.append(getattr(c.records, col))
else:
outputs.append(getattr(c, col))
return DataFrame(data=np.column_stack(outputs), columns=STATS_COLUMNS)
def weighted_avg_allcols(df, cols, income_measure='_expanded_income'):
diff = DataFrame(df.groupby('bins', as_index=False).apply(weighted_mean, income_measure),
columns=[income_measure])
for col in cols:
if (col == "s006" or col == 'num_returns_StandardDed' or
col == 'num_returns_ItemDed' or col == 'num_returns_AMT'):
diff[col] = df.groupby('bins', as_index=False)[col].sum()[col]
elif col != income_measure:
diff[col] = df.groupby('bins', as_index=False).apply(weighted_mean, col)
return diff
def create_distribution_table(calc, groupby, result_type,
income_measure='_expanded_income'):
"""
Gets results given by the tax calculator, sorts them based on groupby, and
manipulates them based on result_type. Returns these as a table
Parameters
----------
calc : the Calculator object
groupby : String object
options for input: 'weighted_deciles', 'small_income_bins',
'large_income_bins', 'webapp_income_bins';
determines how the columns in the resulting DataFrame are sorted
result_type : String object
options for input: 'weighted_sum' or 'weighted_avg';
determines how the data should be manipulated
Notes
-----
Taxpayer Characteristics:
c04470 : Total itemized deduction
        c00100 : AGI (Deficit)
c09600 : Alternative minimum tax
s006 : used to weight population
Returns
-------
DataFrame object
"""
res = results(calc)
    # itemized deduction amount, kept only for returns with positive AGI and
    # itemized deduction greater than the standard deduction (zero otherwise)
res['c04470'] = res['c04470'].where(((res['c00100'] > 0) &
(res['c04470'] > res['_standard'])), 0)
# weight of returns with positive AGI and itemized deduction
res['num_returns_ItemDed'] = res['s006'].where(((res['c00100'] > 0) &
(res['c04470'] > 0)), 0)
# weight of returns with positive AGI and standard deduction
res['num_returns_StandardDed'] = res['s006'].where(((res['c00100'] > 0) &
(res['_standard'] > 0)), 0)
# weight of returns with positive Alternative Minimum Tax (AMT)
res['num_returns_AMT'] = res['s006'].where(res['c09600'] > 0, 0)
# sorts the data
if groupby == "weighted_deciles":
df = add_weighted_decile_bins(res, income_measure=income_measure)
elif groupby == "small_income_bins":
df = add_income_bins(res, compare_with="soi", income_measure=income_measure)
elif groupby == "large_income_bins":
df = add_income_bins(res, compare_with="tpc", income_measure=income_measure)
elif groupby == "webapp_income_bins":
df = add_income_bins(res, compare_with="webapp", income_measure=income_measure)
else:
err = ("groupby must be either 'weighted_deciles' or 'small_income_bins'"
"or 'large_income_bins' or 'webapp_income_bins'")
raise ValueError(err)
# manipulates the data
pd.options.display.float_format = '{:8,.0f}'.format
if result_type == "weighted_sum":
df = weighted(df, STATS_COLUMNS)
gp_mean = df.groupby('bins', as_index=False)[TABLE_COLUMNS].sum()
gp_mean.drop('bins', axis=1, inplace=True)
sum_row = get_sums(df)[TABLE_COLUMNS]
elif result_type == "weighted_avg":
gp_mean = weighted_avg_allcols(df, TABLE_COLUMNS, income_measure=income_measure)
sum_row = get_sums(df, na=True)[TABLE_COLUMNS]
else:
err = ("result_type must be either 'weighted_sum' or 'weighted_avg")
raise ValueError(err)
return gp_mean.append(sum_row)
def create_difference_table(calc1, calc2, groupby,
income_measure='_expanded_income'):
"""
Gets results given by the two different tax calculators and outputs
a table that compares the differing results.
    The table is sorted according to the groupby input.
Parameters
----------
calc1, the first Calculator object
calc2, the other Calculator object
groupby, String object
options for input: 'weighted_deciles', 'small_income_bins',
'large_income_bins', 'webapp_income_bins'
determines how the columns in the resulting DataFrame are sorted
Returns
-------
DataFrame object
"""
res1 = results(calc1)
res2 = results(calc2)
if groupby == "weighted_deciles":
df = add_weighted_decile_bins(res2, income_measure=income_measure)
elif groupby == "small_income_bins":
df = add_income_bins(res2, compare_with="soi", income_measure=income_measure)
elif groupby == "large_income_bins":
df = add_income_bins(res2, compare_with="tpc", income_measure=income_measure)
elif groupby == "webapp_income_bins":
df = add_income_bins(res2, compare_with="webapp", income_measure=income_measure)
else:
err = ("groupby must be either 'weighted_deciles' or 'small_income_bins'"
"or 'large_income_bins' or 'webapp_income_bins'")
raise ValueError(err)
# Difference in plans
# Positive values are the magnitude of the tax increase
# Negative values are the magnitude of the tax decrease
res2['tax_diff'] = res2['_ospctax'] - res1['_ospctax']
diffs = means_and_comparisons(res2, 'tax_diff',
df.groupby('bins', as_index=False),
(res2['tax_diff']*res2['s006']).sum())
sum_row = get_sums(diffs)[diffs.columns.tolist()]
diffs = diffs.append(sum_row)
pd.options.display.float_format = '{:8,.0f}'.format
srs_inc = ["{0:.2f}%".format(val * 100) for val in diffs['perc_inc']]
diffs['perc_inc'] = pd.Series(srs_inc, index=diffs.index)
srs_cut = ["{0:.2f}%".format(val * 100) for val in diffs['perc_cut']]
diffs['perc_cut'] = pd.Series(srs_cut, index=diffs.index)
srs_change = ["{0:.2f}%".format(val * 100)
for val in diffs['share_of_change']]
diffs['share_of_change'] = pd.Series(srs_change, index=diffs.index)
# columns containing weighted values relative to the binning mechanism
non_sum_cols = [x for x in diffs.columns.tolist()
if 'mean' in x or 'perc' in x]
for col in non_sum_cols:
diffs.loc['sums', col] = 'n/a'
return diffs
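# End-to-end usage sketch (hedged): `calc`, `calc_base` and `calc_reform` stand
# for already-computed Calculator objects from this package and are assumptions
# of the example, not objects created here.
#   dist = create_distribution_table(calc, groupby='weighted_deciles',
#                                    result_type='weighted_sum')
#   diff = create_difference_table(calc_base, calc_reform,
#                                  groupby='webapp_income_bins')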
|
|
import logging
import logging.handlers
import json
import multiprocessing
import os
import signal
from threading import Lock
import time
import tornado.httpserver
import tornado.netutil
import tornado.web
from zmq.eventloop import ioloop
from threading import Thread
from twisted.internet import reactor
from node import upnp
from node.db_store import Obdb
from node.market import Market
from node.transport import CryptoTransportLayer
from node.util import open_default_webbrowser, is_mac
from node.ws import WebSocketHandler
if is_mac():
from node.util import osx_check_dyld_library_path
osx_check_dyld_library_path()
ioloop.install()
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.redirect("/html/index.html")
class OpenBazaarStaticHandler(tornado.web.StaticFileHandler):
def set_extra_headers(self, path):
self.set_header("X-Frame-Options", "DENY")
self.set_header("X-Content-Type-Options", "nosniff")
class OpenBazaarContext(object):
"""
This Object holds all of the runtime parameters
necessary to start an OpenBazaar instance.
This object is convenient to pass on method interfaces,
and reduces issues of API inconsistencies (as in the order
in which parameters are passed, which can cause bugs)
"""
def __init__(self,
nat_status,
server_ip,
server_port,
http_ip,
http_port,
db_path,
log_path,
log_level,
market_id,
bm_user,
bm_pass,
bm_port,
seeds,
seed_mode,
dev_mode,
dev_nodes,
disable_upnp,
disable_stun_check,
disable_open_browser,
disable_sqlite_crypt,
enable_ip_checker):
self.nat_status = nat_status
self.server_ip = server_ip
self.server_port = server_port
self.http_ip = http_ip
self.http_port = http_port
self.db_path = db_path
self.log_path = log_path
self.log_level = log_level
self.market_id = market_id
self.bm_user = bm_user
self.bm_pass = bm_pass
self.bm_port = bm_port
self.seeds = seeds
self.seed_mode = seed_mode
self.dev_mode = dev_mode
self.dev_nodes = dev_nodes
self.disable_upnp = disable_upnp
self.disable_stun_check = disable_stun_check
self.disable_open_browser = disable_open_browser
self.disable_sqlite_crypt = disable_sqlite_crypt
self.enable_ip_checker = enable_ip_checker
        # used to derive up-time, and (TODO) average up-time;
        # timestamp in Coordinated Universal Time (UTC).
self.started_utc_timestamp = int(time.time())
def __repr__(self):
representation = {"server_ip": self.server_ip,
"server_port": self.server_port,
"http_ip": self.http_ip,
"http_port": self.http_port,
"log_path": self.log_path,
"market_id": self.market_id,
"bm_user": self.bm_user,
"bm_pass": self.bm_pass,
"bm_port": self.bm_port,
"seeds": self.seeds,
"seed_mode": self.seed_mode,
"dev_mode": self.dev_mode,
"dev_nodes": self.dev_nodes,
"log_level": self.log_level,
"db_path": self.db_path,
"disable_upnp": self.disable_upnp,
"disable_open_browser": self.disable_open_browser,
"disable_sqlite_crypt": self.disable_sqlite_crypt,
"enable_ip_checker": self.enable_ip_checker,
"started_utc_timestamp": self.started_utc_timestamp,
"uptime_in_secs": (int(time.time()) -
int(self.started_utc_timestamp))}
return json.dumps(representation).replace(", ", ",\n ")
@staticmethod
def get_defaults():
return {'market_id': 1,
'server_ip': '127.0.0.1',
'server_port': 12345,
'log_dir': 'logs',
'log_file': 'production.log',
'dev_log_file': 'development-{0}.log',
'db_dir': 'db',
'db_file': 'ob.db',
'dev_db_file': 'ob-dev-{0}.db',
'dev_mode': False,
'dev_nodes': 3,
'seed_mode': False,
'seeds': [
'seed.openbazaar.org',
'seed2.openbazaar.org',
'seed.openlabs.co',
'us.seed.bizarre.company',
'eu.seed.bizarre.company'
],
'disable_upnp': False,
'disable_stun_check': False,
'disable_open_browser': False,
'disable_sqlite_crypt': False,
'log_level': 30,
                # CRITICAL=50 ERROR=40 WARNING=30 INFO=20 DEBUG=10 DEBUGV=9 DATADUMP=5 NOTSET=0
'http_ip': '127.0.0.1',
'http_port': 0,
'bm_user': None,
'bm_pass': None,
'bm_port': -1,
'enable_ip_checker': False,
'config_file': None}
@staticmethod
def create_default_instance():
defaults = OpenBazaarContext.get_defaults()
return OpenBazaarContext(
None,
server_ip=defaults['server_ip'],
server_port=defaults['server_port'],
http_ip=defaults['http_ip'],
http_port=defaults['http_port'],
db_path=os.path.join(defaults['db_dir'], defaults['db_file']),
log_path=os.path.join(defaults['log_dir'], defaults['log_file']),
log_level=defaults['log_level'],
market_id=defaults['market_id'],
bm_user=defaults['bm_user'],
bm_pass=defaults['bm_pass'],
bm_port=defaults['bm_port'],
seeds=defaults['seeds'],
seed_mode=defaults['seed_mode'],
dev_mode=defaults['dev_mode'],
dev_nodes=defaults['dev_nodes'],
disable_upnp=defaults['disable_upnp'],
disable_stun_check=defaults['disable_stun_check'],
disable_open_browser=defaults['disable_open_browser'],
disable_sqlite_crypt=defaults['disable_sqlite_crypt'],
enable_ip_checker=defaults['enable_ip_checker']
)
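# Usage sketch (assumed values): build a context from the defaults and override
# only the fields that differ for this node.
#   ob_ctx = OpenBazaarContext.create_default_instance()
#   ob_ctx.http_port = 8888            # serve the web UI on a fixed port
#   ob_ctx.disable_open_browser = True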
class MarketApplication(tornado.web.Application):
def __init__(self, ob_ctx):
self.shutdown_mutex = Lock()
self.ob_ctx = ob_ctx
db_connection = Obdb(ob_ctx.db_path, ob_ctx.disable_sqlite_crypt)
self.transport = CryptoTransportLayer(ob_ctx, db_connection)
self.market = Market(self.transport, db_connection)
self.upnp_mapper = None
Thread(target=reactor.run, args=(False,)).start()
peers = ob_ctx.seeds if not ob_ctx.seed_mode else []
self.transport.join_network(peers)
handlers = [
(r"/", MainHandler),
(r"/main", MainHandler),
(r"/html/(.*)", OpenBazaarStaticHandler, {'path': './html'}),
(r"/ws", WebSocketHandler,
{
'transport': self.transport,
'market_application': self,
'db_connection': db_connection
})
]
# TODO: Move debug settings to configuration location
settings = dict(debug=True)
super(MarketApplication, self).__init__(handlers, **settings)
def start_app(self):
# If self.ob_ctx.http_port is 0, the kernel is queried for a port.
sockets = tornado.netutil.bind_sockets(
self.ob_ctx.http_port,
address=self.ob_ctx.http_ip
)
server = tornado.httpserver.HTTPServer(self)
server.add_sockets(sockets)
self.ob_ctx.http_port = sockets[0].getsockname()[1]
if not self.ob_ctx.disable_upnp:
self.setup_upnp_port_mappings(self.ob_ctx.server_port)
else:
print "MarketApplication.start_app(): Disabling upnp setup"
def setup_upnp_port_mappings(self, p2p_port):
result = False
if not self.ob_ctx.disable_upnp:
upnp.PortMapper.DEBUG = False
print "Setting up UPnP Port Map Entry..."
self.upnp_mapper = upnp.PortMapper()
self.upnp_mapper.clean_my_mappings(p2p_port)
result_tcp_p2p_mapping = self.upnp_mapper.add_port_mapping(
p2p_port, p2p_port
)
print "UPnP TCP P2P Port Map configuration done ",
print "(%s -> %s) => %s" % (
p2p_port, p2p_port, result_tcp_p2p_mapping
)
result_udp_p2p_mapping = self.upnp_mapper.add_port_mapping(
p2p_port, p2p_port, 'UDP'
)
print "UPnP UDP P2P Port Map configuration done ",
print "(%s -> %s) => %s" % (
p2p_port, p2p_port, result_udp_p2p_mapping
)
result = result_tcp_p2p_mapping and result_udp_p2p_mapping
if not result:
print "Warning: UPnP was not setup correctly. ",
print "Ports could not be automatically mapped."
print "If you only see two or three stores, here are some tips:"
print "1. If you are using VPN, configure port forwarding or disable your VPN temporarily"
print "2. Configure your router to forward traffic from port",
print "%s for both TCP and UDP to your local port %s" % (p2p_port, p2p_port)
return result
def cleanup_upnp_port_mapping(self):
if not self.ob_ctx.disable_upnp:
try:
if self.upnp_mapper is not None:
print "Cleaning UPnP Port Mapping -> ", \
self.upnp_mapper.clean_my_mappings(self.transport.port)
except AttributeError:
print (
"[openbazaar] "
"MarketApplication.clean_upnp_port_mapping() failed!"
)
def shutdown(self, x=None, y=None):
self.shutdown_mutex.acquire()
print "MarketApplication.shutdown!"
log = logging.getLogger(
'[%s] %s' % (self.market.market_id, 'root')
)
log.info("Received TERMINATE, exiting...")
self.cleanup_upnp_port_mapping()
tornado.ioloop.IOLoop.instance().stop()
self.transport.shutdown()
self.shutdown_mutex.release()
os._exit(0)
def start_io_loop():
if not tornado.ioloop.IOLoop.instance():
ioloop.install()
try:
tornado.ioloop.IOLoop.instance().start()
except Exception as exc:
print "openbazaar::start_io_loop Exception:", exc
raise
def create_logger(ob_ctx):
logger = None
try:
logger = logging.getLogger()
logger.setLevel(int(ob_ctx.log_level))
handler = logging.handlers.RotatingFileHandler(
ob_ctx.log_path,
encoding='utf-8',
maxBytes=50000000,
backupCount=1
)
log_format = logging.Formatter(
u'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(log_format)
logger.addHandler(handler)
except Exception as exc:
print "Could not setup logger, continuing: ", exc.message
return logger
def log_openbazaar_start(log, ob_ctx):
log.info("Started OpenBazaar Web App at http://%s:%s" %
(ob_ctx.http_ip, ob_ctx.http_port))
print "Started OpenBazaar Web App at http://%s:%s" % (ob_ctx.http_ip, ob_ctx.http_port)
def attempt_browser_open(ob_ctx):
if not ob_ctx.disable_open_browser:
open_default_webbrowser(
'http://%s:%s' % (ob_ctx.http_ip, ob_ctx.http_port))
def setup_signal_handlers(application):
try:
signal.signal(signal.SIGTERM, application.shutdown)
except ValueError:
pass
def node_starter(ob_ctxs):
    # This is the target for the Process which will spawn
    # the child processes that run the actual OpenBazaar instances.
for ob_ctx in ob_ctxs:
process = multiprocessing.Process(
target=start_node, args=(ob_ctx,),
name="Process::openbazaar_daemon::target(start_node)")
        process.daemon = False  # Python has to wait for this non-daemon process to end.
process.start()
def start_node(ob_ctx):
logger = create_logger(ob_ctx)
application = MarketApplication(ob_ctx)
setup_signal_handlers(application)
application.start_app()
log_openbazaar_start(logger, ob_ctx)
attempt_browser_open(ob_ctx)
start_io_loop()
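# Launch sketch (hedged): node_starter() expects a list of OpenBazaarContext
# objects, one per node to spawn. A single default node could be started with:
#   node_starter([OpenBazaarContext.create_default_instance()])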
|
|
"""
This module implements the Jobs class, which is intended to be a container-like
interface for all of the jobs defined on a single Jenkins server.
"""
import logging
import time
from jenkinsapi.job import Job
from jenkinsapi.custom_exceptions import JenkinsAPIException, UnknownJob
log = logging.getLogger(__name__)
class Jobs(object):
"""
This class provides a container-like API which gives
access to all jobs defined on the Jenkins server. It behaves
like a dict in which keys are Job-names and values are actual
jenkinsapi.Job objects.
"""
def __init__(self, jenkins):
self.jenkins = jenkins
self._data = []
def _del_data(self, job_name):
if not self._data:
return
for num, job_data in enumerate(self._data):
if job_data['name'] == job_name:
del self._data[num]
return
def __len__(self):
return len(self.keys())
def poll(self, tree='jobs[name,color,url]'):
return self.jenkins.poll(tree=tree)
def __delitem__(self, job_name):
"""
Delete a job by name
        :param str job_name: name of an existing job
:raises JenkinsAPIException: When job is not deleted
"""
if job_name in self:
try:
delete_job_url = self[job_name].get_delete_url()
self.jenkins.requester.post_and_confirm_status(
delete_job_url,
data='some random bytes...'
)
self._del_data(job_name)
except JenkinsAPIException:
                # Sometimes Jenkins throws an NPE when removing a job.
                # It usually removes the job anyway, but to be sure we
                # re-try if the job was not deleted.
if job_name in self:
delete_job_url = self[job_name].get_delete_url()
self.jenkins.requester.post_and_confirm_status(
delete_job_url,
data='some random bytes...'
)
self._del_data(job_name)
def __setitem__(self, key, value):
"""
Create Job
:param str key: Job name
:param str value: XML configuration of the job
.. code-block:: python
api = Jenkins('http://localhost:8080/')
new_job = api.jobs['my_new_job'] = config_xml
"""
return self.create(key, value)
def __getitem__(self, job_name):
if job_name in self:
job_data = [job_row for job_row in self._data
if job_row['name'] == job_name or
Job.get_full_name_from_url_and_baseurl(
job_row['url'],
self.jenkins.baseurl) == job_name][0]
return Job(job_data['url'], job_data['name'], self.jenkins)
else:
raise UnknownJob(job_name)
def iteritems(self):
"""
Iterate over the names & objects for all jobs
"""
for job in self.itervalues():
if job.name != job.get_full_name():
yield job.get_full_name(), job
else:
yield job.name, job
def __contains__(self, job_name):
"""
True if job_name exists in Jenkins
"""
return job_name in self.keys()
def iterkeys(self):
"""
Iterate over the names of all available jobs
"""
if not self._data:
self._data = self.poll().get('jobs', [])
for row in self._data:
if row['name'] != \
Job.get_full_name_from_url_and_baseurl(row['url'],
self.jenkins.baseurl):
yield Job.get_full_name_from_url_and_baseurl(
row['url'], self.jenkins.baseurl)
else:
yield row['name']
def itervalues(self):
"""
Iterate over all available jobs
"""
if not self._data:
self._data = self.poll().get('jobs', [])
for row in self._data:
yield Job(row['url'], row['name'], self.jenkins)
def keys(self):
"""
Return a list of the names of all jobs
"""
return list(self.iterkeys())
def create(self, job_name, config):
"""
Create a job
        :param str job_name: Name of new job
        :param str config: XML configuration of new job
:returns Job: new Job object
"""
if job_name in self:
return self[job_name]
if not config:
raise JenkinsAPIException('Job XML config cannot be empty')
params = {'name': job_name}
try:
if isinstance(config, unicode): # pylint: disable=undefined-variable
config = str(config)
except NameError:
            # Python 3: config is already a str
pass
self.jenkins.requester.post_xml_and_confirm_status(
self.jenkins.get_create_url(),
data=config,
params=params
)
# Reset to get it refreshed from Jenkins
self._data = []
return self[job_name]
def create_multibranch_pipeline(self, job_name, config, block=True, delay=60):
"""
Create a multibranch pipeline job
        :param str job_name: Name of new job
        :param str config: XML configuration of new job
:param block: block until scan is finished?
:param delay: max delay to wait for scan to finish (seconds)
:returns list of new Jobs after scan
"""
if not config:
raise JenkinsAPIException('Job XML config cannot be empty')
params = {'name': job_name}
try:
if isinstance(config, unicode): # pylint: disable=undefined-variable
config = str(config)
except NameError:
            # Python 3: config is already a str
pass
self.jenkins.requester.post_xml_and_confirm_status(
self.jenkins.get_create_url(),
data=config,
params=params
)
# Reset to get it refreshed from Jenkins
self._data = []
# Launch a first scan / indexing to discover the branches...
self.jenkins.requester.post_and_confirm_status(
'{}/job/{}/build'.format(self.jenkins.baseurl, job_name),
data='',
valid=[200, 302], # expect 302 without redirects
allow_redirects=False)
start_time = time.time()
        # The redirect URL does not work with indexing, so the only known workaround
        # is to poll the console output until the scan has finished.
scan_finished = False
while not scan_finished and block and time.time() < start_time + delay:
indexing_console_text = self.jenkins.requester.get_url(
'{}/job/{}/indexing/consoleText'.format(self.jenkins.baseurl, job_name))
if indexing_console_text.text.strip().split('\n')[-1].startswith('Finished:'):
scan_finished = True
time.sleep(1)
        # now collect all jobs created by the scan, i.e. those whose names start with job_name + '/'
jobs = []
for name in self.jenkins.get_jobs_list():
if name.startswith(job_name + '/'):
jobs.append(self[name])
return jobs
def copy(self, job_name, new_job_name):
"""
Copy a job
:param str job_name: Name of an existing job
:param new_job_name: Name of new job
:returns Job: new Job object
"""
params = {'name': new_job_name,
'mode': 'copy',
'from': job_name}
self.jenkins.requester.post_and_confirm_status(
self.jenkins.get_create_url(),
params=params,
data='')
self._data = []
return self[new_job_name]
def rename(self, job_name, new_job_name):
"""
Rename a job
:param str job_name: Name of an existing job
:param str new_job_name: Name of new job
:returns Job: new Job object
"""
params = {'newName': new_job_name}
rename_job_url = self[job_name].get_rename_url()
self.jenkins.requester.post_and_confirm_status(
rename_job_url, params=params, data='')
self._data = []
return self[new_job_name]
def build(self, job_name, params=None, **kwargs):
"""
Executes build of a job
:param str job_name: Job name
:param dict params: Job parameters
:param kwargs: Parameters for Job.invoke() function
:returns QueueItem: Object to track build progress
"""
if params:
assert isinstance(params, dict)
return self[job_name].invoke(build_params=params, **kwargs)
return self[job_name].invoke(**kwargs)
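# Usage sketch of the dict-like API (the server URL, job name and config_xml
# are assumptions of this example, not values defined in this module):
#   from jenkinsapi.jenkins import Jenkins
#   api = Jenkins('http://localhost:8080')
#   api.jobs['my_job'] = config_xml            # create, same as api.jobs.create(...)
#   if 'my_job' in api.jobs:
#       api.jobs.build('my_job', params={'BRANCH': 'main'})
#       del api.jobs['my_job']                 # delete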
|
|
# -*- coding: utf-8
# 'version': '0.3'
#
# Copyright (c) 2017, Stephen B, Hope, All rights reserved.
#
# CommAI-env Copyright (c) 2016-present, Facebook, Inc., All rights reserved.
# Round1 Copyright (c) 2017-present, GoodAI All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE_CHALLENGE file in the root directory of this source tree.
# TODO task, competition.base competition.objects_properties unresolved ref
from core.task import Task, on_start, on_message, on_sequence, on_state_changed, on_timeout, on_output_message, on_ended
import tasks.competition.messages as msg
from tasks.competition.base import BaseTask
from tasks.competition.objects_properties import global_properties
import random
# use the set of objects from the objects-properties association tasks.
objects = list(set(obj for basket, objects in global_properties.items() for obj in objects))
dirs = ['east', 'west', 'north', 'south']
TIME_CHAR = 8
TIME_VERB = (len("Say 'I xxxxxxxxxxxx' to xxxxxxxxxxxx.") + len("You xxxxxxxxxxxxed.")) * TIME_CHAR
TIME_TURN = (len("I turn right.") + len("You turned.")) * TIME_CHAR
TIME_MOVE = (len("I move forward.") + len("You moved.")) * TIME_CHAR
TIME_PICK = (len("I pick up the xxxxxxxxxxxx.") + len("You picked up the xxxxxxxxxxxx.")) * TIME_CHAR
TIME_GIVE = (len("I give you an xxxxxxxxxxxx.") + len("You gave me an xxxxxxxxxxxx.")) * TIME_CHAR
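# Worked example of the time budget above: with TIME_CHAR = 8, TIME_TURN is
# (len("I turn right.") + len("You turned.")) * 8 = (13 + 11) * 8 = 192 ticks,
# so a task constructed with max_time=3 * TIME_TURN allows 576 ticks.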
class TurningTask(BaseTask):
"""
"""
def __init__(self, world):
"""
:param world:
"""
super(TurningTask, self).__init__(max_time=3 * TIME_TURN, world=world)
@on_start()
def on_start(self, event):
# TODO event not used
""" during initalization of task, save the direction the learner is facing # randomly choose a target
direction and save it too # ask the learner to turn in the target direction
:param event:
:return:
"""
self.state.init_dir = self.get_world().state.learner_direction
self.target_turn = random.choice(['left', 'right'])
if self.target_turn == 'right':
self.state.target_direction = self.get_world().get_clockwise_direction(1)
else:
self.state.target_direction = self.get_world().get_clockwise_direction(-1)
self.set_message("Turn {0}.".format(self.target_turn))
@on_state_changed(lambda ws, ts: ws.learner_direction == ts.target_direction)
def on_moved(self, event):
# TODO event not used
"""# reward the learner when it's facing the right direction
:param event:
:return:
"""
self.set_result(1)
@on_timeout()
def fail_learner(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_message(random.choice(msg.timeout))
class MovingTask(BaseTask):
"""
"""
def __init__(self, world):
"""
:param world:
"""
super(MovingTask, self).__init__(max_time=3 * TIME_MOVE, world=world)
@on_start()
def on_start(self, event):
# TODO event not used
"""# during initalization task, save the learner's position # save the destination position one step forward
from the learner is ask the learner to move forward
:param event:
:return:
"""
self.state.initial_pos = self.get_world().state.learner_pos
dp = self.get_world().valid_directions[self.get_world().state.learner_direction]
self.state.dest_pos = self.state.initial_pos + dp
self.set_message("Move forward.")
@on_state_changed(lambda ws, ts: ws.learner_pos == ts.dest_pos)
def on_moved(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_result(1)
@on_timeout()
def fail_learner(self, event):
"""
:param event:
:return:
"""
self.set_message(random.choice(msg.timeout))
class MovingRelativeTask(BaseTask):
"""
"""
def __init__(self, world):
"""
:param world:
"""
super(MovingRelativeTask, self).__init__(max_time=2 * TIME_TURN + 2 * TIME_MOVE, world=world)
@on_start()
def on_start(self, event):
""" # during initalization task, save the direction the learner is facing randomly choose a target direction
and save the position# one step forward in that direction# Ask the learner to move in a particular direction
(left or right)
:param event:
:return:
"""
self.state.init_dir = self.get_world().state.learner_direction
self.state.initial_pos = self.get_world().state.learner_pos
self.target_turn = random.choice(['left', 'right'])
if self.target_turn == 'right':
self.state.target_dir = self.get_world().get_clockwise_direction(1)
else:
self.state.target_dir = self.get_world().get_clockwise_direction(-1)
dp = self.get_world().valid_directions[self.state.target_dir]
self.state.dest_pos = self.state.initial_pos + dp
self.set_message("Move {0}.".format(self.target_turn))
@on_state_changed(lambda ws, ts: ws.learner_pos == ts.dest_pos)
def on_moved(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_result(1)
@on_timeout()
def fail_learner(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_message(random.choice(msg.timeout))
class MovingAbsoluteTask(BaseTask):
"""
"""
def __init__(self, world):
"""
:param world:
"""
super(MovingAbsoluteTask, self).__init__(max_time=8 * TIME_TURN + 4 * TIME_MOVE, world=world)
@on_start()
def on_start(self, event):
"""# during initalization task, save the direction the learner is facing# randomly choose a target direction
and save the position one step forward in that direction# Ask the learner to move in a particular absolute
direction (north, east, south, west)
:param event:
:return:
"""
self.state.init_dir = self.get_world().state.learner_direction
self.state.initial_pos = self.get_world().state.learner_pos
self.target_turn = random.choice(['left', 'right'])
if self.target_turn == 'right':
self.state.target_dir = self.get_world().get_clockwise_direction(1)
else:
self.state.target_dir = self.get_world().get_clockwise_direction(-1)
dp = self.get_world().valid_directions[self.state.target_dir]
self.state.dest_pos = self.state.initial_pos + dp
self.set_message("You are facing {0}, move {1}.".format(self.state.init_dir, self.state.target_dir))
@on_state_changed(lambda ws, ts: ws.learner_pos == ts.dest_pos)
def on_moved(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_result(1)
@on_timeout()
def fail_learner(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_message(random.choice(msg.timeout))
class PickUpTask(BaseTask):
"""
"""
def __init__(self, world):
"""
:param world:
"""
super(PickUpTask, self).__init__(max_time=50 * TIME_CHAR + 2 * TIME_PICK, world=world)
@on_start()
def on_start(self, event):
# TODO event not used
"""# choose some random object# find the cell in front of the learner# place the object there
:param event:
:return:
"""
self.target_obj, = random.sample(objects, 1)
ws = self.get_world().state
ld = self.get_world().valid_directions[ws.learner_direction]
lp = ws.learner_pos
self.state.initial_count = ws.learner_inventory[self.target_obj]
self.get_world().put_entity(lp + ld, self.target_obj, True, True)
self.add_handler(on_state_changed(lambda ws, ts: ws.learner_inventory[self.target_obj] == ts.initial_count + 1)
(self.on_object_picked_up))
self.set_message("You have {indef_object} in front of you. "
"Pick up the {object}.".format(indef_object=msg.indef_article(self.target_obj),
object=self.target_obj))
def on_object_picked_up(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_result(1)
@on_timeout()
def fail_learner(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_message(random.choice(msg.timeout))
class PickUpAroundTask(BaseTask):
"""
"""
def __init__(self, world):
"""
:param world:
"""
super(PickUpAroundTask, self).__init__(
max_time=50 * TIME_CHAR + 2 * TIME_PICK + 4 * TIME_MOVE + 4 * TIME_TURN, world=world)
@on_start()
def on_start(self, event):
# TODO event not used
"""# choose a random object# find a random cell around the learner# place the object there
:param event:
:return:
"""
self.target_obj, = random.sample(objects, 1)
self.direction = random.choice(list(self.get_world().valid_directions.keys()))
ws = self.get_world().state
p = ws.learner_pos + self.get_world().valid_directions[self.direction]
self.state.initial_count = ws.learner_inventory[self.target_obj]
self.get_world().put_entity(p, self.target_obj, True, True)
self.add_handler(on_state_changed(lambda ws, ts: ws.learner_inventory[self.target_obj] == ts.initial_count + 1)
(self.on_object_picked_up))
self.set_message("There is {indef_object} {direction} from you, "
"pick up the {object}.".format(indef_object=msg.indef_article(self.target_obj),
direction=self.direction, object=self.target_obj))
def on_object_picked_up(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_result(1)
@on_timeout()
def fail_learner(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_message(random.choice(msg.timeout))
class PickUpInFrontTask(BaseTask):
"""
"""
max_steps_forward = 10
def __init__(self, world):
"""
:param world:
"""
super(PickUpInFrontTask, self).__init__(max_time=50 * TIME_CHAR + 2 * TIME_PICK +
PickUpInFrontTask.max_steps_forward * TIME_MOVE, world=world)
@on_start()
def on_start(self, event):
# TODO event not used
""" # choose a random object # select a random number of steps # place the object that number of steps in
front of the learner
:param event:
:return:
"""
self.target_obj, = random.sample(objects, 1)
ws = self.get_world().state
self.n = random.randint(1, PickUpInFrontTask.max_steps_forward)
p = ws.learner_pos + self.n * self.get_world().valid_directions[ws.learner_direction]
self.state.initial_count = ws.learner_inventory[self.target_obj]
self.get_world().put_entity(p, self.target_obj, True, True)
self.add_handler(on_state_changed(lambda ws, ts: ws.learner_inventory[self.target_obj] == ts.initial_count + 1)
(self.on_object_picked_up))
self.set_message("There is {indef_object} {n} steps forward, "
"pick up the {object}.".format(indef_object=msg.indef_article(self.target_obj),
n=msg.number_to_string(self.n), object=self.target_obj))
def on_object_picked_up(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_result(1, "You picked up the {0}.".format(self.target_obj))
@on_timeout()
def fail_learner(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_message(random.choice(msg.timeout))
class GivingTask(BaseTask):
"""
"""
def __init__(self, world):
"""
:param world:
"""
super(GivingTask, self).__init__(max_time=50 * TIME_CHAR + 2 * TIME_GIVE, world=world)
@on_start()
def on_start(self, event):
# TODO event not used
"""# pick a random object# give one of it to the learner# save how many objects of this we have# inform the
world that we can expect to receive such an object
:param event:
:return:
"""
ws = self.get_world().state
self.state.target_obj, = random.sample(objects, 1)
ws.learner_inventory[self.state.target_obj] += 1
self.state.initial_count = ws.teacher_inventory[self.state.target_obj]
ws.teacher_accepts.add(self.state.target_obj)
self.set_message("I gave you {indef_object}. Give it back to me "
"by saying \"I give you {indef_object}\"."
.format(indef_object=msg.indef_article(self.state.target_obj)))
@on_state_changed(lambda ws, ts: ws.teacher_inventory[ts.target_obj] == ts.initial_count + 1)
def on_give_me_object(self, event):
"""# if I have one more of the target object, the learner solved the task.
:param event:
:return:
"""
self.set_result(1)
@on_timeout()
def fail_learner(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_message(random.choice(msg.timeout))
class PickUpAroundAndGiveTask(BaseTask):
def __init__(self, world):
super(PickUpAroundAndGiveTask, self).__init__(max_time=50 * TIME_CHAR + 4 * TIME_PICK + 4 * TIME_GIVE +
4 * TIME_MOVE + 4 * TIME_TURN, world=world)
@on_start()
def on_start(self, event):
# TODO event not used
"""# pick a random object# save how many objects of this we have# save how many instances of the object the
learner intially had# choose some random direction# measure a cell one step in that direction# put an object
in the given position# initialize a variable to check if the object has been picked up# inform the world that
we can expect to receive such an object
:param event:
:return:
"""
ws = self.get_world().state
target_obj, = random.sample(objects, 1)
self.state.target_obj = target_obj
self.state.initial_count = ws.teacher_inventory[target_obj]
self.state.learner_initial_count = ws.learner_inventory[target_obj]
self.direction = random.choice(list(self.get_world().valid_directions.keys()))
self.obj_pos = ws.learner_pos + self.get_world().valid_directions[self.direction]
self.get_world().put_entity(self.obj_pos, target_obj, True, True)
self.object_picked_up = False
ws.teacher_accepts.add(self.state.target_obj)
self.set_message(
"There is {indef_object} {direction} from you." " Pick it up and give it to me.".format(
indef_object=msg.indef_article(self.state.target_obj), direction=self.direction))
@on_state_changed(lambda ws, ts: ws.learner_inventory[ts.target_obj] == ts.learner_initial_count + 1)
def on_object_picked_up(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.object_picked_up = True
@on_state_changed(lambda ws, ts: ws.teacher_inventory[ts.target_obj] == ts.initial_count + 1)
def on_give_me_object(self, event):
# TODO event not used
""" # if I have one more of the target object, the learner solved the task if it also picked up the object
in the grid.
:param event:
:return:
"""
if self.object_picked_up:
self.set_result(1)
else:
self.set_message("You have to pick up the {object} first.".format(object=self.state.target_obj))
@on_timeout()
def fail_learner(self, event):
# TODO event not used
"""# cleaning up
:param event:
:return:
"""
if not self.object_picked_up:
self.get_world().remove_entity(self.obj_pos)
self.set_message(random.choice(msg.timeout))
@on_ended()
def on_ended(self, event):
# TODO event not used
"""# cleanup
:param event:
:return:
"""
self.get_world().state.teacher_accepts.remove(self.state.target_obj)
"""
# Counting + Proprioception
"""
class CountingInventoryTask(BaseTask):
"""
"""
def __init__(self, world):
"""
:param world:
"""
super(CountingInventoryTask, self).__init__(max_time=100 * TIME_CHAR, world=world)
@on_start()
def on_start(self, event):
# TODO event not used
self.target_obj, = random.sample(objects, 1)
self.set_message("How many {0} do you have?".format(msg.pluralize(self.target_obj, 2)))
@on_message("(\d+)\.$")
def on_something_said(self, event):
"""# find out the correct answer# get the answer of the learner and parse it
:param event:
:return:
"""
count = self.get_world().state.learner_inventory[self.target_obj]
answer = event.get_match(1)
num_answer = msg.string_to_number(answer)
if num_answer == count:
self.set_result(1, "Correct!")
else:
self.set_message("{negative_feedback} "
"You have {count} {objects}.".format(negative_feedback=random.choice(msg.failed),
count=count, objects=msg.pluralize(self.target_obj, count)))
@on_timeout()
def fail_learner(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_message(random.choice(msg.timeout))
class CountingInventoryGivingTask(BaseTask):
"""
"""
def __init__(self, world):
"""
:param world:
"""
super(CountingInventoryGivingTask, self).__init__(max_time=1000 * TIME_CHAR, world=world)
@on_start()
def on_start(self, event):
# TODO event not used
self.failed = False
self.time_gave_me_object = None
self.state.target_obj, = random.sample(objects, 1)
self.state.initial_count = self.get_world().state.learner_inventory[self.state.target_obj]
self.set_message("How many {0} do you have?".format(msg.pluralize(self.state.target_obj, 2)))
self.stages = ['initial-query', 'one-more-query', 'waiting-give-back', 'final-query']
self.stage = 'initial-query'
@on_message("(\w+)\.$")
def on_answer_query(self, event):
"""# if we are waiting for an object, then we don't expect an answer to a query# if you just gave me an object,
then this is not the answer for a query# short variable for the world state # we check if the learner's answer
matches the number of instances it has of the given object # get the answer of the learner and parse it #
check if the learner has failed# get a feedback response# reward the learner if it replied correctly all
the questions
:param event:
:return:
"""
if self.stage == 'waiting-give-back':
return
if self.time_gave_me_object == self.get_time():
return
ws = self.get_world().state
count = ws.learner_inventory[self.state.target_obj]
answer = event.get_match(1)
try:
num_answer = msg.string_to_number(answer)
except ValueError:
num_answer = None
self.failed = self.failed or num_answer != count
feedback = random.choice(msg.congratulations) if num_answer == count else random.choice(msg.failed)
if self.stage == 'initial-query':
ws.learner_inventory[self.state.target_obj] += 1
self.set_message(
"{feedback} I gave you {indef_object}. "
"How many {objects} do you have now?".format(indef_object=msg.indef_article(self.state.target_obj),
objects=msg.pluralize(self.state.target_obj, 2), feedback=feedback))
self.stage = 'one-more-query'
elif self.stage == 'one-more-query':
self.set_message("{feedback} Now give the {object} back to me."
.format(object=self.state.target_obj, feedback=feedback))
ws.teacher_accepts.add(self.state.target_obj)
self.stage = 'waiting-give-back'
elif self.stage == 'final-query':
if not self.failed:
self.set_result(1, feedback)
else:
self.set_result(0, feedback)
@on_state_changed(lambda ws, ts: ws.teacher_inventory[ts.target_obj] == ts.initial_count + 1)
def on_gave_me_target_object(self, event):
""" # if I have one more of the target object, the learner solved the task if it also picked up the object in
the grid.
:param event:
:return:
"""
if self.stage == 'waiting-give-back':
self.time_gave_me_object = self.get_time()
self.set_message("Good! You gave me {indef_object}. "
"How many {objects} do you have now?".format(indef_object=msg.indef_article(self.state.target_obj),
objects=msg.pluralize(self.state.target_obj, 2)))
self.stage = 'final-query'
@on_timeout()
def fail_learner(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_message(random.choice(msg.timeout))
@on_ended()
def on_ended(self, event):
# TODO event not used
"""# cleanup
:param event:
:return:
"""
if self.stage in ['waiting-give-back', 'final-query']:
self.get_world().state.teacher_accepts.remove(self.state.target_obj)
class LookTask(BaseTask):
"""
    # Look in a predefined direction.
"""
def __init__(self, world):
"""
:param world:
"""
super(LookTask, self).__init__(max_time=1000, world=world)
@on_start()
def on_start(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.dir = random.choice(dirs)
dir = self.get_world().state.learner_direction
self.set_message("Look to the " + self.dir + "," + " you are currently facing " + dir + ".")
@on_message(r"I look\.$")
def on_message(self, event):
# TODO event not used
"""
:param event:
:return:
"""
dir = self.get_world().state.learner_direction
if dir == self.dir:
self.set_result(1, "Congratulations! " "You are looking in the right direction.")
class LookAroundTask(Task):
"""
# the learner must look around his current position
"""
def __init__(self, world):
"""
:param world:
"""
super(LookAroundTask, self).__init__(max_time=5000, world=world)
@on_start()
def on_start(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.visited_dirs = {'east': False, 'west': False, 'north': False, 'south': False}
self.ndir = 0
dir = self.get_world().state.learner_direction
self.set_message("Look around. You are facing " + dir + ".")
self.state.learner_pos = self.get_world().state.learner_pos
@on_state_changed(lambda ws, ts: ws.learner_pos != ts.learner_pos)
def on_moved(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_result(0, "You are not allowed to move.")
@on_message(r"I look\.$")
def on_message(self, event):
# TODO event not used
"""
:param event:
:return:
"""
dir = self.get_world().state.learner_direction
if dir in self.visited_dirs and not self.visited_dirs[dir]:
self.visited_dirs[dir] = True
self.ndir += 1
ddir = len(self.visited_dirs) - self.ndir
if ddir == 0:
self.set_result(1, "Congratulations!")
else:
self.set_message(str(ddir) + " directions to go.")
elif dir in self.visited_dirs:
self.set_message("You already look here.")
class FindObjectAroundTask(Task):
"""
# set 4 objects around the learner, ask to find one of them.
"""
def __init__(self, world):
"""
:param world:
"""
super(FindObjectAroundTask, self).__init__(max_time=10000, world=world)
self.dir2obj = [0, 1, 2, 3]
random.shuffle(self.dir2obj)
@on_start()
def on_start(self, event):
# TODO event not used
"""# random assignment of object to location
:param event:
:return:
"""
self.state.learner_pos = self.get_world().state.learner_pos
pos = self.state.learner_pos
pe = self.get_world().put_entity
for i in range(0, len(dirs)):
np = pos + self.get_world().valid_directions[dirs[i]]
pe(np, objects[self.dir2obj[i]], True, True)
self.dir = random.choice(self.dir2obj)
self.obj = objects[self.dir2obj[self.dir]]
self.instructions_completed = False
self.set_message("Pick the " + self.obj + " next to you.")
obj_count = self.get_world().state.learner_inventory[self.obj]
self.add_handler(on_state_changed(lambda ws, ts: ws.learner_inventory[self.obj] == obj_count + 1)
(self.on_object_picked.im_func))
@on_ended()
def on_ended(self, event):
# TODO event not used
"""
:param event:
:return:
"""
pos = self.state.learner_pos
for i in range(0, len(dirs)):
np = pos + self.get_world().valid_directions[dirs[i]]
self.get_world().remove_entity(np)
def on_object_picked(self, event):
# TODO event not used
"""
:param event:
:return:
"""
self.set_result(1, 'Well done!')
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Subscribe function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
def _recursive_apply(tensors, apply_fn):
"""Helper method to recursively apply a function to structure of tensors.
The structure of the tensors should take the form similar to fetches in
`tf.Session` and includes single `Tensor`, `list`, nested `list`, `tuple`,
`namedtuple`, or `dict`.
Args:
    tensors: Single `Tensor`, `list`, nested `list`, `tuple`,
`namedtuple`, or `dict`.
apply_fn: Function to apply to each `Tensor` and should return a `Tensor`.
Returns:
Returns the modified tensors with the same structure.
Raises:
`TypeError` if undefined type in the tensors structure.
"""
tensors_type = type(tensors)
if tensors_type is ops.Tensor:
return apply_fn(tensors)
elif isinstance(tensors, (list, tuple)):
tensors = [_recursive_apply(t, apply_fn) for t in tensors]
if tensors_type is list:
return list(tensors)
elif tensors_type is tuple:
return tuple(tensors)
return tensors_type(*tensors) # collections.namedtuple
elif tensors_type is dict:
return dict([(k, _recursive_apply(v, apply_fn))
for k, v in tensors.iteritems()])
else:
raise TypeError('_recursive_apply argument %r has invalid type %r' %
(tensors, tensors_type))
class _ControlOutputCache(object):
"""Helper class to manage calculating and caching control_outputs in graph."""
def __init__(self):
self.cache = {}
def calc_control_outputs(self, graph):
"""Returns the map of control_outputs for a given graph.
Args:
graph: The graph to parse.
Returns:
A map of the control outputs.
"""
control_outputs = {}
for op in graph.get_operations():
for control_input in op.control_inputs:
if control_input not in control_outputs:
control_outputs[control_input] = set()
control_outputs[control_input].add(op)
return control_outputs
def get_control_outputs(self, op):
"""Return the control outputs for a given op.
Args:
op: The op to fetch control outputs for.
Returns:
Iterable of control output ops.
"""
if op.graph not in self.cache:
control_outputs = self.calc_control_outputs(op.graph)
self.cache[op.graph] = control_outputs
else:
control_outputs = self.cache[op.graph]
return control_outputs.get(op, [])
def _subscribe_new(tensor, side_effects, control_cache):
"""Helper method that subscribes a single tensor to a list of side_effects.
Args:
tensor: `tf.Tensor`
side_effects: List of side_effect functions see subscribe for details.
control_cache: `_ControlOutputCache` helper to get control_outputs faster.
Returns:
The modified replacement to the passed in tensor which triggers the side
effects.
"""
update_input = []
for consumer_op in list(tensor.consumers()): # explicit copy
update_input.append((consumer_op, list(consumer_op.inputs).index(tensor)))
update_control_input = control_cache.get_control_outputs(tensor.op)
# Trailing slash on name scope to replace the scope.
name_scope = tensor.op.name + '/subscription/'
with ops.name_scope(name_scope):
outs = []
for s in side_effects:
outs += s(tensor)
with ops.control_dependencies(outs):
out = array_ops.identity(tensor)
for consumer_op, index in update_input:
consumer_op._update_input(index, out) # pylint: disable=protected-access
for consumer_op in update_control_input:
consumer_op._control_inputs.remove(tensor.op) # pylint: disable=protected-access
consumer_op._control_inputs.append(out.op) # pylint: disable=protected-access
consumer_op._recompute_node_def() # pylint: disable=protected-access
return out
def _subscribe_extend(tensor, side_effects):
"""Helper method to extend the list of side_effects for a subscribed tensor.
Args:
tensor: A `tf.Tensor` as returned by subscribe().
side_effects: List of side_effect functions, see subscribe for details.
Returns:
The given subscribed tensor (for API consistency).
"""
assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(
tensor.op.name)
source_tensor = tensor.op.inputs[0]
# Build the side effect graphs and add their outputs to the list of control
# dependencies for the subscribed tensor.
outs = []
name_scope = source_tensor.op.name + '/subscription/'
with ops.name_scope(name_scope):
for s in side_effects:
outs += s(source_tensor)
for out in outs:
out_type = type(out)
if out_type is ops.Tensor:
out = out.op
tensor.op._control_inputs.append(out) # pylint: disable=protected-access
tensor.op._recompute_node_def() # pylint: disable=protected-access
return tensor
def _is_subscribed_identity(tensor):
"""Checks if the given tensor is an identity op returned by `subscribe()`.
Args:
tensor: A `tf.Tensor` to check.
Returns:
    True if the given tensor matches the criteria for subscription identities:
its op type is `Identity`, its name matches the name of its input and
conforms to the convention for subscribed nodes.
False otherwise.
"""
# Subscribed tensor are assumed to be identity ops.
if tensor.op.type != 'Identity':
return False
# Check that the tensor name matches the convention in place for identity ops
# created by subscribe().
match = re.match(
r'(?P<prefix_name>^.*?)/subscription/Identity[^/]+', tensor.name)
if match is None or len(match.groups()) != 1:
return False
prefix_name = match.group('prefix_name')
# Get a reference to the source tensor and check that it has a matching name.
assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(
tensor.op.name)
source_tensor = tensor.op.inputs[0]
if prefix_name != source_tensor.op.name:
return False
return True
def _subscribe(tensor, side_effects, control_cache):
"""Helper method that subscribes a single tensor to a list of side_effects.
This method will check if the given tensor has already been subscribed or if
it's a tensor returned by a previous call to `subscribe()` and, if so, will
reuse the existing identity op, appending the given side effects to the list
of existing ones.
Args:
tensor: The `tf.Tensor` to be subscribed.
side_effects: List of side_effect functions, see subscribe for details.
control_cache: `_ControlOutputCache` helper to get control_outputs faster.
Returns:
    The modified replacement to the passed in tensor which triggers the side
    effects, or the given tensor if it has already been subscribed.
"""
if _is_subscribed_identity(tensor):
return _subscribe_extend(tensor, side_effects)
# Check if the given tensor has already been subscribed by inspecting its
# outputs.
name_scope = tensor.op.name + '/subscription/Identity'
consumers = tensor.consumers()
matching_ops = [op for op in consumers if op.name.startswith(name_scope)]
assert len(matching_ops) <= 1, ('Op {} must only have one subscription '
'op connected to it').format(tensor.op.name)
if len(matching_ops) == 1:
candidate_tensor = matching_ops[0].outputs[0]
if _is_subscribed_identity(candidate_tensor):
return _subscribe_extend(candidate_tensor, side_effects)
return _subscribe_new(tensor, side_effects, control_cache)
def subscribe(tensors, side_effects):
"""Subscribe to a tensor.
This method will attach side effect graphs to a given set
of tensors. Set of tensors follows from session.run and supports
single `Tensor`, `list`, nested `list`, `tuple`, `namedtuple`, or `dict`. It
returns the tensors in the same passed in structure, but as clones with
side effects applied. The supplied side effect graphs are specified
as a constructor function which takes the target tensor and
constructs a side effect graph and returns a list of ops that should
be control dependencies on fetching the tensor. It will append
'subscription' to the name scope of the tensor for every node in
the side effect graph. These control dependencies are what trigger
the side effects. Subscribe will construct the additions to your
graph and return the created identity tensor downstream of the control
dependencies. Use these tensors as you would normally in the rest of
your tensorflow code. If a given tensor has already been subscribed or a
tensor returned by a call to subscribe is passed, the previously created
identity tensor will be reused and the side effect graphs will be added to
the existing ones.
Args:
tensors: `Tensor` or set of tensors to subscribe to. Set of tensors format
follows from `Session.run` and supports single `Tensor`, `list`, nested
`list`, `tuple`, `namedtuple`, or `dict`.
    side_effects: Function(s) that take a `Tensor`, construct a subgraph, and
      return a nonempty list of control dependencies. This can be a single
      function or a list of functions.
Returns:
Subscribed tensors, which are identity copies of the passed in tensors
in the same passed in structure, but the graph has been modified
such that these are downstream of the control dependencies for
    the side effect graphs. Use these functionally equivalent tensors
instead of the passed in tensors for further construction or running.
"""
if not hasattr(side_effects, '__iter__'):
side_effects = [side_effects]
control_outputs = _ControlOutputCache()
result = _recursive_apply(
tensors, lambda t: _subscribe(t, side_effects, control_outputs))
return result
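# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how `subscribe` might be called, assuming the
# TensorFlow 1.x graph-mode API. The names `_example_subscribe_usage` and
# `_print_side_effect` are hypothetical, and the function is never called
# here, so importing this module is unaffected.
def _example_subscribe_usage():
  """Sketch: attach a printing side effect to a tensor and fetch the clone."""
  import tensorflow as tf  # assumed available in the environment

  def _print_side_effect(t):
    # Return a list of ops/tensors to be used as control dependencies.
    return [tf.Print(t, [t], message='subscribed value: ')]

  a = tf.constant(1.0)
  b = tf.constant(2.0)
  c = tf.add(a, b)
  # `c_sub` is an identity of `c`; fetching it triggers the side effect.
  c_sub = subscribe(c, _print_side_effect)
  with tf.Session() as sess:
    sess.run(c_sub)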
|
|
from __future__ import print_function
import httplib2
import os
import sys
import dateutil.parser as dateparse
try:
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
except ImportError:
print("""
Google API Modules are required:
pip install --upgrade google-api-python-client
Then visit the link below and follow the instructions to setup your API credentials:
https://developers.google.com/google-apps/calendar/quickstart/python
client_secret.json should be placed in ~/.hotbot
""")
import datetime
import readline
import argparse
from jsonconf import jsonconf
import uuid
from random import randint
from croniter import croniter
import logging
log = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
log.addHandler(handler)
log.setLevel(logging.INFO)
# pip install --upgrade google-api-python-client
# https://console.developers.google.com/start/api?id=calendar
# https://console.developers.google.com/apis/
SCOPES = 'https://www.googleapis.com/auth/calendar'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'HOTBot'
def get_short_unique():
return str(uuid.uuid4())[:8]
def default_input(prompt, default=None):
# https://chistera.yi.org/~dato/blog/entries/2008/02/14/python_raw_input_with_an_editable_default_value_using_readline.html
if not default:
default = ""
def pre_input_hook():
readline.insert_text(default)
readline.redisplay()
prompt += ": "
readline.set_pre_input_hook(pre_input_hook)
try:
return raw_input(prompt)
finally:
readline.set_pre_input_hook(None)
def number_ordinal(n):
# http://stackoverflow.com/questions/9647202/ordinal-numbers-replacement/20007730#20007730
return "%d%s" % (n, "tsnrhtdd"[(n/10 % 10 != 1)*(n % 10 < 4) * n % 10::4])
class HOTBot(object):
guest_values = [
('displayName', 'Guest Name'),
('email', 'Guest email')]
location_values = [
('name', 'Name'),
('location', 'Location'),
('phone', 'Phone'),
('website', 'Website'),
('tag', 'Tag (optional)'),
('reservation', 'Takes reservations')]
event_message_fields = [
('event', 'HOT'),
('day', 10),
('day_ordinal', '10TH'),
('day_name', 'Thursday'),
('month', 2),
('month_ordinal', '3rd'),
('month_name', 'March'),
('year', 2016),
('guest_count', 8),
('name', "Cthulu's Pub"),
('location', "123 Ancient One Ave, R'lyeh, NY"),
('phone', '867-5309'),
('website', 'http://cthuluspub.hp'),
('start_time', '2016-03-10T19:00:00-05:00'),
('short_time', '7:00 PM')
]
event_message_example = (
"It has been decreed, that on {day_name}, the {day_ordinal} "
"day of {month_name}, {year}, that {event} shall be held at "
"{name}. The {guest_count} believers shall arrive at "
"{location} promptly at {short_time}, or risk the wrath of the "
" Ancient Ones.")
def __init__(self, event, flags=None):
self.flags = flags
self.service = None
self.event = event
conf_dir = self.get_conf_dir()
self.event_conf = jsonconf(os.path.join(conf_dir, self.event + ".json"))
self.event_loc_history = jsonconf(os.path.join(conf_dir, self.event + "_history.json"))
if not self.event_loc_history.locations:
self.event_loc_history.locations = {}
self.event_loc_history.save()
self.authorized = False
def get_conf_dir(self):
home_dir = os.path.expanduser('~')
conf_dir = os.path.join(home_dir, '.hotbot')
if not os.path.exists(conf_dir):
os.makedirs(conf_dir)
return conf_dir
def get_credentials(self):
conf_dir = self.get_conf_dir()
credential_path = os.path.join(conf_dir, self.event + '_credentials.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
log.warn("No current valid Google credentials. Starting authentication flow...")
flow = client.flow_from_clientsecrets(os.path.join(conf_dir, 'client_secret.json'),
'https://www.googleapis.com/auth/calendar')
flow.user_agent = "HOTBot"
if self.flags:
credentials = tools.run_flow(flow, store, self.flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
log.info('Storing credentials to ' + credential_path)
return credentials
def authorize(self):
credentials = self.get_credentials()
http = credentials.authorize(httplib2.Http())
self.service = discovery.build('calendar', 'v3', http=http)
self.authorized = True
def manage_locations(self):
return self.manage_list('locations', HOTBot.location_values,
'Current Locations', 'name')
def reset_loc_history(self, tag=None):
if not tag:
tag = '*'
log.info("Resetting history for [{}] locations...".format(tag))
        # Use pop() so that a tag with no recorded history doesn't raise KeyError.
        self.event_loc_history.locations.pop(tag, None)
self.event_loc_history.save()
def add_loc_history(self, loc):
if loc in self.event_conf.locations:
l = self.event_conf.locations[loc]
tag = l['tag'].strip().lower()
if not tag:
tag = '*'
if tag not in self.event_loc_history.locations:
self.event_loc_history.locations[tag] = []
self.event_loc_history.locations[tag].append(loc)
self.event_loc_history.save()
def _get_loc_bins(self):
# sort into bins exclusively
bins = {'*': []}
for k, l in self.event_conf.locations.iteritems():
t = l['tag'].strip().lower()
if not t:
t = '*'
if t not in bins:
bins[t] = []
if (t not in self.event_loc_history.locations or
k not in self.event_loc_history.locations[t]):
bins[t].append(k)
return bins
def get_rand_location(self, start_time, tag=None):
if tag:
tag = tag.strip().lower()
else:
tag = '*'
bins = self._get_loc_bins()
if tag not in bins or len(bins[tag]) == 0:
# we've used them all, try to reset history
self.reset_loc_history(tag=tag)
bins = self._get_loc_bins()
if tag not in bins or len(bins[tag]) == 0:
return None
i = randint(0, len(bins[tag]) - 1)
key = bins[tag][i]
loc = self.event_conf.locations[bins[tag][i]]
info = dict(loc)
info['start_time'] = start_time
time = dateparse.parse(start_time)
info['day_name'] = time.strftime('%A')
info['day'] = time.day
info['day_ordinal'] = number_ordinal(time.day)
info['year'] = time.year
info['month'] = time.month
info['month_name'] = time.strftime('%B')
info['month_ordinal'] = number_ordinal(time.month)
        # Use 12 rather than 0 for noon/midnight when converting to 12-hour time.
        info['short_time'] = str(time.hour % 12 or 12) + time.strftime(':%M %p')
info['event'] = self.event
info['guest_count'] = len(self.event_conf.guests)
return (key, loc, info)
def insert_events(self):
        print('\nInsert event placeholders using cron format.'
              '\nSee https://en.wikipedia.org/wiki/Cron for details on the format.')
loc_tag = default_input("Location Tag (enter for none)", default="")
if not loc_tag:
loc_tag = None
fmt = '%Y-%m-%d'
base = datetime.datetime.now()
def_base = base_str = base.strftime(fmt)
while True:
base_str = default_input("Start Date", default=def_base)
try:
base = datetime.datetime.strptime(base_str, fmt)
            except ValueError:
                print("Invalid Date Format! Use YYYY-MM-DD")
continue
break
count = def_count = 10
while True:
count = default_input("# Events to Insert", default=str(def_count))
try:
count = int(count)
if count < 1:
raise Exception() # lazy way to handle with less code
            except Exception:
print("Please enter a valid integer > 0!")
continue
break
duration = def_dur = 60
while True:
duration = default_input("Event Duration (min)", default=str(def_dur))
try:
duration = int(duration)
if duration < 10:
raise Exception()
            except Exception:
print("Please enter a valid integer > 10!")
continue
break
cron_fmt = None
cron = None
events = []
event_objs = []
while True:
while True:
cron_fmt = default_input("Cron Expression", default=cron_fmt)
try:
cron = croniter(cron_fmt, start_time=base)
                except Exception:
                    print('\nInvalid Cron Expression!'
                          '\nSee https://en.wikipedia.org/wiki/Cron for example expressions.')
continue
break
events = []
event_objs = []
for _ in range(count):
evt = cron.get_next(ret_type=datetime.datetime)
event_objs.append(evt)
events.append(evt.strftime(fmt + ' %H:%M'))
print("Events to be inserted: \n" + ", ".join(events))
resp = default_input("\nInsert Events (y) or Edit (e)?", default=None)
if resp.lower().startswith('y'):
break
for evt in event_objs:
            self.insert_event_placeholder(evt, duration=duration, loc_tag=loc_tag)
def manage_messages(self):
key = 'messages'
fields = [f[0] for f in HOTBot.event_message_fields]
field_dict = {f[0]: f[1] for f in HOTBot.event_message_fields}
if not self.event_conf[key]:
self.event_conf[key] = []
def delete_item(i):
opt = default_input('Confirm delete? (yes/no)').lower()
if opt.startswith('y'):
del self.event_conf[key][i]
self.event_conf.save()
def check_msg(msg):
while True:
try:
output = msg.format(**field_dict)
print('Rendered message: ' + output.replace("\\n", "\n").replace("\\t", "\t"))
opt = default_input("\ns (save), e (edit) ?").lower()
if opt.startswith('s'):
break
# continue for anything else, e is just for show
except KeyError as e:
bad_key = e.args[0]
print("\nInvalid message field: " + bad_key)
print("\nAvailable message fields:\n" + ", ".join(fields))
except IndexError:
print("Text replacement fields must contain a field name!")
except ValueError as e:
print("Invalid formatting: " + e.args[0])
msg = default_input("\nEdit Message: ", default=msg)
return msg
def edit_item(i):
msg = self.event_conf[key][i]
msg = default_input("Message", msg)
msg = check_msg(msg)
self.event_conf[key][i] = msg
self.event_conf.save()
def new_item():
msg = default_input("New Message")
msg = check_msg(msg)
self.event_conf[key].append(msg)
self.event_conf.save()
def print_items():
count = 1
print('\nMessages for event: {}'.format(self.event))
for msg in self.event_conf[key]:
if len(msg) > 70:
msg = msg[:70] + "..."
print('{}: {}'.format(count, msg))
count += 1
print("")
print("Available message fields:\n" + ", ".join(fields))
print("\nExample message:\n" + HOTBot.event_message_example)
print("\nOutput:\n" + HOTBot.event_message_example.format(**field_dict))
print("")
while True:
if len(self.event_conf[key]):
num = len(self.event_conf[key])
print_items()
opt = default_input("\n1-{} (edit), n (new), d (delete), q (quit)".format(num)).lower()
if opt.startswith('q'):
break
elif opt.startswith('n'):
new_item()
else:
delete = False
try:
if opt.startswith('d'):
delete = True
opt = default_input("(1-{}) select".format(num))
opt_i = int(opt)
opt_i -= 1
# purely to throw exception if out of bounds
self.event_conf[key][opt_i]
except (ValueError, IndexError):
print("Invalid selection. Must be 1-{} or n/d/q".format(num))
continue
if delete:
delete_item(opt_i)
else:
edit_item(opt_i)
else:
print("\nNo current entries. Please add one first...")
new_item()
def get_rand_message(self, info):
messages = self.event_conf.messages
if not messages:
messages = [
("Phone: {phone}"
"\nWebsite: {website}"
"\nReservations: {reservation}"
)
]
i = randint(0, len(messages) - 1)
return messages[i].format(**info).replace("\\n", "\n").replace("\\t", "\t")
def manage_guests(self):
return self.manage_list('guests', HOTBot.guest_values,
'Current Guests', 'displayName')
def manage_list(self, key, values, list_msg, list_field):
if not self.event_conf[key]:
self.event_conf[key] = {}
def delete_item(i):
item = self.event_conf[key][i]
opt = default_input('Confirm delete {}? (yes/no)'.format(item[list_field])).lower()
if opt.startswith('y'):
del self.event_conf[key][i]
self.event_conf.save()
def edit_item(i):
item = self.event_conf[key][i]
for v in values:
self.event_conf[key][i][v[0]] = default_input(v[1], item[v[0]])
self.event_conf.save()
def new_item():
item = {}
for v in values:
item[v[0]] = default_input(v[1])
u = get_short_unique()
self.event_conf[key][u] = item
self.event_conf.save()
def print_items():
count = 1
print('\n{} for event: {}'.format(list_msg, self.event))
self.key_map = []
for i in self.event_conf[key]:
print('{}: {}'.format(count, self.event_conf[key][i][list_field]))
self.key_map.append(i)
count += 1
while True:
if len(self.event_conf[key]):
num = len(self.event_conf[key])
print_items()
opt = default_input("\n1-{} (edit), n (new), d (delete), q (quit)".format(num)).lower()
if opt.startswith('q'):
break
elif opt.startswith('n'):
new_item()
else:
delete = False
try:
if opt.startswith('d'):
delete = True
opt = default_input("(1-{}) select".format(num))
opt_i = int(opt)
opt_i -= 1
# purely to throw exception if out of bounds
self.event_conf[key][self.key_map[opt_i]]
opt = self.key_map[opt_i]
except (ValueError, IndexError):
print("Invalid selection. Must be 1-{} or n/d/q".format(num))
continue
if delete:
delete_item(opt)
else:
edit_item(opt)
else:
print("\nNo current entries. Please add one first...")
new_item()
def get_calendars(self):
if not self.authorized:
self.authorize()
cals = self.service.calendarList().list().execute()
cal_list = {}
for c in cals['items']:
cal_list[c['id']] = c['summary']
return cal_list
def select_host_calendar(self):
if not self.authorized:
self.authorize()
cals = self.get_calendars()
print("\nSelect your host calendar for event: {}".format(self.event))
id_list = []
for c in cals:
id_list.append(c)
print("{}: {}".format(len(id_list), cals[c]))
i = raw_input("Choice: ")
try:
i = int(float(i))
cal_id = id_list[i - 1]
self.event_conf.host_cal = cal_id
except (ValueError, IndexError):
print("Invalid selection! Must be a number between 1 and {}".format(
len(id_list) - 1))
self.event_conf.save()
def get_cal_events(self, event=None, days_future=7, max_results=None):
if not self.authorized:
self.authorize()
if not event:
event = self.event
now = datetime.datetime.utcnow()
end = (now + datetime.timedelta(days=days_future)).isoformat() + 'Z'
now = now.isoformat() + 'Z'
        result = self.service.events().list(
            calendarId=self.event_conf.host_cal, timeMin=now, timeMax=end,
            maxResults=max_results, singleEvents=True,
            orderBy='startTime').execute()
events = result.get('items', [])
event_list = []
for e in events:
if e['summary'].startswith('[' + self.event):
event_list.append(e)
return event_list
    def insert_event_placeholder(self, start, duration=120, loc_tag=None):
if not self.authorized:
self.authorize()
tzone = self.service.settings().get(setting='timezone').execute()['value']
fmt = '%Y-%m-%dT%H:%M:00'
name = self.event
if loc_tag:
name += (":" + loc_tag)
name = "[" + name + "]"
end = start + datetime.timedelta(minutes=duration)
event = {
'summary': name,
'start': {
'dateTime': start.strftime(fmt),
'timeZone': tzone,
},
'end': {
'dateTime': end.strftime(fmt),
'timeZone': tzone,
}
}
print("Creating {}, {}...".format(name, start.strftime(fmt)))
res = self.service.events().insert(calendarId=self.event_conf.host_cal,
body=event).execute()
print("Created: {}".format(res.get('htmlLink')))
def update_event(self, event, name, description, location):
if not self.authorized:
self.authorize()
cal_id = self.event_conf.host_cal
event["summary"] = name
event['attendees'] = []
for _, g in self.event_conf.guests.iteritems():
event['attendees'].append(g)
event['location'] = location
event["description"] = description
result = None
try:
result = self.service.events().update(calendarId=cal_id, eventId=event['id'],
body=event, sendNotifications=True).execute()
        except Exception:
            log.exception("Error updating event!")
return result
def main(flags=None):
bot = HOTBot(event=flags.event, flags=flags)
if not bot.event_conf.host_cal:
log.info("No calendar selected. Loading options...")
bot.select_host_calendar()
if flags.edit_guests:
bot.manage_guests()
elif flags.edit_loc:
bot.manage_locations()
elif flags.edit_msg:
bot.manage_messages()
elif flags.select_cal:
bot.select_host_calendar()
elif flags.ins_events:
bot.insert_events()
else:
events = bot.get_cal_events(days_future=flags.days)
if len(events):
evt = events[0]
summary = evt['summary'].strip('[').strip(']').strip().split(':')
tag = None
if len(summary) > 1:
tag = summary[1]
summary = summary[0]
loc = bot.get_rand_location(evt['start']['dateTime'], tag)
if not loc:
log.error("Unable to find location with given parameters!")
sys.exit(1)
else:
key, loc, info = loc
name = bot.event + " - " + loc['name']
log.info("Creating event: " + name)
description = bot.get_rand_message(info)
location = loc['location']
result = bot.update_event(evt, name, description, location)
if not result:
log.error("There seems to have been an error updating the event. Try again later...")
else:
bot.add_loc_history(key)
log.info("Event update success!")
else:
log.error(("No upcoming events found for {} in the next {} days. "
"Either create a placeholder in GCal or "
"search further into the future with the --days option.")
.format(bot.event, flags.days))
sys.exit(1)
def run_script():
# can use --noauth_local_webserver to manually auth
parser = argparse.ArgumentParser(description='HOTBot automated event scheduler',
parents=[tools.argparser])
parser.add_argument(
"--event",
required=True,
help="Event name (used as key for calendar lookup)")
parser.add_argument(
"--days",
help="Number of days in the future to look for events (default: 7)",
type=int,
default=7)
parser.add_argument(
"--edit-guests", dest='edit_guests',
default=False, action='store_true',
help="Edit guests for event")
parser.add_argument(
"--edit-loc", dest='edit_loc',
default=False, action='store_true',
help="Edit locations for event")
parser.add_argument(
"--edit-msg", dest='edit_msg',
default=False, action='store_true',
help="Edit possible messages for event")
parser.add_argument(
"--select-cal", dest='select_cal',
default=False, action='store_true',
help="Select host calendar")
parser.add_argument(
"--ins-events", dest='ins_events',
default=False, action='store_true',
help="Insert event placeholders into calendar with cron formatting")
flags = parser.parse_args()
try:
main(flags)
except SystemExit:
pass
    except Exception:
        log.exception("Fatal error occurred in script: ")
finally:
logging.shutdown()
if __name__ == '__main__':
run_script()
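# --- Illustrative CLI usage (not part of the original script) ---
# A hedged sketch of how this script might be invoked, based on the argparse
# flags defined in run_script(); the file name `hotbot.py` is an assumption.
#
#   python hotbot.py --event HOT --select-cal    # choose the host calendar
#   python hotbot.py --event HOT --edit-loc      # manage candidate locations
#   python hotbot.py --event HOT --edit-guests   # manage the guest list
#   python hotbot.py --event HOT --edit-msg      # manage invite messages
#   python hotbot.py --event HOT --ins-events    # insert cron-based placeholders
#   python hotbot.py --event HOT --days 14       # fill the next placeholder event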
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012-2016 MuLuu09
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import csv
import sys
import math
import errno
import signal
import socket
import timeit
import datetime
import platform
import threading
import xml.parsers.expat
try:
import gzip
GZIP_BASE = gzip.GzipFile
except ImportError:
gzip = None
GZIP_BASE = object
__version__ = '1.0.2'
class FakeShutdownEvent(object):
"""Class to fake a threading.Event.isSet so that users of this module
are not required to register their own threading.Event()
"""
@staticmethod
def isSet():
"Dummy method to always return false"""
return False
# Some global variables we use
USER_AGENT = None
SOURCE = None
SHUTDOWN_EVENT = FakeShutdownEvent()
SCHEME = 'http'
DEBUG = False
# Used for bound_interface
SOCKET_SOCKET = socket.socket
# Begin import game to handle Python 2 and Python 3
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
json = None
try:
import xml.etree.cElementTree as ET
except ImportError:
try:
import xml.etree.ElementTree as ET
except ImportError:
from xml.dom import minidom as DOM
ET = None
try:
from urllib2 import urlopen, Request, HTTPError, URLError
except ImportError:
from urllib.request import urlopen, Request, HTTPError, URLError
try:
from httplib import HTTPConnection
except ImportError:
from http.client import HTTPConnection
try:
from httplib import HTTPSConnection
except ImportError:
try:
from http.client import HTTPSConnection
except ImportError:
HTTPSConnection = None
try:
from Queue import Queue
except ImportError:
from queue import Queue
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from urlparse import parse_qs
except ImportError:
try:
from urllib.parse import parse_qs
except ImportError:
from cgi import parse_qs
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
from argparse import ArgumentParser as ArgParser
from argparse import SUPPRESS as ARG_SUPPRESS
PARSER_TYPE_INT = int
PARSER_TYPE_STR = str
except ImportError:
from optparse import OptionParser as ArgParser
from optparse import SUPPRESS_HELP as ARG_SUPPRESS
PARSER_TYPE_INT = 'int'
PARSER_TYPE_STR = 'string'
try:
from cStringIO import StringIO
BytesIO = None
except ImportError:
try:
from io import StringIO, BytesIO
except ImportError:
from StringIO import StringIO
BytesIO = None
try:
import builtins
except ImportError:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5.
Taken from https://pypi.python.org/pypi/six/
Modified to set encoding to UTF-8 if not set when stdout may not be
a tty such as when piping to head
"""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
encoding = fp.encoding or 'UTF-8' # Diverges for notty
if (isinstance(fp, file) and
isinstance(data, unicode) and
encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
else:
print_ = getattr(builtins, 'print')
del builtins
# Exception "constants" to support Python 2 through Python 3
try:
import ssl
try:
CERT_ERROR = (ssl.CertificateError,)
except AttributeError:
CERT_ERROR = tuple()
HTTP_ERRORS = ((HTTPError, URLError, socket.error, ssl.SSLError) +
CERT_ERROR)
except ImportError:
HTTP_ERRORS = (HTTPError, URLError, socket.error)
class SpeedtestException(Exception):
"""Base exception for this module"""
class SpeedtestCLIError(SpeedtestException):
"""Generic exception for raising errors during CLI operation"""
class SpeedtestHTTPError(SpeedtestException):
"""Base HTTP exception for this module"""
class SpeedtestConfigError(SpeedtestException):
"""Configuration provided is invalid"""
class ConfigRetrievalError(SpeedtestHTTPError):
"""Could not retrieve config.php"""
class ServersRetrievalError(SpeedtestHTTPError):
"""Could not retrieve speedtest-servers.php"""
class InvalidServerIDType(SpeedtestException):
"""Server ID used for filtering was not an integer"""
class NoMatchedServers(SpeedtestException):
"""No servers matched when filtering"""
class SpeedtestMiniConnectFailure(SpeedtestException):
"""Could not connect to the provided speedtest mini server"""
class InvalidSpeedtestMiniServer(SpeedtestException):
"""Server provided as a speedtest mini server does not actually appear
to be a speedtest mini server
"""
class ShareResultsConnectFailure(SpeedtestException):
"""Could not connect to speedtest.net API to POST results"""
class ShareResultsSubmitFailure(SpeedtestException):
"""Unable to successfully POST results to speedtest.net API after
connection
"""
class SpeedtestUploadTimeout(SpeedtestException):
"""testlength configuration reached during upload
Used to ensure the upload halts when no additional data should be sent
"""
class SpeedtestBestServerFailure(SpeedtestException):
"""Unable to determine best server"""
class GzipDecodedResponse(GZIP_BASE):
"""A file-like object to decode a response encoded with the gzip
method, as described in RFC 1952.
Largely copied from ``xmlrpclib``/``xmlrpc.client`` and modified
to work for py2.4-py3
"""
def __init__(self, response):
# response doesn't support tell() and read(), required by
# GzipFile
if not gzip:
raise SpeedtestHTTPError('HTTP response body is gzip encoded, '
'but gzip support is not available')
IO = BytesIO or StringIO
self.io = IO()
while 1:
chunk = response.read(1024)
if len(chunk) == 0:
break
self.io.write(chunk)
self.io.seek(0)
gzip.GzipFile.__init__(self, mode='rb', fileobj=self.io)
def close(self):
try:
gzip.GzipFile.close(self)
finally:
self.io.close()
def get_exception():
"""Helper function to work with py2.4-py3 for getting the current
exception in a try/except block
"""
return sys.exc_info()[1]
def bound_socket(*args, **kwargs):
"""Bind socket to a specified source IP address"""
sock = SOCKET_SOCKET(*args, **kwargs)
sock.bind((SOURCE, 0))
return sock
def distance(origin, destination):
"""Determine distance between 2 sets of [lat,lon] in km"""
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
a = (math.sin(dlat / 2) * math.sin(dlat / 2) +
math.cos(math.radians(lat1)) *
math.cos(math.radians(lat2)) * math.sin(dlon / 2) *
math.sin(dlon / 2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = radius * c
return d
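# Illustrative check (not part of the original script): with the haversine
# formula above, the distance between London (51.5074, -0.1278) and Paris
# (48.8566, 2.3522) comes out to roughly 344 km:
#   distance((51.5074, -0.1278), (48.8566, 2.3522))  # ~343.6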
def build_user_agent():
"""Build a Mozilla/5.0 compatible User-Agent string"""
global USER_AGENT
if USER_AGENT:
return USER_AGENT
ua_tuple = (
'Mozilla/5.0',
'(%s; U; %s; en-us)' % (platform.system(), platform.architecture()[0]),
'Python/%s' % platform.python_version(),
'(KHTML, like Gecko)',
'speedtest-cli/%s' % __version__
)
USER_AGENT = ' '.join(ua_tuple)
printer(USER_AGENT, debug=True)
return USER_AGENT
def build_request(url, data=None, headers=None, bump=''):
"""Build a urllib2 request object
This function automatically adds a User-Agent header to all requests
"""
if not USER_AGENT:
build_user_agent()
if not headers:
headers = {}
if url[0] == ':':
schemed_url = '%s%s' % (SCHEME, url)
else:
schemed_url = url
if '?' in url:
delim = '&'
else:
delim = '?'
# WHO YOU GONNA CALL? CACHE BUSTERS!
final_url = '%s%sx=%s.%s' % (schemed_url, delim,
int(timeit.time.time() * 1000),
bump)
headers.update({
'User-Agent': USER_AGENT,
'Cache-Control': 'no-cache',
})
printer('%s %s' % (('GET', 'POST')[bool(data)], final_url),
debug=True)
return Request(final_url, data=data, headers=headers)
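# Illustrative example (not part of the original module): with SCHEME = 'http',
# build_request('://www.speedtest.net/speedtest-config.php') yields a GET
# request whose URL looks roughly like
#   http://www.speedtest.net/speedtest-config.php?x=1458672000000.
# where the millisecond timestamp acts as a cache buster and the trailing '.'
# separates the optional `bump` value (empty by default).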
def catch_request(request):
"""Helper function to catch common exceptions encountered when
establishing a connection with a HTTP/HTTPS request
"""
try:
uh = urlopen(request)
return uh, False
except HTTP_ERRORS:
e = get_exception()
return None, e
def get_response_stream(response):
"""Helper function to return either a Gzip reader if
``Content-Encoding`` is ``gzip`` otherwise the response itself
"""
try:
getheader = response.headers.getheader
except AttributeError:
getheader = response.getheader
if getheader('content-encoding') == 'gzip':
return GzipDecodedResponse(response)
return response
def get_attributes_by_tag_name(dom, tag_name):
"""Retrieve an attribute from an XML document and return it in a
consistent format
Only used with xml.dom.minidom, which is likely only to be used
with python versions older than 2.5
"""
elem = dom.getElementsByTagName(tag_name)[0]
return dict(list(elem.attributes.items()))
def print_dots(current, total, start=False, end=False):
"""Built in callback function used by Thread classes for printing
status
"""
if SHUTDOWN_EVENT.isSet():
return
sys.stdout.write('.')
if current + 1 == total and end is True:
sys.stdout.write('\n')
sys.stdout.flush()
def do_nothing(*args, **kwargs):
pass
class HTTPDownloader(threading.Thread):
"""Thread class for retrieving a URL"""
def __init__(self, i, request, start, timeout):
threading.Thread.__init__(self)
self.request = request
self.result = [0]
self.starttime = start
self.timeout = timeout
self.i = i
def run(self):
try:
if (timeit.default_timer() - self.starttime) <= self.timeout:
f = urlopen(self.request)
while (not SHUTDOWN_EVENT.isSet() and
(timeit.default_timer() - self.starttime) <=
self.timeout):
self.result.append(len(f.read(10240)))
if self.result[-1] == 0:
break
f.close()
except IOError:
pass
class HTTPUploaderData(object):
"""File like object to improve cutting off the upload once the timeout
has been reached
"""
def __init__(self, length, start, timeout):
self.length = length
self.start = start
self.timeout = timeout
self._data = None
self.total = [0]
def _create_data(self):
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
multiplier = int(round(int(self.length) / 36.0))
IO = BytesIO or StringIO
self._data = IO(
('content1=%s' %
(chars * multiplier)[0:int(self.length) - 9]
).encode()
)
@property
def data(self):
if not self._data:
self._create_data()
return self._data
def read(self, n=10240):
if ((timeit.default_timer() - self.start) <= self.timeout and
not SHUTDOWN_EVENT.isSet()):
chunk = self.data.read(n)
self.total.append(len(chunk))
return chunk
else:
raise SpeedtestUploadTimeout
def __len__(self):
return self.length
class HTTPUploader(threading.Thread):
"""Thread class for putting a URL"""
def __init__(self, i, request, start, size, timeout):
threading.Thread.__init__(self)
self.request = request
self.request.data.start = self.starttime = start
self.size = size
self.result = None
self.timeout = timeout
self.i = i
def run(self):
request = self.request
try:
if ((timeit.default_timer() - self.starttime) <= self.timeout and
not SHUTDOWN_EVENT.isSet()):
try:
f = urlopen(request)
except TypeError:
# PY24 expects a string or buffer
# This also causes issues with Ctrl-C, but we will concede
# for the moment that Ctrl-C on PY24 isn't immediate
request = build_request(self.request.get_full_url(),
data=request.data.read(self.size))
f = urlopen(request)
f.read(11)
f.close()
self.result = sum(self.request.data.total)
else:
self.result = 0
except (IOError, SpeedtestUploadTimeout):
self.result = sum(self.request.data.total)
class SpeedtestResults(object):
"""Class for holding the results of a speedtest, including:
Download speed
Upload speed
Ping/Latency to test server
Data about server that the test was run against
    Additionally this class can return result data as a dictionary, CSV or JSON,
as well as submit a POST of the result data to the speedtest.net API
to get a share results image link.
"""
def __init__(self, download=0, upload=0, ping=0, server=None):
self.download = download
self.upload = upload
self.ping = ping
if server is None:
self.server = {}
else:
self.server = server
self._share = None
self.timestamp = datetime.datetime.utcnow().isoformat()
self.bytes_received = 0
self.bytes_sent = 0
def __repr__(self):
return repr(self.dict())
def share(self):
"""POST data to the speedtest.net API to obtain a share results
link
"""
if self._share:
return self._share
download = int(round(self.download / 1000.0, 0))
ping = int(round(self.ping, 0))
upload = int(round(self.upload / 1000.0, 0))
# Build the request to send results back to speedtest.net
# We use a list instead of a dict because the API expects parameters
# in a certain order
api_data = [
'recommendedserverid=%s' % self.server['id'],
'ping=%s' % ping,
'screenresolution=',
'promo=',
'download=%s' % download,
'screendpi=',
'upload=%s' % upload,
'testmethod=http',
'hash=%s' % md5(('%s-%s-%s-%s' %
(ping, upload, download, '297aae72'))
.encode()).hexdigest(),
'touchscreen=none',
'startmode=pingselect',
'accuracy=1',
'bytesreceived=%s' % self.bytes_received,
'bytessent=%s' % self.bytes_sent,
'serverid=%s' % self.server['id'],
]
headers = {'Referer': 'http://c.speedtest.net/flash/speedtest.swf'}
request = build_request('://www.speedtest.net/api/api.php',
data='&'.join(api_data).encode(),
headers=headers)
f, e = catch_request(request)
if e:
raise ShareResultsConnectFailure(e)
response = f.read()
code = f.code
f.close()
if int(code) != 200:
raise ShareResultsSubmitFailure('Could not submit results to '
'speedtest.net')
qsargs = parse_qs(response.decode())
resultid = qsargs.get('resultid')
if not resultid or len(resultid) != 1:
raise ShareResultsSubmitFailure('Could not submit results to '
'speedtest.net')
self._share = 'http://www.speedtest.net/result/%s.png' % resultid[0]
return self._share
def dict(self):
"""Return dictionary of result data"""
return {
'download': self.download,
'upload': self.upload,
'ping': self.ping,
'server': self.server,
'timestamp': self.timestamp
}
def csv(self, delimiter=','):
"""Return data in CSV format"""
data = self.dict()
out = StringIO()
writer = csv.writer(out, delimiter=delimiter, lineterminator='')
writer.writerow([data['server']['id'], data['server']['sponsor'],
data['server']['name'], data['timestamp'],
data['server']['d'], data['ping'], data['download'],
data['upload']])
return out.getvalue()
def json(self, pretty=False):
"""Return data in JSON format"""
kwargs = {}
if pretty:
kwargs.update({
'indent': 4,
'sort_keys': True
})
return json.dumps(self.dict(), **kwargs)
class Speedtest(object):
"""Class for performing standard speedtest.net testing operations"""
def __init__(self, config=None):
self.config = {}
self.get_config()
if config is not None:
self.config.update(config)
self.servers = {}
self.closest = []
self.best = {}
self.results = SpeedtestResults()
def get_config(self):
"""Download the speedtest.net configuration and return only the data
we are interested in
"""
headers = {}
if gzip:
headers['Accept-Encoding'] = 'gzip'
request = build_request('://www.speedtest.net/speedtest-config.php',
headers=headers)
uh, e = catch_request(request)
if e:
raise ConfigRetrievalError(e)
configxml = []
stream = get_response_stream(uh)
while 1:
configxml.append(stream.read(1024))
if len(configxml[-1]) == 0:
break
stream.close()
uh.close()
if int(uh.code) != 200:
return None
printer(''.encode().join(configxml), debug=True)
try:
root = ET.fromstring(''.encode().join(configxml))
server_config = root.find('server-config').attrib
download = root.find('download').attrib
upload = root.find('upload').attrib
# times = root.find('times').attrib
client = root.find('client').attrib
except AttributeError:
root = DOM.parseString(''.join(configxml))
server_config = get_attributes_by_tag_name(root, 'server-config')
download = get_attributes_by_tag_name(root, 'download')
upload = get_attributes_by_tag_name(root, 'upload')
# times = get_attributes_by_tag_name(root, 'times')
client = get_attributes_by_tag_name(root, 'client')
ignore_servers = list(
map(int, server_config['ignoreids'].split(','))
)
ratio = int(upload['ratio'])
upload_max = int(upload['maxchunkcount'])
up_sizes = [32768, 65536, 131072, 262144, 524288, 1048576, 7340032]
sizes = {
'upload': up_sizes[ratio - 1:],
'download': [350, 500, 750, 1000, 1500, 2000, 2500,
3000, 3500, 4000]
}
counts = {
'upload': int(upload_max * 2 / len(sizes['upload'])),
'download': int(download['threadsperurl'])
}
threads = {
'upload': int(upload['threads']),
'download': int(server_config['threadcount']) * 2
}
length = {
'upload': int(upload['testlength']),
'download': int(download['testlength'])
}
self.config.update({
'client': client,
'ignore_servers': ignore_servers,
'sizes': sizes,
'counts': counts,
'threads': threads,
'length': length,
'upload_max': upload_max
})
self.lat_lon = (float(client['lat']), float(client['lon']))
return self.config
def get_servers(self, servers=None):
"""Retrieve a the list of speedtest.net servers, optionally filtered
to servers matching those specified in the ``servers`` argument
"""
if servers is None:
servers = []
self.servers.clear()
for i, s in enumerate(servers):
try:
servers[i] = int(s)
except ValueError:
raise InvalidServerIDType('%s is an invalid server type, must '
'be int' % s)
urls = [
'://www.speedtest.net/speedtest-servers-static.php',
'http://c.speedtest.net/speedtest-servers-static.php',
'://www.speedtest.net/speedtest-servers.php',
'http://c.speedtest.net/speedtest-servers.php',
]
headers = {}
if gzip:
headers['Accept-Encoding'] = 'gzip'
errors = []
for url in urls:
try:
request = build_request('%s?threads=%s' %
(url,
self.config['threads']['download']),
headers=headers)
uh, e = catch_request(request)
if e:
errors.append('%s' % e)
raise ServersRetrievalError
stream = get_response_stream(uh)
serversxml = []
while 1:
serversxml.append(stream.read(1024))
if len(serversxml[-1]) == 0:
break
stream.close()
uh.close()
if int(uh.code) != 200:
raise ServersRetrievalError
printer(''.encode().join(serversxml), debug=True)
try:
try:
root = ET.fromstring(''.encode().join(serversxml))
elements = root.getiterator('server')
except AttributeError:
root = DOM.parseString(''.join(serversxml))
elements = root.getElementsByTagName('server')
except (SyntaxError, xml.parsers.expat.ExpatError):
raise ServersRetrievalError
for server in elements:
try:
attrib = server.attrib
except AttributeError:
attrib = dict(list(server.attributes.items()))
if servers and int(attrib.get('id')) not in servers:
continue
if int(attrib.get('id')) in self.config['ignore_servers']:
continue
try:
d = distance(self.lat_lon,
(float(attrib.get('lat')),
float(attrib.get('lon'))))
except:
continue
attrib['d'] = d
try:
self.servers[d].append(attrib)
except KeyError:
self.servers[d] = [attrib]
printer(''.encode().join(serversxml), debug=True)
break
except ServersRetrievalError:
continue
if servers and not self.servers:
raise NoMatchedServers
return self.servers
def set_mini_server(self, server):
"""Instead of querying for a list of servers, set a link to a
speedtest mini server
"""
urlparts = urlparse(server)
name, ext = os.path.splitext(urlparts[2])
if ext:
url = os.path.dirname(server)
else:
url = server
request = build_request(url)
uh, e = catch_request(request)
if e:
raise SpeedtestMiniConnectFailure('Failed to connect to %s' %
server)
else:
text = uh.read()
uh.close()
extension = re.findall('upload_?[Ee]xtension: "([^"]+)"',
text.decode())
if not extension:
for ext in ['php', 'asp', 'aspx', 'jsp']:
try:
f = urlopen('%s/speedtest/upload.%s' % (url, ext))
except:
pass
else:
data = f.read().strip().decode()
if (f.code == 200 and
len(data.splitlines()) == 1 and
re.match('size=[0-9]', data)):
extension = [ext]
break
if not urlparts or not extension:
raise InvalidSpeedtestMiniServer('Invalid Speedtest Mini Server: '
'%s' % server)
self.servers = [{
'sponsor': 'Speedtest Mini',
'name': urlparts[1],
'd': 0,
'url': '%s/speedtest/upload.%s' % (url.rstrip('/'), extension[0]),
'latency': 0,
'id': 0
}]
return self.servers
def get_closest_servers(self, limit=5):
"""Limit servers to the closest speedtest.net servers based on
geographic distance
"""
if not self.servers:
self.get_servers()
for d in sorted(self.servers.keys()):
for s in self.servers[d]:
self.closest.append(s)
if len(self.closest) == limit:
break
else:
continue
break
printer(self.closest, debug=True)
return self.closest
def get_best_server(self, servers=None):
"""Perform a speedtest.net "ping" to determine which speedtest.net
server has the lowest latency
"""
if not servers:
if not self.closest:
servers = self.get_closest_servers()
servers = self.closest
results = {}
for server in servers:
cum = []
url = os.path.dirname(server['url'])
urlparts = urlparse('%s/latency.txt' % url)
printer('%s %s/latency.txt' % ('GET', url), debug=True)
for _ in range(0, 3):
try:
if urlparts[0] == 'https':
h = HTTPSConnection(urlparts[1])
else:
h = HTTPConnection(urlparts[1])
headers = {'User-Agent': USER_AGENT}
start = timeit.default_timer()
h.request("GET", urlparts[2], headers=headers)
r = h.getresponse()
total = (timeit.default_timer() - start)
except HTTP_ERRORS:
e = get_exception()
printer('%r' % e, debug=True)
cum.append(3600)
continue
text = r.read(9)
if int(r.status) == 200 and text == 'test=test'.encode():
cum.append(total)
else:
cum.append(3600)
h.close()
avg = round((sum(cum) / 6) * 1000.0, 3)
results[avg] = server
try:
fastest = sorted(results.keys())[0]
except IndexError:
raise SpeedtestBestServerFailure('Unable to connect to servers to '
'test latency.')
best = results[fastest]
best['latency'] = fastest
self.results.ping = fastest
self.results.server = best
self.best.update(best)
printer(best, debug=True)
return best
def download(self, callback=do_nothing):
"""Test download speed against speedtest.net"""
urls = []
for size in self.config['sizes']['download']:
for _ in range(0, self.config['counts']['download']):
urls.append('%s/random%sx%s.jpg' %
(os.path.dirname(self.best['url']), size, size))
request_count = len(urls)
requests = []
for i, url in enumerate(urls):
requests.append(build_request(url, bump=i))
def producer(q, requests, request_count):
for i, request in enumerate(requests):
thread = HTTPDownloader(i, request, start,
self.config['length']['download'])
thread.start()
q.put(thread, True)
callback(i, request_count, start=True)
finished = []
def consumer(q, request_count):
while len(finished) < request_count:
thread = q.get(True)
while thread.isAlive():
thread.join(timeout=0.1)
finished.append(sum(thread.result))
callback(thread.i, request_count, end=True)
q = Queue(self.config['threads']['download'])
prod_thread = threading.Thread(target=producer,
args=(q, requests, request_count))
cons_thread = threading.Thread(target=consumer,
args=(q, request_count))
start = timeit.default_timer()
prod_thread.start()
cons_thread.start()
while prod_thread.isAlive():
prod_thread.join(timeout=0.1)
while cons_thread.isAlive():
cons_thread.join(timeout=0.1)
stop = timeit.default_timer()
self.results.bytes_received = sum(finished)
self.results.download = (
(self.results.bytes_received / (stop - start)) * 8.0
)
if self.results.download > 100000:
self.config['threads']['upload'] = 8
return self.results.download
def upload(self, callback=do_nothing):
"""Test upload speed against speedtest.net"""
sizes = []
for size in self.config['sizes']['upload']:
for _ in range(0, self.config['counts']['upload']):
sizes.append(size)
# request_count = len(sizes)
request_count = self.config['upload_max']
requests = []
for i, size in enumerate(sizes):
# We set ``0`` for ``start`` and handle setting the actual
# ``start`` in ``HTTPUploader`` to get better measurements
requests.append(
(
build_request(
self.best['url'],
HTTPUploaderData(size, 0,
self.config['length']['upload'])
),
size
)
)
def producer(q, requests, request_count):
for i, request in enumerate(requests[:request_count]):
thread = HTTPUploader(i, request[0], start, request[1],
self.config['length']['upload'])
thread.start()
q.put(thread, True)
callback(i, request_count, start=True)
finished = []
def consumer(q, request_count):
while len(finished) < request_count:
thread = q.get(True)
while thread.isAlive():
thread.join(timeout=0.1)
finished.append(thread.result)
callback(thread.i, request_count, end=True)
q = Queue(self.config['threads']['upload'])
prod_thread = threading.Thread(target=producer,
args=(q, requests, request_count))
cons_thread = threading.Thread(target=consumer,
args=(q, request_count))
start = timeit.default_timer()
prod_thread.start()
cons_thread.start()
while prod_thread.isAlive():
prod_thread.join(timeout=0.1)
while cons_thread.isAlive():
cons_thread.join(timeout=0.1)
stop = timeit.default_timer()
self.results.bytes_sent = sum(finished)
self.results.upload = (
(self.results.bytes_sent / (stop - start)) * 8.0
)
return self.results.upload
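# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of driving the Speedtest class programmatically,
# using only methods defined above. The function name is hypothetical and it
# is never called here, so importing this module is unaffected.
def _example_speedtest_run():
    """Sketch: run a download/upload test and print the result dictionary."""
    st = Speedtest()           # downloads the speedtest.net configuration
    st.get_servers()           # retrieve the server list
    st.get_best_server()       # pick the lowest-latency server
    st.download()              # result in bits per second
    st.upload()                # result in bits per second
    print_(st.results.dict())  # or st.results.csv() / st.results.json()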
def ctrl_c(signum, frame):
"""Catch Ctrl-C key sequence and set a SHUTDOWN_EVENT for our threaded
operations
"""
SHUTDOWN_EVENT.set()
print_('\nCancelling...')
sys.exit(0)
def version():
"""Print the version"""
print_(__version__)
sys.exit(0)
def csv_header():
"""Print the CSV Headers"""
print_('Server ID,Sponsor,Server Name,Timestamp,Distance,Ping,Download,'
'Upload')
sys.exit(0)
def parse_args():
"""Function to handle building and parsing of command line arguments"""
description = (
'Command line interface for testing internet bandwidth using '
'speedtest.net.\n'
'------------------------------------------------------------'
'--------------\n'
'https://github.com/sivel/speedtest-cli')
parser = ArgParser(description=description)
# Give optparse.OptionParser an `add_argument` method for
# compatibility with argparse.ArgumentParser
try:
parser.add_argument = parser.add_option
except AttributeError:
pass
parser.add_argument('--bytes', dest='units', action='store_const',
const=('byte', 8), default=('bit', 1),
help='Display values in bytes instead of bits. Does '
'not affect the image generated by --share, nor '
'output from --json or --csv')
parser.add_argument('--share', action='store_true',
help='Generate and provide a URL to the speedtest.net '
'share results image')
parser.add_argument('--simple', action='store_true', default=False,
help='Suppress verbose output, only show basic '
'information')
parser.add_argument('--csv', action='store_true', default=False,
help='Suppress verbose output, only show basic '
'information in CSV format. Speeds listed in '
'bit/s and not affected by --bytes')
parser.add_argument('--csv-delimiter', default=',', type=PARSER_TYPE_STR,
help='Single character delimiter to use in CSV '
'output. Default ","')
parser.add_argument('--csv-header', action='store_true', default=False,
help='Print CSV headers')
parser.add_argument('--json', action='store_true', default=False,
help='Suppress verbose output, only show basic '
'information in JSON format. Speeds listed in '
'bit/s and not affected by --bytes')
parser.add_argument('--list', action='store_true',
help='Display a list of speedtest.net servers '
'sorted by distance')
parser.add_argument('--server', help='Specify a server ID to test against',
type=PARSER_TYPE_INT)
parser.add_argument('--mini', help='URL of the Speedtest Mini server')
parser.add_argument('--source', help='Source IP address to bind to')
parser.add_argument('--timeout', default=10, type=PARSER_TYPE_INT,
help='HTTP timeout in seconds. Default 10')
parser.add_argument('--secure', action='store_true',
help='Use HTTPS instead of HTTP when communicating '
'with speedtest.net operated servers')
parser.add_argument('--version', action='store_true',
help='Show the version number and exit')
parser.add_argument('--debug', action='store_true',
help=ARG_SUPPRESS, default=ARG_SUPPRESS)
options = parser.parse_args()
if isinstance(options, tuple):
args = options[0]
else:
args = options
return args
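# --- Illustrative CLI usage (not part of the original module) ---
# A hedged sketch of invocations built from the flags defined above; the
# command name `speedtest-cli` is an assumption about how this file is
# installed or aliased.
#
#   speedtest-cli                         # full test with dot progress output
#   speedtest-cli --simple                # ping/download/upload summary only
#   speedtest-cli --list                  # list servers sorted by distance
#   speedtest-cli --server 1234 --share   # test a specific server, share image
#   speedtest-cli --json --secure         # JSON output over HTTPS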
def validate_optional_args(args):
"""Check if an argument was provided that depends on a module that may
not be part of the Python standard library.
If such an argument is supplied, and the module does not exist, exit
with an error stating which module is missing.
"""
optional_args = {
'json': ('json/simplejson python module', json),
'secure': ('SSL support', HTTPSConnection),
}
for arg, info in optional_args.items():
if getattr(args, arg, False) and info[1] is None:
raise SystemExit('%s is not installed. --%s is '
'unavailable' % (info[0], arg))
def printer(string, quiet=False, debug=False, **kwargs):
"""Helper function to print a string only when not quiet"""
if debug and not DEBUG:
return
if debug:
out = '\033[1;30mDEBUG: %s\033[0m' % string
else:
out = string
if not quiet:
print_(out, **kwargs)
def shell():
"""Run the full speedtest.net test"""
global SHUTDOWN_EVENT, SOURCE, SCHEME, DEBUG
SHUTDOWN_EVENT = threading.Event()
signal.signal(signal.SIGINT, ctrl_c)
args = parse_args()
# Print the version and exit
if args.version:
version()
if args.csv_header:
csv_header()
if len(args.csv_delimiter) != 1:
raise SystemExit('--csv-delimiter must be a single character')
validate_optional_args(args)
socket.setdefaulttimeout(args.timeout)
# If specified bind to a specific IP address
if args.source:
SOURCE = args.source
socket.socket = bound_socket
if args.secure:
SCHEME = 'https'
debug = getattr(args, 'debug', False)
if debug == 'SUPPRESSHELP':
debug = False
if debug:
DEBUG = True
# Pre-cache the user agent string
build_user_agent()
if args.simple or args.csv or args.json:
quiet = True
else:
quiet = False
# Don't set a callback if we are running quietly
if quiet or debug:
callback = do_nothing
else:
callback = print_dots
printer('Retrieving speedtest.net configuration...', quiet)
try:
speedtest = Speedtest()
except (ConfigRetrievalError, HTTP_ERRORS):
printer('Cannot retrieve speedtest configuration')
raise SpeedtestCLIError(get_exception())
if args.list:
try:
speedtest.get_servers()
except (ServersRetrievalError, HTTP_ERRORS):
print_('Cannot retrieve speedtest server list')
raise SpeedtestCLIError(get_exception())
for _, servers in sorted(speedtest.servers.items()):
for server in servers:
line = ('%(id)5s) %(sponsor)s (%(name)s, %(country)s) '
'[%(d)0.2f km]' % server)
try:
print_(line)
except IOError:
e = get_exception()
if e.errno != errno.EPIPE:
raise
sys.exit(0)
# Set a filter of servers to retrieve
servers = []
if args.server:
servers.append(args.server)
printer('Testing from %(isp)s (%(ip)s)...' % speedtest.config['client'],
quiet)
if not args.mini:
printer('Retrieving speedtest.net server list...', quiet)
try:
speedtest.get_servers(servers)
except NoMatchedServers:
raise SpeedtestCLIError('No matched servers: %s' % args.server)
except (ServersRetrievalError, HTTP_ERRORS):
print_('Cannot retrieve speedtest server list')
raise SpeedtestCLIError(get_exception())
except InvalidServerIDType:
raise SpeedtestCLIError('%s is an invalid server type, must '
'be an int' % args.server)
printer('Selecting best server based on ping...', quiet)
speedtest.get_best_server()
elif args.mini:
speedtest.get_best_server(speedtest.set_mini_server(args.mini))
results = speedtest.results
printer('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
'%(latency)s ms' % results.server, quiet)
printer('Testing download speed', quiet,
end=('', '\n')[bool(debug)])
speedtest.download(callback=callback)
printer('Download: %0.2f M%s/s' %
((results.download / 1000.0 / 1000.0) / args.units[1],
args.units[0]),
quiet)
printer('Testing upload speed', quiet,
end=('', '\n')[bool(debug)])
speedtest.upload(callback=callback)
printer('Upload: %0.2f M%s/s' %
((results.upload / 1000.0 / 1000.0) / args.units[1],
args.units[0]),
quiet)
if args.simple:
print_('Ping: %s ms\nDownload: %0.2f M%s/s\nUpload: %0.2f M%s/s' %
(results.ping,
(results.download / 1000.0 / 1000.0) / args.units[1],
args.units[0],
(results.upload / 1000.0 / 1000.0) / args.units[1],
args.units[0]))
elif args.csv:
print_(results.csv(delimiter=args.csv_delimiter))
elif args.json:
print_(results.json())
if args.share:
printer('Share results: %s' % results.share(), quiet)
def main():
try:
shell()
except KeyboardInterrupt:
print_('\nCancelling...')
except (SpeedtestException, SystemExit):
e = get_exception()
if getattr(e, 'code', 1) != 0:
raise SystemExit('ERROR: %s' % e)
if __name__ == '__main__':
main()
# vim:ts=4:sw=4:expandtab
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
from lxml import etree
from oslo.config import cfg
from nova import exception
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import images
libvirt_opts = [
cfg.BoolOpt('libvirt_snapshot_compression',
default=False,
help='Compress snapshot images when possible. This '
'currently applies exclusively to qcow2 images'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_opts)
CONF.import_opt('instances_path', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
def execute(*args, **kwargs):
return utils.execute(*args, **kwargs)
def get_iscsi_initiator():
"""Get iscsi initiator name for this machine."""
# NOTE(vish) openiscsi stores initiator name in a file that
# needs root permission to read.
contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi')
for l in contents.split('\n'):
if l.startswith('InitiatorName='):
return l[l.index('=') + 1:].strip()
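# Illustrative example (not part of the original module): a typical
# /etc/iscsi/initiatorname.iscsi contains a line such as
#   InitiatorName=iqn.1993-08.org.debian:01:abcdef0123456
# for which get_iscsi_initiator() returns 'iqn.1993-08.org.debian:01:abcdef0123456'.
# The IQN shown here is made up.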
def get_fc_hbas():
"""Get the Fibre Channel HBA information."""
out = None
try:
out, err = execute('systool', '-c', 'fc_host', '-v',
run_as_root=True)
except exception.ProcessExecutionError as exc:
# This handles the case where rootwrap is used
# and systool is not installed
# 96 = nova.cmd.rootwrap.RC_NOEXECFOUND:
if exc.exit_code == 96:
LOG.warn(_("systool is not installed"))
return []
except OSError as exc:
# This handles the case where rootwrap is NOT used
# and systool is not installed
if exc.errno == errno.ENOENT:
LOG.warn(_("systool is not installed"))
return []
if out is None:
raise RuntimeError(_("Cannot find any Fibre Channel HBAs"))
lines = out.split('\n')
# ignore the first 2 lines
lines = lines[2:]
hbas = []
hba = {}
lastline = None
for line in lines:
line = line.strip()
# 2 newlines denotes a new hba port
if line == '' and lastline == '':
if len(hba) > 0:
hbas.append(hba)
hba = {}
else:
val = line.split('=')
if len(val) == 2:
key = val[0].strip().replace(" ", "")
value = val[1].strip()
hba[key] = value.replace('"', '')
lastline = line
return hbas
def get_fc_hbas_info():
"""Get Fibre Channel WWNs and device paths from the system, if any."""
    # Note modern Linux kernels contain the FC HBAs in /sys
    # and are obtainable via the systool app
hbas = get_fc_hbas()
hbas_info = []
for hba in hbas:
wwpn = hba['port_name'].replace('0x', '')
wwnn = hba['node_name'].replace('0x', '')
device_path = hba['ClassDevicepath']
device = hba['ClassDevice']
hbas_info.append({'port_name': wwpn,
'node_name': wwnn,
'host_device': device,
'device_path': device_path})
return hbas_info
def get_fc_wwpns():
"""Get Fibre Channel WWPNs from the system, if any."""
    # Note: modern Linux kernels expose the FC HBAs in /sys,
    # and their details are obtainable via the systool app
hbas = get_fc_hbas()
wwpns = []
if hbas:
for hba in hbas:
if hba['port_state'] == 'Online':
wwpn = hba['port_name'].replace('0x', '')
wwpns.append(wwpn)
return wwpns
def get_fc_wwnns():
"""Get Fibre Channel WWNNs from the system, if any."""
    # Note: modern Linux kernels expose the FC HBAs in /sys,
    # and their details are obtainable via the systool app
hbas = get_fc_hbas()
wwnns = []
if hbas:
for hba in hbas:
if hba['port_state'] == 'Online':
wwnn = hba['node_name'].replace('0x', '')
wwnns.append(wwnn)
return wwnns
def create_image(disk_format, path, size):
"""Create a disk image
:param disk_format: Disk image format (as known by qemu-img)
:param path: Desired location of the disk image
:param size: Desired size of disk image. May be given as an int or
a string. If given as an int, it will be interpreted
as bytes. If it's a string, it should consist of a number
with an optional suffix ('K' for Kibibytes,
                 'M' for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
If no suffix is given, it will be interpreted as bytes.
"""
execute('qemu-img', 'create', '-f', disk_format, path, size)
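# Illustrative sketch only (not used by this module): one way the size
# argument described above could be normalised to bytes. The helper name
# and the suffix table are assumptions, not Nova API.
def _example_size_to_bytes(size):
    suffixes = {'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3, 'T': 1024 ** 4}
    if isinstance(size, int):
        return size
    size = str(size)
    if size[-1:].upper() in suffixes:
        return int(size[:-1]) * suffixes[size[-1].upper()]
    return int(size)
# e.g. _example_size_to_bytes('10G') == 10 * 1024 ** 3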
def create_cow_image(backing_file, path, size=None):
"""Create COW image
Creates a COW image with the given backing file
:param backing_file: Existing image on which to base the COW image
:param path: Desired location of the COW image
"""
base_cmd = ['qemu-img', 'create', '-f', 'qcow2']
cow_opts = []
if backing_file:
cow_opts += ['backing_file=%s' % backing_file]
base_details = images.qemu_img_info(backing_file)
else:
base_details = None
# This doesn't seem to get inherited so force it to...
# http://paste.ubuntu.com/1213295/
# TODO(harlowja) probably file a bug against qemu-img/qemu
if base_details and base_details.cluster_size is not None:
cow_opts += ['cluster_size=%s' % base_details.cluster_size]
    # For now don't inherit this due to the following discussion...
# See: http://www.gossamer-threads.com/lists/openstack/dev/10592
# if 'preallocation' in base_details:
# cow_opts += ['preallocation=%s' % base_details['preallocation']]
if base_details and base_details.encryption:
cow_opts += ['encryption=%s' % base_details.encryption]
if size is not None:
cow_opts += ['size=%s' % size]
if cow_opts:
# Format as a comma separated list
csv_opts = ",".join(cow_opts)
cow_opts = ['-o', csv_opts]
cmd = base_cmd + cow_opts + [path]
execute(*cmd)
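# Illustrative sketch only (assumed values, not executed by this module):
# how the options gathered above are folded into a single '-o' argument of
# the qemu-img command line.
def _example_cow_command(backing_file='/base.img', cluster_size=65536,
                         path='/instance/disk'):
    base_cmd = ['qemu-img', 'create', '-f', 'qcow2']
    cow_opts = ['backing_file=%s' % backing_file,
                'cluster_size=%s' % cluster_size]
    return base_cmd + ['-o', ",".join(cow_opts)] + [path]
# _example_cow_command() ==
#     ['qemu-img', 'create', '-f', 'qcow2',
#      '-o', 'backing_file=/base.img,cluster_size=65536', '/instance/disk']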
def create_lvm_image(vg, lv, size, sparse=False):
"""Create LVM image.
    Creates an LVM image with the given size.
:param vg: existing volume group which should hold this image
:param lv: name for this image (logical volume)
    :param size: size of image in bytes
    :param sparse: create sparse logical volume
"""
vg_info = get_volume_group_info(vg)
free_space = vg_info['free']
def check_size(vg, lv, size):
if size > free_space:
raise RuntimeError(_('Insufficient Space on Volume Group %(vg)s.'
' Only %(free_space)db available,'
' but %(size)db required'
' by volume %(lv)s.') % locals())
if sparse:
preallocated_space = 64 * 1024 * 1024
check_size(vg, lv, preallocated_space)
if free_space < size:
LOG.warning(_('Volume group %(vg)s will not be able'
' to hold sparse volume %(lv)s.'
' Virtual volume size is %(size)db,'
' but free space on volume group is'
' only %(free_space)db.') % locals())
cmd = ('lvcreate', '-L', '%db' % preallocated_space,
'--virtualsize', '%db' % size, '-n', lv, vg)
else:
check_size(vg, lv, size)
cmd = ('lvcreate', '-L', '%db' % size, '-n', lv, vg)
execute(*cmd, run_as_root=True, attempts=3)
def get_volume_group_info(vg):
"""Return free/used/total space info for a volume group in bytes
:param vg: volume group name
:returns: A dict containing:
:total: How big the filesystem is (in bytes)
:free: How much space is free (in bytes)
:used: How much space is used (in bytes)
"""
out, err = execute('vgs', '--noheadings', '--nosuffix',
'--separator', '|',
'--units', 'b', '-o', 'vg_size,vg_free', vg,
run_as_root=True)
info = out.split('|')
if len(info) != 2:
raise RuntimeError(_("vg %s must be LVM volume group") % vg)
return {'total': int(info[0]),
'free': int(info[1]),
'used': int(info[0]) - int(info[1])}
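# Illustrative sketch only: the parsing done above, applied to a sample
# 'vgs' output line (the numbers are assumptions).
def _example_parse_vgs_output(out='  10737418240|5368709120\n'):
    total, free = [int(field) for field in out.split('|')]
    return {'total': total, 'free': free, 'used': total - free}
# _example_parse_vgs_output() ==
#     {'total': 10737418240, 'free': 5368709120, 'used': 5368709120}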
def list_logical_volumes(vg):
"""List logical volumes paths for given volume group.
:param vg: volume group name
"""
out, err = execute('lvs', '--noheadings', '-o', 'lv_name', vg,
run_as_root=True)
return [line.strip() for line in out.splitlines()]
def logical_volume_info(path):
"""Get logical volume info.
:param path: logical volume path
"""
out, err = execute('lvs', '-o', 'vg_all,lv_all',
'--separator', '|', path, run_as_root=True)
info = [line.split('|') for line in out.splitlines()]
if len(info) != 2:
raise RuntimeError(_("Path %s must be LVM logical volume") % path)
return dict(zip(*info))
def logical_volume_size(path):
"""Get logical volume size in bytes.
:param path: logical volume path
"""
    # TODO(p-draigbrady): Possibly replace with the more general
# use of blockdev --getsize64 in future
out, _err = execute('lvs', '-o', 'lv_size', '--noheadings', '--units',
'b', '--nosuffix', path, run_as_root=True)
return int(out)
def clear_logical_volume(path):
"""Obfuscate the logical volume.
:param path: logical volume path
"""
# TODO(p-draigbrady): We currently overwrite with zeros
# but we may want to make this configurable in future
# for more or less security conscious setups.
vol_size = logical_volume_size(path)
bs = 1024 * 1024
direct_flags = ('oflag=direct',)
sync_flags = ()
remaining_bytes = vol_size
# The loop caters for versions of dd that
# don't support the iflag=count_bytes option.
while remaining_bytes:
zero_blocks = remaining_bytes / bs
seek_blocks = (vol_size - remaining_bytes) / bs
zero_cmd = ('dd', 'bs=%s' % bs,
'if=/dev/zero', 'of=%s' % path,
'seek=%s' % seek_blocks, 'count=%s' % zero_blocks)
zero_cmd += direct_flags
zero_cmd += sync_flags
if zero_blocks:
utils.execute(*zero_cmd, run_as_root=True)
remaining_bytes %= bs
bs /= 1024 # Limit to 3 iterations
# Use O_DIRECT with initial block size and fdatasync otherwise
direct_flags = ()
sync_flags = ('conv=fdatasync',)
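# Illustrative sketch only (no dd is run, the volume size is an assumption):
# the loop above walks the block size down 1MiB -> 1KiB -> 1B, so at most
# three zeroing passes are ever needed.
def _example_zero_passes(vol_size=5 * 1024 * 1024 + 3 * 1024 + 7):
    passes = []
    bs = 1024 * 1024
    remaining = vol_size
    while remaining:
        count = remaining // bs
        if count:
            passes.append((bs, count))
        remaining %= bs
        bs //= 1024
    return passes
# _example_zero_passes() == [(1048576, 5), (1024, 3), (1, 7)]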
def remove_logical_volumes(*paths):
"""Remove one or more logical volume."""
for path in paths:
clear_logical_volume(path)
if paths:
lvremove = ('lvremove', '-f') + paths
execute(*lvremove, attempts=3, run_as_root=True)
def pick_disk_driver_name(is_block_dev=False):
"""Pick the libvirt primary backend driver name
If the hypervisor supports multiple backend drivers, then the name
attribute selects the primary backend driver name, while the optional
type attribute provides the sub-type. For example, xen supports a name
of "tap", "tap2", "phy", or "file", with a type of "aio" or "qcow2",
while qemu only supports a name of "qemu", but multiple types including
"raw", "bochs", "qcow2", and "qed".
:param is_block_dev:
:returns: driver_name or None
"""
if CONF.libvirt_type == "xen":
if is_block_dev:
return "phy"
else:
return "tap"
elif CONF.libvirt_type in ('kvm', 'qemu'):
return "qemu"
else:
# UML doesn't want a driver_name set
return None
def get_disk_size(path):
"""Get the (virtual) size of a disk image
:param path: Path to the disk image
:returns: Size (in bytes) of the given disk image as it would be seen
by a virtual machine.
"""
size = images.qemu_img_info(path).virtual_size
return int(size)
def get_disk_backing_file(path, basename=True):
"""Get the backing file of a disk image
:param path: Path to the disk image
:returns: a path to the image's backing store
"""
backing_file = images.qemu_img_info(path).backing_file
if backing_file and basename:
backing_file = os.path.basename(backing_file)
return backing_file
def copy_image(src, dest, host=None):
"""Copy a disk image to an existing directory
:param src: Source image
:param dest: Destination path
:param host: Remote host
"""
if not host:
# We shell out to cp because that will intelligently copy
# sparse files. I.E. holes will not be written to DEST,
# rather recreated efficiently. In addition, since
# coreutils 8.11, holes can be read efficiently too.
execute('cp', src, dest)
else:
dest = "%s:%s" % (host, dest)
# Try rsync first as that can compress and create sparse dest files.
# Note however that rsync currently doesn't read sparse files
# efficiently: https://bugzilla.samba.org/show_bug.cgi?id=8918
# At least network traffic is mitigated with compression.
try:
# Do a relatively light weight test first, so that we
# can fall back to scp, without having run out of space
# on the destination for example.
execute('rsync', '--sparse', '--compress', '--dry-run', src, dest)
except exception.ProcessExecutionError:
execute('scp', src, dest)
else:
execute('rsync', '--sparse', '--compress', src, dest)
def write_to_file(path, contents, umask=None):
"""Write the given contents to a file
:param path: Destination file
:param contents: Desired contents of the file
:param umask: Umask to set when creating this file (will be reset)
"""
if umask:
saved_umask = os.umask(umask)
try:
with open(path, 'w') as f:
f.write(contents)
finally:
if umask:
os.umask(saved_umask)
def chown(path, owner):
"""Change ownership of file or directory
:param path: File or directory whose ownership to change
:param owner: Desired new owner (given as uid or username)
"""
execute('chown', owner, path, run_as_root=True)
def create_snapshot(disk_path, snapshot_name):
"""Create a snapshot in a disk image
:param disk_path: Path to disk image
:param snapshot_name: Name of snapshot in disk image
"""
qemu_img_cmd = ('qemu-img', 'snapshot', '-c', snapshot_name, disk_path)
# NOTE(vish): libvirt changes ownership of images
execute(*qemu_img_cmd, run_as_root=True)
def delete_snapshot(disk_path, snapshot_name):
"""Create a snapshot in a disk image
:param disk_path: Path to disk image
:param snapshot_name: Name of snapshot in disk image
"""
qemu_img_cmd = ('qemu-img', 'snapshot', '-d', snapshot_name, disk_path)
# NOTE(vish): libvirt changes ownership of images
execute(*qemu_img_cmd, run_as_root=True)
def extract_snapshot(disk_path, source_fmt, snapshot_name, out_path, dest_fmt):
"""Extract a named snapshot from a disk image
:param disk_path: Path to disk image
:param snapshot_name: Name of snapshot in disk image
    :param out_path: Desired path of extracted snapshot
    :param source_fmt: Format of the source disk image
    :param dest_fmt: Desired format of the extracted snapshot
    """
# NOTE(markmc): ISO is just raw to qemu-img
if dest_fmt == 'iso':
dest_fmt = 'raw'
qemu_img_cmd = ('qemu-img', 'convert', '-f', source_fmt, '-O', dest_fmt)
# Conditionally enable compression of snapshots.
if CONF.libvirt_snapshot_compression and dest_fmt == "qcow2":
qemu_img_cmd += ('-c',)
# When snapshot name is omitted we do a basic convert, which
# is used by live snapshots.
if snapshot_name is not None:
qemu_img_cmd += ('-s', snapshot_name)
qemu_img_cmd += (disk_path, out_path)
execute(*qemu_img_cmd)
def load_file(path):
"""Read contents of file
:param path: File to read
"""
with open(path, 'r') as fp:
return fp.read()
def file_open(*args, **kwargs):
"""Open file
see built-in file() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return file(*args, **kwargs)
def file_delete(path):
"""Delete (unlink) file
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return os.unlink(path)
def find_disk(virt_dom):
"""Find root device path for instance
May be file or device"""
xml_desc = virt_dom.XMLDesc(0)
domain = etree.fromstring(xml_desc)
if CONF.libvirt_type == 'lxc':
source = domain.find('devices/filesystem/source')
disk_path = source.get('dir')
disk_path = disk_path[0:disk_path.rfind('rootfs')]
disk_path = os.path.join(disk_path, 'disk')
else:
source = domain.find('devices/disk/source')
disk_path = source.get('file') or source.get('dev')
if not disk_path:
raise RuntimeError(_("Can't retrieve root device path "
"from instance libvirt configuration"))
return disk_path
def get_disk_type(path):
"""Retrieve disk type (raw, qcow2, lvm) for given file."""
if path.startswith('/dev'):
return 'lvm'
return images.qemu_img_info(path).file_format
def get_fs_info(path):
"""Get free/used/total space info for a filesystem
:param path: Any dirent on the filesystem
:returns: A dict containing:
:free: How much space is free (in bytes)
:used: How much space is used (in bytes)
:total: How big the filesystem is (in bytes)
"""
hddinfo = os.statvfs(path)
total = hddinfo.f_frsize * hddinfo.f_blocks
free = hddinfo.f_frsize * hddinfo.f_bavail
used = hddinfo.f_frsize * (hddinfo.f_blocks - hddinfo.f_bfree)
return {'total': total,
'free': free,
'used': used}
def fetch_image(context, target, image_id, user_id, project_id):
"""Grab image."""
images.fetch_to_raw(context, image_id, target, user_id, project_id)
def get_instance_path(instance, forceold=False):
"""Determine the correct path for instance storage.
This method determines the directory name for instance storage, while
handling the fact that we changed the naming style to something more
unique in the grizzly release.
:param instance: the instance we want a path for
:param forceold: force the use of the pre-grizzly format
:returns: a path to store information about that instance
"""
pre_grizzly_name = os.path.join(CONF.instances_path, instance['name'])
if forceold or os.path.exists(pre_grizzly_name):
return pre_grizzly_name
return os.path.join(CONF.instances_path, instance['uuid'])
|
|
#from builtins import range
from collections import namedtuple
from datetime import datetime
import csv
import math
import time
import sys
import tensorflow.python.platform
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Batch size.""")
tf.app.flags.DEFINE_integer('num_batches', 100,
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('forward_only', False,
"""Only run the forward pass.""")
tf.app.flags.DEFINE_boolean('forward_backward_only', False,
"""Only run the forward-forward pass.""")
tf.app.flags.DEFINE_string('data_format', 'NCHW',
"""The data format for Convnet operations.
Can be either NHWC or NCHW.
""")
tf.app.flags.DEFINE_string('csv_file', '',
"""File to output timing information to in csv
                           format. If no file is passed in, the csv file
                           will not be created.
""")
parameters = []
conv_counter = 1
pool_counter = 1
affine_counter = 1
TimingEntry = namedtuple(
'TimingEntry', ['info_string', 'timestamp', 'num_batches', 'mean', 'sd'])
def _conv(inpOp, nIn, nOut, kH, kW, dH, dW, padType):
global conv_counter
global parameters
name = 'conv' + str(conv_counter)
conv_counter += 1
with tf.name_scope(name) as scope:
kernel = tf.Variable(tf.truncated_normal([kH, kW, nIn, nOut],
dtype=tf.float32,
stddev=1e-1), name='weights')
if FLAGS.data_format == 'NCHW':
strides = [1, 1, dH, dW]
else:
strides = [1, dH, dW, 1]
conv = tf.nn.conv2d(inpOp, kernel, strides, padding=padType,
data_format=FLAGS.data_format)
biases = tf.Variable(tf.constant(0.0, shape=[nOut], dtype=tf.float32),
trainable=True, name='biases')
bias = tf.reshape(tf.nn.bias_add(conv, biases,
data_format=FLAGS.data_format),
conv.get_shape())
conv1 = tf.nn.relu(bias, name=scope)
parameters += [kernel, biases]
return conv1
def _affine(inpOp, nIn, nOut):
global affine_counter
global parameters
name = 'affine' + str(affine_counter)
affine_counter += 1
with tf.name_scope(name) as scope:
kernel = tf.Variable(tf.truncated_normal([nIn, nOut],
dtype=tf.float32,
stddev=1e-1), name='weights')
biases = tf.Variable(tf.constant(0.0, shape=[nOut], dtype=tf.float32),
trainable=True, name='biases')
affine1 = tf.nn.relu_layer(inpOp, kernel, biases, name=name)
parameters += [kernel, biases]
return affine1
def _mpool(inpOp, kH, kW, dH, dW):
global pool_counter
global parameters
name = 'pool' + str(pool_counter)
pool_counter += 1
if FLAGS.data_format == 'NCHW':
ksize = [1, 1, kH, kW]
strides = [1, 1, dH, dW]
else:
ksize = [1, kH, kW, 1]
strides = [1, dH, dW, 1]
return tf.nn.max_pool(inpOp,
ksize=ksize,
strides=strides,
padding='VALID',
data_format=FLAGS.data_format,
name=name)
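# Illustrative values only: for kH = kW = 3 and dH = dW = 2 the pooling
# arguments built above differ solely in where the spatial dimensions sit:
#   NCHW: ksize=[1, 1, 3, 3], strides=[1, 1, 2, 2]
#   NHWC: ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1]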
def loss(logits, labels):
batch_size = tf.size(labels)
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
  # If the layer configuration is changed, the one-hot shape below probably
  # needs to change as well.
concated = tf.concat([indices, labels], 1)
onehot_labels = tf.sparse_to_dense(
concated, tf.stack([batch_size, 1000]), 1.0, 0.0)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=onehot_labels, name='xentropy')
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
return loss
def inference(images):
conv1 = _conv (images, 3, 64, 11, 11, 4, 4, 'VALID')
pool1 = _mpool(conv1, 3, 3, 2, 2)
conv2 = _conv(pool1, 64, 192, 5, 5, 1, 1, 'VALID')
pool2 = _mpool(conv2, 3, 3, 2, 2)
conv3 = _conv (pool2, 192, 384, 3, 3, 1, 1, 'SAME')
conv4 = _conv (conv3, 384, 256, 3, 3, 1, 1, 'SAME')
conv5 = _conv (conv4, 256, 256, 3, 3, 1, 1, 'SAME')
pool5 = _mpool(conv5, 3, 3, 2, 2)
resh1 = tf.reshape(pool5, [-1, 256 * 5 * 5])
affn1 = _affine(resh1, 256 * 5 * 5, 768)
affn2 = _affine(affn1, 768, 1024)
affn3 = _affine(affn2, 1024, 1000)
return affn3
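# Standalone sketch (assumed helper, not called by the benchmark) checking
# that the conv/pool stack above turns a 231x231 input into the 5x5x256
# volume expected by the reshape to 256 * 5 * 5.
def _example_output_size(size=231):
  def valid(n, k, s):
    return (n - k) // s + 1
  n = valid(size, 11, 4)  # conv1, VALID, stride 4 -> 56
  n = valid(n, 3, 2)      # pool1                  -> 27
  n = valid(n, 5, 1)      # conv2, VALID           -> 23
  n = valid(n, 3, 2)      # pool2                  -> 11
  # conv3..conv5 use SAME padding with stride 1, so the size stays at 11
  n = valid(n, 3, 2)      # pool5                  -> 5
  return n
# _example_output_size() == 5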
'''
def inference(images):
conv1 = _conv (images, 3, 96, 11, 11, 4, 4, 'VALID')
pool1 = _mpool(conv1, 2, 2, 2, 2)
conv2 = _conv(pool1, 96, 256, 5, 5, 1, 1, 'VALID')
pool2 = _mpool(conv2, 2, 2, 2, 2)
conv3 = _conv (pool2, 256, 512, 3, 3, 1, 1, 'SAME')
conv4 = _conv (conv3, 512, 1024, 3, 3, 1, 1, 'SAME')
conv5 = _conv (conv4, 1024, 1024, 3, 3, 1, 1, 'SAME')
pool5 = _mpool(conv5, 2, 2, 2, 2)
resh1 = tf.reshape(pool5, [-1, 1024 * 6 * 6])
affn1 = _affine(resh1, 1024 * 6 * 6, 3072)
affn2 = _affine(affn1, 3072, 4096)
affn3 = _affine(affn2, 4096, 1000)
return affn3
'''
def time_tensorflow_run(session, target, info_string):
num_steps_burn_in = 10
total_duration = 0.0
total_duration_squared = 0.0
if not isinstance(target, list):
target = [target]
target_op = tf.group(*target)
for i in range(FLAGS.num_batches + num_steps_burn_in):
start_time = time.time()
_ = session.run(target_op)
duration = time.time() - start_time
    if i >= num_steps_burn_in:
if not i % 10:
print ('%s: step %d, duration = %.3f' %
(datetime.now(), i - num_steps_burn_in, duration))
total_duration += duration
total_duration_squared += duration * duration
mn = total_duration / FLAGS.num_batches
vr = total_duration_squared / FLAGS.num_batches - mn * mn
sd = math.sqrt(vr)
print ('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
(datetime.now(), info_string, FLAGS.num_batches, mn, sd))
return TimingEntry(info_string, datetime.now(), FLAGS.num_batches, mn, sd)
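# Minimal sketch (sample durations are assumptions) of the statistics used
# above: the mean and the standard deviation recovered from the running sum
# and sum of squares.
def _example_mean_sd(durations=(0.10, 0.12, 0.11)):
  n = len(durations)
  total = sum(durations)
  total_sq = sum(d * d for d in durations)
  mean = total / n
  return mean, math.sqrt(total_sq / n - mean * mean)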
def store_data_in_csv(timing_entries):
with open(FLAGS.csv_file, 'wb') as csvfile:
writer = csv.writer(csvfile)
for timing_entry in timing_entries:
writer.writerow(
[timing_entry.info_string, timing_entry.timestamp,
timing_entry.num_batches, timing_entry.mean, timing_entry.sd])
def run_benchmark():
global parameters
timing_entries = []
with tf.Graph().as_default():
# Generate some dummy images.
image_size = 231
    # Note that our padding definition is slightly different from cuda-convnet.
# In order to force the model to start with the same activations sizes,
# we add 3 to the image_size and employ VALID padding above.
if FLAGS.data_format == 'NCHW':
image_shape = [FLAGS.batch_size, 3, image_size, image_size]
else:
image_shape = [FLAGS.batch_size, image_size, image_size, 3]
images = tf.Variable(tf.random_normal(image_shape,
dtype=tf.float32,
stddev=1e-1))
labels = tf.Variable(tf.ones([FLAGS.batch_size],
dtype=tf.int32))
# Build a Graph that computes the logits predictions from the
# inference model.
last_layer = inference(images)
# Build an initialization operation.
init = tf.global_variables_initializer()
# Start running operations on the Graph.
sess = tf.Session('')
sess.run(init)
run_forward = True
run_forward_backward = True
if FLAGS.forward_only and FLAGS.forward_backward_only:
raise ValueError("Cannot specify --forward_only and "
"--forward_backward_only at the same time.")
if FLAGS.forward_only:
run_forward_backward = False
elif FLAGS.forward_backward_only:
run_forward = False
if run_forward:
# Run the forward benchmark.
timing_entries.append(time_tensorflow_run(sess, last_layer, "Forward"))
if run_forward_backward:
# Add a simple objective so we can calculate the backward pass.
objective = loss(last_layer, labels)
# Compute the gradient with respect to all the parameters.
grad = tf.gradients(objective, parameters)
# Run the backward benchmark.
timing_entries.append(time_tensorflow_run(sess, grad, "Forward-backward"))
if FLAGS.csv_file:
store_data_in_csv(timing_entries)
def main(_):
run_benchmark()
if __name__ == '__main__':
tf.app.run()
|
|
class Stamp:
def __init__(self, name):
self.name = name
self.number = 42
def __call__(self, something):
print("{0} was stamped by {1}".format(something, self.name))
print("number: " + str(self.number))
stamp = Stamp("The government")
stamp("That thing there") # That thing there was stamped by The government
print(40 * '-')
stamppp = Stamp("The government")
stamp("That thing there")
print(getattr(stamppp, 'number'))
# number = int(input("Enter a number: "))
# setattr(stamppp, 'number', int(input("Enter a number: ")))
print(getattr(stamppp, 'number'))
stamppp("Peshko")
print(40 * '-')
class Countable:
_count = 0
def __init__(self, data):
self.data = data
type(self).increase_count()
@classmethod
def increase_count(cls):
cls._count += 1
@classmethod
def decrease_count(cls):
cls._count -= 1
def __del__(self):
type(self).decrease_count()
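# Brief illustrative usage (the argument values are assumptions) showing that
# the counter lives on the class, not on the instances; CPython's reference
# counting runs __del__ as soon as the last reference disappears.
first = Countable("spam")
second = Countable("eggs")
print(Countable._count)   # 2
del first
print(Countable._count)   # 1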
print(40 * '-')
class GoatSimulator:
goats = []
@staticmethod
def register(name):
GoatSimulator.goats.append(name)
print(len(GoatSimulator.goats), " goats are registered now")
GoatSimulator.register("Pip the Happy Goat")
# 1 goats are registered now
GoatSimulator.register("George the Gutsy Goat")
# 2 goats are registered now
print(40 * '-')
class Vector:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
# def __getitem__(self, i):
# return (self.x, self.y, self.z)[i]
# def __setitem__(self, index, value):
# if index == 0:
# self.x = value
# elif index == 1:
# self.y = value
# elif index == 2:
# self.z = value
# else:
# pass
def __getitem__(self, index):
return getattr(self, ('x', 'y', 'z')[index])
def __setitem__(self, index, value):
return setattr(self, ('x', 'y', 'z')[index], value)
    def __str__(self):
return str((self.x, self.y, self.z))
def __len__(self):
return 3
def __add__(self, other):
return Vector(*map(sum, zip(self, other)))
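# Brief illustrative usage (values are assumptions) of the protocol methods
# defined above: indexing, item assignment, len() and vector addition.
v1 = Vector(1, 2, 3)
v2 = Vector(10, 20, 30)
print(v1[0], v1[2])   # 1 3
v1[1] = 5
print(v1)             # (1, 5, 3)
print(len(v1))        # 3
print(v1 + v2)        # (11, 25, 33)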
print(40 * '-')
class Spam:
def __init__(self):
self.eggs = 'larodi'
self.__var = 42
def __getattr__(self, name):
return name.upper()
# def __setattr__(self, name, value):
# print("Setting {0} to {1}".format(name, value))
# return object.__setattr__(self, name.upper(), value + 10)
# def foo(self):
# return 1
def answer(self):
return 42
spam = Spam()
spam.eggs = "Eggs"
print(spam.foo) # FOO
print(spam.bar) # BAR
print(spam.eggs) # Eggs
print(spam.answer()) # 42
spam.foo = 1
spam.bar = 2
print(spam.__dict__) # {'foo': 1, 'bar': 2}
print(spam.__class__) # <class '__main__.Spam'>
print(spam.__class__ is Spam) # True
print(40 * '-')
class Base:
def __init__(self, name, age):
self.__name = name
self._age = age
def report_base(self):
print("Base:", self.__name, self._age)
class Derived(Base):
def __init__(self, name, age, derived_name):
Base.__init__(self, name, age)
self.__name = derived_name
self._age = 33
def report_derived(self):
print("Derived:", self.__name, self._age)
derived = Derived("John", 0, "Doe")
print(derived.report_base()) # Base: John 33
print(derived.report_derived()) # Derived: Doe 33
print(derived._Base__name, derived._Derived__name) # John, Doe
# Mixins
class Screen: # ...
pass
class RadioTransmitter: # ...
pass
class GSMTransmitter(RadioTransmitter): # ...
pass
class Input: # ...
pass
class MultiTouchInput(Input): # ...
pass
class ButtonInput(Input): # ...
pass
class MassStorage: # ...
pass
class ProcessingUnit: # ...
pass
class Phone(ProcessingUnit, Screen, GSMTransmitter,
MultiTouchInput, ButtonInput, MassStorage): # ...
pass
class Tablet(ProcessingUnit, Screen, RadioTransmitter,
MultiTouchInput, MassStorage): # ...
pass
|
|
from copy import copy
from sympy.tensor.array.dense_ndim_array import MutableDenseNDimArray
from sympy import Symbol, Rational, SparseMatrix, diff
from sympy.matrices import Matrix
from sympy.tensor.array.sparse_ndim_array import MutableSparseNDimArray
from sympy.utilities.pytest import raises
def test_ndim_array_initiation():
arr_with_one_element = MutableDenseNDimArray([23])
assert len(arr_with_one_element) == 1
assert arr_with_one_element[0] == 23
assert arr_with_one_element.rank() == 1
raises(ValueError, lambda: arr_with_one_element[1])
arr_with_symbol_element = MutableDenseNDimArray([Symbol('x')])
assert len(arr_with_symbol_element) == 1
assert arr_with_symbol_element[0] == Symbol('x')
assert arr_with_symbol_element.rank() == 1
number5 = 5
vector = MutableDenseNDimArray.zeros(number5)
assert len(vector) == number5
assert vector.shape == (number5,)
assert vector.rank() == 1
raises(ValueError, lambda: arr_with_one_element[5])
vector = MutableSparseNDimArray.zeros(number5)
assert len(vector) == number5
assert vector.shape == (number5,)
assert vector._sparse_array == {}
assert vector.rank() == 1
n_dim_array = MutableDenseNDimArray(range(3**4), (3, 3, 3, 3,))
assert len(n_dim_array) == 3 * 3 * 3 * 3
assert n_dim_array.shape == (3, 3, 3, 3)
assert n_dim_array.rank() == 4
raises(ValueError, lambda: n_dim_array[0, 0, 0, 3])
raises(ValueError, lambda: n_dim_array[3, 0, 0, 0])
raises(ValueError, lambda: n_dim_array[3**4])
array_shape = (3, 3, 3, 3)
sparse_array = MutableSparseNDimArray.zeros(*array_shape)
assert len(sparse_array._sparse_array) == 0
assert len(sparse_array) == 3 * 3 * 3 * 3
assert n_dim_array.shape == array_shape
assert n_dim_array.rank() == 4
one_dim_array = MutableDenseNDimArray([2, 3, 1])
assert len(one_dim_array) == 3
assert one_dim_array.shape == (3,)
assert one_dim_array.rank() == 1
assert one_dim_array.tolist() == [2, 3, 1]
shape = (3, 3)
array_with_many_args = MutableSparseNDimArray.zeros(*shape)
assert len(array_with_many_args) == 3 * 3
assert array_with_many_args.shape == shape
assert array_with_many_args[0, 0] == 0
assert array_with_many_args.rank() == 2
def test_reshape():
array = MutableDenseNDimArray(range(50), 50)
assert array.shape == (50,)
assert array.rank() == 1
array = array.reshape(5, 5, 2)
assert array.shape == (5, 5, 2)
assert array.rank() == 3
assert len(array) == 50
def test_iterator():
array = MutableDenseNDimArray(range(4), (2, 2))
j = 0
for i in array:
assert i == j
j += 1
array = array.reshape(4)
j = 0
for i in array:
assert i == j
j += 1
def test_sparse():
sparse_array = MutableSparseNDimArray([0, 0, 0, 1], (2, 2))
assert len(sparse_array) == 2 * 2
    # dictionary backing the data; only non-zero entries are actually stored:
assert len(sparse_array._sparse_array) == 1
assert list(sparse_array) == [0, 0, 0, 1]
for i, j in zip(sparse_array, [0, 0, 0, 1]):
assert i == j
sparse_array[0, 0] = 123
assert len(sparse_array._sparse_array) == 2
assert sparse_array[0, 0] == 123
    # when an element of the sparse array becomes zero, it disappears
    # from the dictionary
sparse_array[0, 0] = 0
assert len(sparse_array._sparse_array) == 1
sparse_array[1, 1] = 0
assert len(sparse_array._sparse_array) == 0
assert sparse_array[0, 0] == 0
def test_calculation():
a = MutableDenseNDimArray([1]*9, (3, 3))
b = MutableDenseNDimArray([9]*9, (3, 3))
c = a + b
for i in c:
assert i == 10
assert c == MutableDenseNDimArray([10]*9, (3, 3))
assert c == MutableSparseNDimArray([10]*9, (3, 3))
c = b - a
for i in c:
assert i == 8
assert c == MutableDenseNDimArray([8]*9, (3, 3))
assert c == MutableSparseNDimArray([8]*9, (3, 3))
def test_ndim_array_converting():
dense_array = MutableDenseNDimArray([1, 2, 3, 4], (2, 2))
alist = dense_array.tolist()
    assert alist == [[1, 2], [3, 4]]
matrix = dense_array.tomatrix()
assert (isinstance(matrix, Matrix))
for i in range(len(dense_array)):
assert dense_array[i] == matrix[i]
assert matrix.shape == dense_array.shape
assert MutableDenseNDimArray(matrix) == dense_array
assert MutableDenseNDimArray(matrix.as_immutable()) == dense_array
assert MutableDenseNDimArray(matrix.as_mutable()) == dense_array
sparse_array = MutableSparseNDimArray([1, 2, 3, 4], (2, 2))
alist = sparse_array.tolist()
assert alist == [[1, 2], [3, 4]]
matrix = sparse_array.tomatrix()
assert(isinstance(matrix, SparseMatrix))
for i in range(len(sparse_array)):
assert sparse_array[i] == matrix[i]
assert matrix.shape == sparse_array.shape
assert MutableSparseNDimArray(matrix) == sparse_array
assert MutableSparseNDimArray(matrix.as_immutable()) == sparse_array
assert MutableSparseNDimArray(matrix.as_mutable()) == sparse_array
def test_converting_functions():
arr_list = [1, 2, 3, 4]
arr_matrix = Matrix(((1, 2), (3, 4)))
# list
arr_ndim_array = MutableDenseNDimArray(arr_list, (2, 2))
assert (isinstance(arr_ndim_array, MutableDenseNDimArray))
assert arr_matrix.tolist() == arr_ndim_array.tolist()
# Matrix
arr_ndim_array = MutableDenseNDimArray(arr_matrix)
assert (isinstance(arr_ndim_array, MutableDenseNDimArray))
assert arr_matrix.tolist() == arr_ndim_array.tolist()
assert arr_matrix.shape == arr_ndim_array.shape
def test_equality():
first_list = [1, 2, 3, 4]
second_list = [1, 2, 3, 4]
third_list = [4, 3, 2, 1]
assert first_list == second_list
assert first_list != third_list
first_ndim_array = MutableDenseNDimArray(first_list, (2, 2))
second_ndim_array = MutableDenseNDimArray(second_list, (2, 2))
third_ndim_array = MutableDenseNDimArray(third_list, (2, 2))
fourth_ndim_array = MutableDenseNDimArray(first_list, (2, 2))
assert first_ndim_array == second_ndim_array
second_ndim_array[0, 0] = 0
assert first_ndim_array != second_ndim_array
assert first_ndim_array != third_ndim_array
assert first_ndim_array == fourth_ndim_array
def test_arithmetic():
a = MutableDenseNDimArray([3 for i in range(9)], (3, 3))
b = MutableDenseNDimArray([7 for i in range(9)], (3, 3))
c1 = a + b
c2 = b + a
assert c1 == c2
d1 = a - b
d2 = b - a
assert d1 == d2 * (-1)
e1 = a * 5
e2 = 5 * a
e3 = copy(a)
e3 *= 5
assert e1 == e2 == e3
f1 = a / 5
f2 = copy(a)
f2 /= 5
assert f1 == f2
assert f1[0, 0] == f1[0, 1] == f1[0, 2] == f1[1, 0] == f1[1, 1] == \
f1[1, 2] == f1[2, 0] == f1[2, 1] == f1[2, 2] == Rational(3, 5)
assert type(a) == type(b) == type(c1) == type(c2) == type(d1) == type(d2) \
== type(e1) == type(e2) == type(e3) == type(f1)
def test_higher_dimensions():
m3 = MutableDenseNDimArray(range(10, 34), (2, 3, 4))
assert m3.tolist() == [[[10, 11, 12, 13],
[14, 15, 16, 17],
[18, 19, 20, 21]],
[[22, 23, 24, 25],
[26, 27, 28, 29],
[30, 31, 32, 33]]]
assert m3._get_tuple_index(0) == (0, 0, 0)
assert m3._get_tuple_index(1) == (0, 0, 1)
assert m3._get_tuple_index(4) == (0, 1, 0)
assert m3._get_tuple_index(12) == (1, 0, 0)
assert str(m3) == '[[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]]'
m3_rebuilt = MutableDenseNDimArray([[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]])
assert m3 == m3_rebuilt
m3_other = MutableDenseNDimArray([[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]], (2, 3, 4))
assert m3 == m3_other
def test_slices():
md = MutableDenseNDimArray(range(10, 34), (2, 3, 4))
assert md[:] == md._array
assert md[:, :, 0].tomatrix() == Matrix([[10, 14, 18], [22, 26, 30]])
assert md[0, 1:2, :].tomatrix() == Matrix([[14, 15, 16, 17]])
assert md[0, 1:3, :].tomatrix() == Matrix([[14, 15, 16, 17], [18, 19, 20, 21]])
assert md[:, :, :] == md
sd = MutableSparseNDimArray(range(10, 34), (2, 3, 4))
assert sd == MutableSparseNDimArray(md)
assert sd[:] == md._array
assert sd[:] == list(sd)
assert sd[:, :, 0].tomatrix() == Matrix([[10, 14, 18], [22, 26, 30]])
assert sd[0, 1:2, :].tomatrix() == Matrix([[14, 15, 16, 17]])
assert sd[0, 1:3, :].tomatrix() == Matrix([[14, 15, 16, 17], [18, 19, 20, 21]])
assert sd[:, :, :] == sd
def test_diff():
from sympy.abc import x, y, z
md = MutableDenseNDimArray([[x, y], [x*z, x*y*z]])
assert md.diff(x) == MutableDenseNDimArray([[1, 0], [z, y*z]])
assert diff(md, x) == MutableDenseNDimArray([[1, 0], [z, y*z]])
sd = MutableSparseNDimArray(md)
assert sd == MutableSparseNDimArray([x, y, x*z, x*y*z], (2, 2))
assert sd.diff(x) == MutableSparseNDimArray([[1, 0], [z, y*z]])
assert diff(sd, x) == MutableSparseNDimArray([[1, 0], [z, y*z]])
|
|
import hashlib
import binascii
import random
import time
import hmac
rng = random.SystemRandom()
def init_key_generation(keylengthbits):
if keylengthbits < 8:
keylengthbits = 8
elif keylengthbits % 8 != 0:
keylengthbits += ( 8 - keylengthbits % 8)
key = []
iters = keylengthbits // 8
for i in range(0,iters):
key.append(format(rng.randint(0,255), '02x'))
return "".join(key)
def do_xor_on_hex(str1,str2):
l1 = len(str1)
if l1 != len(str2) or l1 % 2 != 0:
print("ERROR!")
return "Error"
xor = []
for i in range(0,l1,2):
xor.append(format(int(str1[i:i+2],16)^int(str2[i:i+2],16),"02x"))
return "".join(xor)
def do_xor_on_bytes(bs1,bs2):
l1 = len(bs1)
if l1 != len(bs2):
print("ERROR!")
return "Error"
xor = bytearray()
for i in range(0,l1):
xor.append(bs1[i] ^ bs2[i])
return xor
def hex_transpose(hexstr):
v1 = 0
newhex = []
hexlen = len(hexstr)
for i in range(0,hexlen,2):
newhex.append(hexstr[i+1] + hexstr[i])
newhex2 = newhex[(hexlen//4):] + newhex[0:(hexlen//4)]
#print(newhex2)
return "".join(newhex2)
def byte_transpose(binarr):
binarrlen = len(binarr)
newbin = bytearray()
for i in range(0,binarrlen,2):
newbin.append(binarr[i+1])
newbin.append(binarr[i])
newbin2 = newbin[(binarrlen//2):] + newbin[:(binarrlen//2)]
return newbin2
def generate_header_contents(f_len, password, ver, key_amount):
header = []
if key_amount > 65535 or len(ver) != 2:
return "F"
print('key amount:',key_amount)
key_amount_str = format(key_amount, '02x')
while len(key_amount_str) < 4:
key_amount_str = "0" + key_amount_str
header.append(key_amount_str)
print(header)
final_key_split = []
for i in range(0,key_amount):
cs = init_key_generation(512)
print('salt:',cs)
ck = init_key_generation(512)
final_key_split.append(ck)
#print(hashlib.pbkdf2_hmac('sha512', password.encode(), bytes.fromhex(cs), 10000))
k_xor_mask = bytes.decode(binascii.hexlify(hashlib.pbkdf2_hmac('sha512', password.encode(), bytes.fromhex(cs), 500000)))
ciphered_key = do_xor_on_hex(k_xor_mask,ck)
header.append(cs)
header.append(ciphered_key)
print('version:',ver)
print('length:',f_len)
header.append(ver)
header.append(f_len)
hmac_salt = header[1]
#print(hmac_salt)
k_pbkdf_hmac = hashlib.pbkdf2_hmac('sha512', password.encode(), bytes.fromhex(hmac_salt), 500000)
n_head = "".join(header)
#print(n_head)
hmac_val = hmac.new(k_pbkdf_hmac, n_head.encode(), hashlib.sha512).hexdigest()
n_head_2 = []
n_head_2.append(n_head)
n_head_2.append(hmac_val)
print('key:', "".join(final_key_split))
return "".join(n_head_2), "".join(final_key_split)
def read_header_contents(header_str, password):
key_amount = int(header_str[0:4],16)
print('key amount:',key_amount)
hmac_in_hdr = header_str[-128:]
#print(header_str[4:132])
k_pbkdf_hmac = hashlib.pbkdf2_hmac('sha512', password.encode(), bytes.fromhex(header_str[4:132]), 500000)
hmac_val = hmac.new(k_pbkdf_hmac, header_str[:-128].encode(), hashlib.sha512).hexdigest()
if hmac_in_hdr == hmac_val:
hmac_validated = True
else:
hmac_validated = False
print('read hmac:',hmac_in_hdr)
print('calculated hmac:', hmac_val)
final_key = []
for i in range(0,key_amount):
cs = header_str[(i*256)+4:(i*256)+132]
print('salt:',cs)
ck = header_str[(i*256)+132:(i*256)+260]
#print(hashlib.pbkdf2_hmac('sha512', password.encode(), bytes.fromhex(cs), 10000))
k_xor_mask = bytes.decode(binascii.hexlify(hashlib.pbkdf2_hmac('sha512', password.encode(), bytes.fromhex(cs), 500000)))
deciphered_key = do_xor_on_hex(k_xor_mask,ck)
final_key.append(deciphered_key)
ver = header_str[(key_amount*256)+4:(key_amount*256)+6]
length = header_str[(key_amount*256)+6:-128]
print('version:',ver)
print('length:',length)
fk = "".join(final_key)
print('key:', fk)
return fk, ver, length, hmac_validated
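# Layout of the hex header parsed above, reconstructed from the slicing
# (offsets are in hex characters; salts and keys are 512 bits = 128 chars):
#   [0:4]                                       key amount
#   per key i, 256 chars each:
#     [i*256 + 4   : i*256 + 132]               salt
#     [i*256 + 132 : i*256 + 260]               ciphered key (PBKDF2 mask XOR key)
#   [key_amount*256 + 4 : key_amount*256 + 6]   version
#   [key_amount*256 + 6 : -128]                 plaintext length (hex)
#   [-128:]                                     HMAC-SHA512 over everything before it
# The first salt is also the one used to derive the HMAC key.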
class sha512_nfb(object):
def __init__(self, init_key):
self.current_state = hashlib.sha512(bytes.fromhex(init_key)).digest()
def get_output(self):
initk = self.current_state
self.current_state = hashlib.sha512(initk).digest()
return hashlib.sha512(bytes.fromhex(hex_transpose(bytes.decode(binascii.hexlify(initk))))).hexdigest()
class sha512_efb(object):
def __init__(self, init_key):
self.current_key = bytearray.fromhex(init_key)
self.current_feedback = bytearray(hashlib.sha512(self.current_key).digest())
def get_bytes_to_xor(self):
self.current_key = self.current_key[-1:]+self.current_key[:-1]
self.current_thing_to_hash = self.current_key+self.current_feedback
self.current_feedback = bytearray(hashlib.sha512(self.current_thing_to_hash).digest())
self.current_output_bytes = bytearray(hashlib.sha512(byte_transpose(self.current_thing_to_hash)).digest())
return self.current_output_bytes
class sha512_efb_pfb(object):
def __init__(self, init_key):
self.current_key = bytearray.fromhex(init_key)
self.current_feedback = bytearray(hashlib.sha512(self.current_key).digest())
def get_bytes_to_xor(self,ptxthash):
self.current_key = self.current_key[-1:]+self.current_key[:-1]
self.current_thing_to_hash = self.current_key+self.current_feedback+ptxthash
self.current_feedback = bytearray(hashlib.sha512(self.current_thing_to_hash).digest())
self.current_output_bytes = bytearray(hashlib.sha512(byte_transpose(self.current_thing_to_hash)).digest())
return self.current_output_bytes
def encrypt_file(filename,passtouse,ver,key_par):
ftoe = open(filename,'rb')
ftoe_r = bytearray(ftoe.read())
ftoe_r_l = len(ftoe_r)
print(len(ftoe_r))
timestopad = 64-(ftoe_r_l%64)
for i in range(0,timestopad):
ftoe_r.append(rng.randint(0,255))
f_hash = hashlib.sha512(ftoe_r[0:ftoe_r_l]).digest()
ftoe_r.extend(f_hash)
print(len(ftoe_r))
headercontent, tkey = generate_header_contents(format(ftoe_r_l, '02x'),passtouse,ver,key_par)
nfname = filename + '.header'
nfname_e = filename + '.crypto'
hfi = open(nfname,'w')
hfi.write(headercontent)
hfi.close()
hfi = open(nfname,'r')
tkey2,_,_, hmac_s= read_header_contents(hfi.read(),passtouse)
hfi.close()
if tkey == tkey2 and hmac_s == True:
print('Header file created, written and validated')
ftoe_r_l = len(ftoe_r)
enc_file = bytearray()
timestoencrypt = ftoe_r_l // 64
csc = max(1,int(timestoencrypt/100))
time_st = time.time()
if ver == '01':
cipher_object = sha512_efb(tkey)
for i in range(0,timestoencrypt):
cc = ftoe_r[(i*64):(i*64)+64]
cbx = cipher_object.get_bytes_to_xor()
ce = do_xor_on_bytes(cc,cbx)
enc_file.extend(ce)
if i % csc == 0:
print(str(int(round((i*100/timestoencrypt),0)))+'%')
elif ver == '02':
cipher_object = sha512_efb_pfb(tkey)
iv = hashlib.sha512(bytes.fromhex(hex_transpose(tkey))).digest()
cfb = iv
for i in range(0,timestoencrypt):
cc = ftoe_r[(i*64):(i*64)+64]
cbx = cipher_object.get_bytes_to_xor(cfb)
ce = do_xor_on_bytes(cc,cbx)
cfb = hashlib.sha512(cc).digest()
enc_file.extend(ce)
if i % csc == 0:
print(str(int(round((i*100/timestoencrypt),0)))+'%')
fout = open(nfname_e,'wb')
fout.write(enc_file)
fout.close()
#print('wk:',tkey)
#print('rk:',tkey2)
print('time: ', str(time.time()-time_st))
def decrypt_file(filename,passtouse):
nfname = filename + '.header'
nfname_e = filename + '.crypto'
hfile = open(nfname,'r')
key,ver,hlen,val = read_header_contents(hfile.read(),passtouse)
length = int(hlen,16)
if val == False:
print('Wrong password, or corrupted/tampered header')
x = input('Press Y to continue, other key to quit ')
if (x != 'Y') and (x != 'y'):
return "F"
else:
print('Header read and OK')
efile = open(nfname_e,'rb')
efile_r = efile.read()
d_file = bytearray()
timestodecrypt = len(efile_r) // 64
csc = max(1,int(timestodecrypt/100))
time_st = time.time()
if ver == '01':
cipher_object = sha512_efb(key)
for i in range(0,timestodecrypt):
ce = efile_r[(i*64):(i*64)+64]
cbx = cipher_object.get_bytes_to_xor()
cd = do_xor_on_bytes(ce,cbx)
d_file.extend(cd)
if i % csc == 0:
print(str(int(round((i*100/timestodecrypt),0)))+'%')
elif ver == '02':
cipher_object = sha512_efb_pfb(key)
iv = hashlib.sha512(bytes.fromhex(hex_transpose(key))).digest()
cfb = iv
for i in range(0,timestodecrypt):
ce = efile_r[(i*64):(i*64)+64]
cbx = cipher_object.get_bytes_to_xor(cfb)
cd = do_xor_on_bytes(ce,cbx)
cfb = hashlib.sha512(cd).digest()
d_file.extend(cd)
if i % csc == 0:
print(str(int(round((i*100/timestodecrypt),0)))+'%')
fcalc_hash = hashlib.sha512(d_file[0:length]).digest()
if fcalc_hash == d_file[-64:]:
print('File OK')
else:
print('File has been tampered or corrupted')
x = input('Press Y to continue, other key to quit ')
if (x != 'Y') and (x != 'y'):
return "F"
print('time: ', str(time.time()-time_st))
outf = open(filename,'wb')
    outf.write(d_file[0:length])  # write only the original plaintext, dropping padding and hash
outf.close()
def change_password(filename,password_old,password_new):
nfname = filename + '.header'
nf = open(nfname,'r')
header_str = nf.read()
nf.close()
key_amount = int(header_str[0:4],16)
hmac_in_hdr = header_str[-128:]
k_pbkdf_hmac = hashlib.pbkdf2_hmac('sha512', password_old.encode(), bytes.fromhex(header_str[4:132]), 500000)
hmac_val = hmac.new(k_pbkdf_hmac, header_str[:-128].encode(), hashlib.sha512).hexdigest()
if hmac_in_hdr != hmac_val:
hmac_validated = False
print('Wrong password, or corrupted/tampered header')
print('If you continue, damage could be irreversible')
x = input('Press Y to continue, other key to quit ')
if (x != 'Y') and (x != 'y'):
return "F"
else:
hmac_validated = True
print('read hmac:',hmac_in_hdr)
print('calculated hmac:', hmac_val)
new_header = []
new_header.append(header_str[0:4])
for i in range(0,key_amount):
cs = header_str[(i*256)+4:(i*256)+132]
ck = header_str[(i*256)+132:(i*256)+260]
k_xor_mask_d = bytes.decode(binascii.hexlify(hashlib.pbkdf2_hmac('sha512', password_old.encode(), bytes.fromhex(cs), 500000)))
deciphered_key = do_xor_on_hex(k_xor_mask_d,ck)
k_xor_mask_e = bytes.decode(binascii.hexlify(hashlib.pbkdf2_hmac('sha512', password_new.encode(), bytes.fromhex(cs), 500000)))
reciphered_key = do_xor_on_hex(k_xor_mask_e,deciphered_key)
new_header.append(cs)
new_header.append(reciphered_key)
ver = header_str[(key_amount*256)+4:(key_amount*256)+6]
length = header_str[(key_amount*256)+6:-128]
new_header.append(ver)
new_header.append(length)
f_header = "".join(new_header)
k_pbkdf_hmac_n = hashlib.pbkdf2_hmac('sha512', password_new.encode(), bytes.fromhex(f_header[4:132]), 500000)
hmac_val_n = hmac.new(k_pbkdf_hmac_n, f_header.encode(), hashlib.sha512).hexdigest()
nh = []
nh.append(f_header)
nh.append(hmac_val_n)
finalr_head = "".join(nh)
finalf = open(nfname,'w')
finalf.write(finalr_head)
finalf.close()
print('Done!')
# k2 = init_key_generation(512)
# k3 = binascii.hexlify(binascii.unhexlify(k2))
# print(k3)
# print(bytes.decode(k3))
# print(bytes.decode(binascii.unhexlify(k2)))
# size = int(input("Enter file size in MB"))
# rs = size * 16384
# cs = 0
# nf = open("rngtest2.bin","wb")
# while cs < rs:
# if cs % 327680 == 0:
# print(str(cs//16384)+"MB")
# nf.write(binascii.unhexlify(k1.get_output()))
# cs += 1
# nf.close()
# f = open("testheader.txt",'w')
# file_length = format(int(input('length')), '02x')
# version = format(int(input('version')), '02x')
# k_am = int(input('key amount'))
# password = str(input('password'))
# file_length = format(1321315234631,'02x')
#version = '01'
#k_am = 1
# password = 'SAFW324cs'
# temp = generate_header_contents(file_length, password, version, k_am)
# f.write(temp)
# f.close()
# test_key =read_header_contents(temp, password)
# testc = sha512_efb(test_key)
# print()
# print(binascii.hexlify(testc.current_key))
# print(binascii.hexlify(testc.current_feedback))
# for i in range(1,20):
# print(binascii.hexlify(testc.get_bytes_to_xor()))
# print('key:',binascii.hexlify(testc.current_key))
#print('feed:',binascii.hexlify(testc.current_feedback))
print('Encryption Test v2 r1.0')
print('By fabrizziop')
print('MIT licence')
ed = int(input('1: Encrypt, 2: Decrypt, 3: Change Password '))
if ed == 1:
fname = input('File name to encrypt: ')
k_am = int(input('Key length = 512 * '))
passw = input('Password: ')
ver = int(input('Version: '))
if ver == 1:
version = '01'
    elif ver == 2:
        version = '02'
    else:
        raise SystemExit('Unsupported version: choose 1 or 2')
    encrypt_file(fname,passw,version,k_am)
elif ed == 2:
fname = input('File name to decrypt: ')
passw = input('Password: ')
decrypt_file(fname,passw)
elif ed == 3:
fname = input('File name to change password: ')
passw = input('Old Password: ')
passwn = input('New Password: ')
change_password(fname, passw, passwn)
# k1 = init_key_generation(512)
# print(k1)
# print(hex_transpose(k1))
# k1b = bytes.fromhex(k1)
# print(k1b)
# k1bt = byte_transpose(k1b)
# print(k1bt)
# k1btt = byte_transpose(k1bt)
# print(bytes.decode(binascii.hexlify(k1bt)))
# print(bytes.decode(binascii.hexlify(k1btt)))
# k2 = init_key_generation(512)
# xor_hex = do_xor_on_hex(k1,k2)
# print(xor_hex)
# k3 = bytes.fromhex(k1)
# k4 = bytes.fromhex(k2)
# xor_bytes = do_xor_on_bytes(k3,k4)
# print(binascii.hexlify(xor_bytes))
# print(xor_bytes)
#print(k1.current_state)
|
|
import base64
import imp
import os
import pickle
import sys
import tempfile
from StringIO import StringIO
from pygresql.pg import DatabaseError
from gparray import GpDB, GpArray, Segment
import shutil
from mock import *
from gp_unittest import *
from gphostcache import GpHost
db_singleton_side_effect_list = []
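# Tests append either the value the mocked execSQLForSingleton should return,
# or the string "DatabaseError" to make it raise instead.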
def singleton_side_effect(unused1, unused2):
# this function replaces dbconn.execSQLForSingleton(conn, sql), conditionally raising exception
if db_singleton_side_effect_list[0] == "DatabaseError":
raise DatabaseError("mock exception")
return db_singleton_side_effect_list[0]
class GpConfig(GpTestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
        self.postgresql_conf = self.temp_dir + "/postgresql.conf"
        with open(self.postgresql_conf, "w"):
            pass  # just create an empty postgresql.conf
# because gpconfig does not have a .py extension,
# we have to use imp to import it
# if we had a gpconfig.py, this is equivalent to:
# import gpconfig
# self.subject = gpconfig
gpconfig_file = os.path.abspath(os.path.dirname(__file__) + "/../../../gpconfig")
self.subject = imp.load_source('gpconfig', gpconfig_file)
self.subject.LOGGER = Mock(spec=['log', 'warn', 'info', 'debug', 'error', 'warning', 'fatal'])
self.conn = Mock()
self.cursor = FakeCursor()
self.os_env = dict(USER="my_user")
self.os_env["MASTER_DATA_DIRECTORY"] = self.temp_dir
self.gparray = self._create_gparray_with_2_primary_2_mirrors()
self.host_cache = Mock()
self.host = GpHost('localhost')
seg = Segment()
db = self.gparray.master
seg.addPrimary(db)
seg.datadir = self.gparray.master.datadir
seg.hostname = 'localhost'
self.host.addDB(seg)
self.host_cache.get_hosts.return_value = [self.host]
self.host_cache.ping_hosts.return_value = []
self.master_read_config = Mock()
self.master_read_config.get_guc_value.return_value = "foo"
self.master_read_config.get_seg_content_id.return_value = -1
self.segment_read_config = Mock()
self.segment_read_config.get_guc_value.return_value = "foo"
self.segment_read_config.get_seg_content_id.return_value = 0
self.pool = Mock()
self.pool.getCompletedItems.return_value = [self.master_read_config, self.segment_read_config]
self.apply_patches([
patch('os.environ', new=self.os_env),
patch('gpconfig.dbconn.connect', return_value=self.conn),
patch('gpconfig.dbconn.execSQL', return_value=self.cursor),
patch('gpconfig.dbconn.execSQLForSingleton', side_effect=singleton_side_effect),
patch('gpconfig.GpHostCache', return_value=self.host_cache),
patch('gpconfig.GpArray.initFromCatalog', return_value=self.gparray),
patch('gpconfig.GpReadConfig', return_value=self.master_read_config),
patch('gpconfig.WorkerPool', return_value=self.pool)
])
sys.argv = ["gpconfig"] # reset to relatively empty args list
def tearDown(self):
shutil.rmtree(self.temp_dir)
super(GpConfig, self).tearDown()
del db_singleton_side_effect_list[:]
def test_when_no_options_prints_and_raises(self):
with self.assertRaisesRegexp(Exception, "No action specified. See the --help info."):
self.subject.do_main()
self.subject.LOGGER.error.assert_called_once_with("No action specified. See the --help info.")
def test_option_list_parses(self):
sys.argv = ["gpconfig", "--list"]
options = self.subject.parseargs()
self.assertEquals(options.list, True)
def test_option_value_must_accompany_option_change_raise(self):
sys.argv = ["gpconfig", "--change", "statement_mem"]
with self.assertRaisesRegexp(Exception, "change requested but value not specified"):
self.subject.parseargs()
self.subject.LOGGER.error.assert_called_once_with("change requested but value not specified")
def test_option_show_without_master_data_dir_will_succeed(self):
sys.argv = ["gpconfig", "--show", "statement_mem"]
del self.os_env["MASTER_DATA_DIRECTORY"]
self.subject.parseargs()
@patch('sys.stdout', new_callable=StringIO)
def test_option_show_with_port_will_succeed(self, mock_stdout):
sys.argv = ["gpconfig", "--show", "port"]
        # select * from gp_toolkit.gp_param_setting('port');
# paramsegment | paramname | paramvalue
# --------------+-----------+------------
self.cursor.set_result_for_testing([['-1', 'port', '1234'], ['0', 'port', '3456']])
self.subject.do_main()
self.assertIn("GUC : port\nContext: -1 Value: 1234\nContext: 0 Value: 3456\n",
mock_stdout.getvalue())
def test_option_f_parses(self):
sys.argv = ["gpconfig", "--file", "--show", "statement_mem"]
options = self.subject.parseargs()
self.assertEquals(options.show, "statement_mem")
self.assertEquals(options.file, True)
def test_option_file_with_option_change_will_raise(self):
sys.argv = ["gpconfig", "--file", "--change", "statement_mem"]
with self.assertRaisesRegexp(Exception, "'--file' option must accompany '--show' option"):
self.subject.parseargs()
self.subject.LOGGER.error.assert_called_once_with("'--file' option must accompany '--show' option")
def test_option_file_compare_with_file_will_raise(self):
sys.argv = ["gpconfig", "--file", "--show", "statement_mem", "--file-compare", ]
with self.assertRaisesRegexp(Exception, "'--file' option and '--file-compare' option cannot be used together"):
self.subject.parseargs()
self.subject.LOGGER.error.assert_called_once_with("'--file' option and '--file-compare' option cannot be used together")
def test_option_file_with_option_list_will_raise(self):
sys.argv = ["gpconfig", "--file", "--list", "statement_mem"]
with self.assertRaisesRegexp(Exception, "'--file' option must accompany '--show' option"):
self.subject.parseargs()
self.subject.LOGGER.error.assert_called_once_with("'--file' option must accompany '--show' option")
def test_option_file_without_master_data_dir_will_raise(self):
sys.argv = ["gpconfig", "--file", "--show", "statement_mem"]
del self.os_env["MASTER_DATA_DIRECTORY"]
with self.assertRaisesRegexp(Exception, "--file option requires that MASTER_DATA_DIRECTORY be set"):
self.subject.parseargs()
self.subject.LOGGER.error.assert_called_once_with("--file option requires that MASTER_DATA_DIRECTORY be set")
@patch('sys.stdout', new_callable=StringIO)
def test_option_f_will_report_presence_of_setting(self, mock_stdout):
sys.argv = ["gpconfig", "--show", "my_property_name", "--file"]
self.subject.do_main()
self.pool.addCommand.assert_called_once_with(self.master_read_config)
self.pool.join.assert_called_once_with()
self.pool.check_results.assert_called_once_with()
self.pool.haltWork.assert_called_once_with()
self.pool.joinWorkers.assert_called_once_with()
self.assertEqual(self.subject.LOGGER.error.call_count, 0)
self.assertIn("Master value: foo\nSegment value: foo", mock_stdout.getvalue())
@patch('sys.stdout', new_callable=StringIO)
def test_option_f_will_report_absence_of_setting(self, mock_stdout):
sys.argv = ["gpconfig", "--show", "my_property_name", "--file"]
self.master_read_config.get_guc_value.return_value = "-"
self.segment_read_config.get_guc_value.return_value = "seg_value"
self.subject.do_main()
self.assertEqual(self.subject.LOGGER.error.call_count, 0)
self.assertIn("Master value: -\nSegment value: seg_value", mock_stdout.getvalue())
@patch('sys.stdout', new_callable=StringIO)
def test_option_f_will_report_difference_segments_out_of_sync(self, mock_stdout):
sys.argv = ["gpconfig", "--show", "my_property_name", "--file"]
self.master_read_config.get_guc_value.return_value = 'foo'
self.segment_read_config.get_guc_value.return_value = 'bar'
another_segment_read_config = Mock()
another_segment_read_config.get_guc_value.return_value = "baz"
another_segment_read_config.get_seg_content_id.return_value = 1
self.pool.getCompletedItems.return_value.append(another_segment_read_config)
self.host_cache.get_hosts.return_value.extend([self.host, self.host])
self.subject.do_main()
self.assertEqual(self.pool.addCommand.call_count, 3)
self.assertEqual(self.subject.LOGGER.error.call_count, 0)
self.assertIn("WARNING: GUCS ARE OUT OF SYNC", mock_stdout.getvalue())
self.assertIn("bar", mock_stdout.getvalue())
self.assertIn("[name: my_property_name] [value: baz]", mock_stdout.getvalue())
def test_option_change_value_master_separate_succeed(self):
db_singleton_side_effect_list.append("some happy result")
entry = 'my_property_name'
sys.argv = ["gpconfig", "-c", entry, "-v", "100", "-m", "20"]
# 'SELECT name, setting, unit, short_desc, context, vartype, min_val, max_val FROM pg_settings'
self.cursor.set_result_for_testing([['my_property_name', 'setting', 'unit', 'short_desc',
'context', 'vartype', 'min_val', 'max_val']])
self.subject.do_main()
self.subject.LOGGER.info.assert_called_with("completed successfully")
self.assertEqual(self.pool.addCommand.call_count, 2)
segment_command = self.pool.addCommand.call_args_list[0][0][0]
self.assertTrue("my_property_name" in segment_command.cmdStr)
value = base64.urlsafe_b64encode(pickle.dumps("100"))
self.assertTrue(value in segment_command.cmdStr)
master_command = self.pool.addCommand.call_args_list[1][0][0]
self.assertTrue("my_property_name" in master_command.cmdStr)
value = base64.urlsafe_b64encode(pickle.dumps("20"))
self.assertTrue(value in master_command.cmdStr)
def test_option_change_value_masteronly_succeed(self):
db_singleton_side_effect_list.append("some happy result")
entry = 'my_property_name'
sys.argv = ["gpconfig", "-c", entry, "-v", "100", "--masteronly"]
# 'SELECT name, setting, unit, short_desc, context, vartype, min_val, max_val FROM pg_settings'
self.cursor.set_result_for_testing([['my_property_name', 'setting', 'unit', 'short_desc',
'context', 'vartype', 'min_val', 'max_val']])
self.subject.do_main()
self.subject.LOGGER.info.assert_called_with("completed successfully")
self.assertEqual(self.pool.addCommand.call_count, 1)
master_command = self.pool.addCommand.call_args_list[0][0][0]
self.assertTrue(("my_property_name") in master_command.cmdStr)
value = base64.urlsafe_b64encode(pickle.dumps("100"))
self.assertTrue(value in master_command.cmdStr)
def test_option_change_value_master_separate_fail_not_valid_guc(self):
db_singleton_side_effect_list.append("DatabaseError")
with self.assertRaisesRegexp(Exception, "not a valid GUC: my_property_name"):
sys.argv = ["gpconfig", "-c", "my_property_name", "-v", "100", "-m", "20"]
self.subject.do_main()
self.assertEqual(self.subject.LOGGER.fatal.call_count, 1)
def test_option_change_value_hidden_guc_with_skipvalidation(self):
sys.argv = ["gpconfig", "-c", "my_hidden_guc_name", "-v", "100", "--skipvalidation"]
self.subject.do_main()
self.subject.LOGGER.info.assert_called_with("completed successfully")
self.assertEqual(self.pool.addCommand.call_count, 2)
segment_command = self.pool.addCommand.call_args_list[0][0][0]
self.assertTrue("my_hidden_guc_name" in segment_command.cmdStr)
master_command = self.pool.addCommand.call_args_list[1][0][0]
self.assertTrue("my_hidden_guc_name" in master_command.cmdStr)
value = base64.urlsafe_b64encode(pickle.dumps("100"))
self.assertTrue(value in master_command.cmdStr)
def test_option_change_value_hidden_guc_without_skipvalidation(self):
db_singleton_side_effect_list.append("my happy result")
with self.assertRaisesRegexp(Exception, "GUC Validation Failed: my_hidden_guc_name cannot be changed under "
"normal conditions. Please refer to gpconfig documentation."):
sys.argv = ["gpconfig", "-c", "my_hidden_guc_name", "-v", "100"]
self.subject.do_main()
self.subject.LOGGER.fatal.assert_called_once_with("GUC Validation Failed: my_hidden_guc_name cannot be "
"changed under normal conditions. "
"Please refer to gpconfig documentation.")
@patch('sys.stdout', new_callable=StringIO)
def test_option_file_compare_returns_same_value(self, mock_stdout):
sys.argv = ["gpconfig", "-s", "my_property_name", "--file-compare"]
self.master_read_config.get_guc_value.return_value = 'foo'
self.master_read_config.get_seg_content_id.return_value = -1
self.segment_read_config.get_guc_value.return_value = 'foo'
self.segment_read_config.get_seg_content_id.return_value = 0
another_segment_read_config = Mock()
another_segment_read_config.get_guc_value.return_value = "foo"
another_segment_read_config.get_seg_content_id.return_value = 1
self.pool.getCompletedItems.return_value.append(another_segment_read_config)
self.cursor.set_result_for_testing([[-1, 'my_property_name', 'foo'],
[0, 'my_property_name', 'foo'],
[1, 'my_property_name', 'foo']])
self.subject.do_main()
self.assertIn("Master value: foo | file: foo", mock_stdout.getvalue())
self.assertIn("Segment value: foo | file: foo", mock_stdout.getvalue())
self.assertIn("Values on all segments are consistent", mock_stdout.getvalue())
@patch('sys.stdout', new_callable=StringIO)
def test_option_file_compare_returns_different_value(self, mock_stdout):
sys.argv = ["gpconfig", "-s", "my_property_name", "--file-compare"]
self.master_read_config.get_guc_value.return_value = 'foo'
self.master_read_config.get_seg_content_id.return_value = -1
self.master_read_config.get_seg_dbid.return_value = 0
self.segment_read_config.get_guc_value.return_value = 'foo'
self.segment_read_config.get_seg_content_id.return_value = 0
self.segment_read_config.get_seg_dbid.return_value = 1
another_segment_read_config = Mock()
another_segment_read_config.get_guc_value.return_value = "bar"
another_segment_read_config.get_seg_content_id.return_value = 1
another_segment_read_config.get_seg_dbid.return_value = 2
self.pool.getCompletedItems.return_value.append(another_segment_read_config)
self.cursor.set_result_for_testing([[-1, 'my_property_name', 'foo'],
[0, 'my_property_name', 'foo'],
[1, 'my_property_name', 'foo']])
self.subject.do_main()
self.assertIn("WARNING: GUCS ARE OUT OF SYNC: ", mock_stdout.getvalue())
self.assertIn("[context: -1] [dbid: 0] [name: my_property_name] [value: foo | file: foo]",
mock_stdout.getvalue())
self.assertIn("[context: 0] [dbid: 1] [name: my_property_name] [value: foo | file: foo]",
mock_stdout.getvalue())
self.assertIn("[context: 1] [dbid: 2] [name: my_property_name] [value: foo | file: bar]",
mock_stdout.getvalue())
@patch('sys.stdout', new_callable=StringIO)
def test_option_file_compare_with_standby_master_with_different_file_value_will_report_failure(self, mock_stdout):
sys.argv = ["gpconfig", "-s", "my_property_name", "--file-compare"]
self.cursor.set_result_for_testing([[-1, 'my_property_name', 'foo']])
self.master_read_config.get_guc_value.return_value = 'foo'
self.master_read_config.get_seg_content_id.return_value = -1
self.master_read_config.get_seg_dbid.return_value = 0
# standby mirror with bad file value
self.segment_read_config.get_guc_value.return_value = 'foo'
self.segment_read_config.get_seg_content_id.return_value = 0
self.segment_read_config.get_seg_dbid.return_value = 1
standby_segment_read_config = Mock()
standby_segment_read_config.get_guc_value.return_value = "bar"
standby_segment_read_config.get_seg_content_id.return_value = -1
standby_segment_read_config.get_seg_dbid.return_value = 2
self.pool.getCompletedItems.return_value.append(standby_segment_read_config)
self.subject.do_main()
self.assertIn("WARNING: GUCS ARE OUT OF SYNC: ", mock_stdout.getvalue())
self.assertIn("[context: -1] [dbid: 0] [name: my_property_name] [value: foo | file: foo]",
mock_stdout.getvalue())
self.assertIn("[context: -1] [dbid: 2] [name: my_property_name] [value: foo | file: bar]",
mock_stdout.getvalue())
@staticmethod
def _create_gparray_with_2_primary_2_mirrors():
master = GpDB.initFromString(
"1|-1|p|p|s|u|mdw|mdw|5432|None|/data/master||/data/master/base/10899,/data/master/base/1,/data/master/base/10898,/data/master/base/25780,/data/master/base/34782")
primary0 = GpDB.initFromString(
"2|0|p|p|s|u|sdw1|sdw1|40000|41000|/data/primary0||/data/primary0/base/10899,/data/primary0/base/1,/data/primary0/base/10898,/data/primary0/base/25780,/data/primary0/base/34782")
primary1 = GpDB.initFromString(
"3|1|p|p|s|u|sdw2|sdw2|40001|41001|/data/primary1||/data/primary1/base/10899,/data/primary1/base/1,/data/primary1/base/10898,/data/primary1/base/25780,/data/primary1/base/34782")
mirror0 = GpDB.initFromString(
"4|0|m|m|s|u|sdw2|sdw2|50000|51000|/data/mirror0||/data/mirror0/base/10899,/data/mirror0/base/1,/data/mirror0/base/10898,/data/mirror0/base/25780,/data/mirror0/base/34782")
mirror1 = GpDB.initFromString(
"5|1|m|m|s|u|sdw1|sdw1|50001|51001|/data/mirror1||/data/mirror1/base/10899,/data/mirror1/base/1,/data/mirror1/base/10898,/data/mirror1/base/25780,/data/mirror1/base/34782")
return GpArray([master, primary0, primary1, mirror0, mirror1])
if __name__ == '__main__':
run_tests()
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Module to run the optimizer modeller with different numbers of measures.
The aim is to visualize the minimal number of measures which keeps the
model error within acceptable limits.
usage: parsecpy_runmodel_errors [-h] --config CONFIG
                                [-p PARSECPYDATAFILEPATH]
                                [-i INITNUMBER] [-v VERBOSITY]
Script to run multiple runs with different numbers of train points
to model the application
optional arguments:
  -h, --help            show this help message and exit
  --config CONFIG       Path to the configuration file with the run
                        parameters
  -p PARSECPYDATAFILEPATH, --parsecpydatafilepath PARSECPYDATAFILEPATH
                        Path to the input data file from the specified
                        Parsec package.
  -i INITNUMBER, --initnumber INITNUMBER
                        Initial number of measures
  -v VERBOSITY, --verbosity VERBOSITY
                        verbosity level. 0 = No verbose
Example
parsecpy_runmodel_errors --config meuconfig.json
-p /var/myparsecdata.dat -v 3
"""
import os
import sys
import time
from datetime import datetime
import numpy as np
import json
import argparse
from sklearn.model_selection import ShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVR
from sklearn.kernel_ridge import KernelRidge
from sklearn.tree import DecisionTreeRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_squared_error
from concurrent import futures
from parsecpy import Swarm, CoupledAnnealer, ParsecData, ParsecModel
from parsecpy import data_detach, measures_idx_split_train_test
def workers(args):
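    # args is the tuple built in main(): (model name, model config dict,
    # measure data as returned by ParsecData.speedups(), train indices,
    # test indices).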
name = args[0]
config = args[1]
measure = args[2]
measure_detach = data_detach(measure)
train_idx = args[3]
test_idx = args[4]
x_train = measure_detach['x'][train_idx]
y_train = measure_detach['y'][train_idx]
x_test = measure_detach['x'][test_idx]
y_test = measure_detach['y'][test_idx]
if config['algorithm'] in ['svr', 'krr', 'tree', 'neural']:
measure_ml = measure.copy()
# measure_ml.coords['frequency'] = measure_ml.coords['frequency']/1e6
measure_ml_detach = data_detach(measure_ml)
scaler = StandardScaler()
scaler.fit(measure_ml_detach['x'])
x_train = scaler.transform(measure_ml_detach['x'][train_idx])
y_train = measure_ml_detach['y'][train_idx]
x_test = scaler.transform(measure_ml_detach['x'][test_idx])
y_test = measure_ml_detach['y'][test_idx]
if config['algorithm'] == 'svr':
gs_ml = GridSearchCV(SVR(),
cv=config['crossvalidation-folds'],
param_grid={"C": config['c_grid'],
"gamma": config['gamma_grid']})
gs_ml.fit(x_train, y_train)
solution = gs_ml.best_params_
elif config['algorithm'] == 'krr':
gs_ml = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1),
param_grid={"alpha": config['alpha_grid'],
"gamma": config['gamma_grid']})
gs_ml.fit(x_train, y_train)
solution = ['']
elif config['algorithm'] == 'tree':
gs_ml = DecisionTreeRegressor(random_state=0)
gs_ml.fit(x_train, y_train)
solution = ['']
elif config['algorithm'] == 'neural':
alpha = 10.0 ** -np.arange(1, 7)
gs_ml = make_pipeline(StandardScaler(),
GridSearchCV(MLPRegressor(solver='lbfgs',
max_iter=2000,
random_state=0),
cv=3,
param_grid={"alpha": alpha}))
gs_ml.fit(x_train, y_train)
solution = ['']
y_predict = gs_ml.predict(x_test)
error = mean_squared_error(y_test, y_predict)
x_train = scaler.inverse_transform(x_train)
x_test = scaler.inverse_transform(x_test)
else:
kwargsmodel = {'overhead': config['overhead']}
if config['algorithm'] == 'pso':
optm = Swarm(config['lowervalues'], config['uppervalues'],
parsecpydatafilepath=config['parsecpydatafilepath'],
modelcodefilepath=config['modelcodefilepath'],
size=config['size'], w=config['w'],
c1=config['c1'], c2=config['c2'],
maxiter=config['maxiter'],
threads=config['threads'],
verbosity=config['verbosity'],
x_meas=x_train, y_meas=y_train,
kwargs=kwargsmodel)
elif config['algorithm'] == 'csa':
initial_state = np.array([np.random.uniform(size=config['dimension'])
for _ in range(config['size'])])
optm = CoupledAnnealer(initial_state,
parsecpydatafilepath=config['parsecpydatafilepath'],
modelcodefilepath=config['modelcodefilepath'],
size=config['size'],
steps=config['steps'],
update_interval=config['update_interval'],
tgen_initial=config['tgen_initial'],
tgen_upd_factor=config['tgen_upd_factor'],
tacc_initial=config['tacc_initial'],
alpha=config['alpha'],
desired_variance=config['desired_variance'],
lowervalues=config['lowervalues'],
uppervalues=config['uppervalues'],
threads=config['threads'],
verbosity=config['verbosity'],
x_meas=x_train,
y_meas=y_train,
kwargs=kwargsmodel)
else:
            print('Error: you must specify a valid algorithm to use')
sys.exit()
error, solution = optm.run()
solution = {'sol': list(solution)}
model = ParsecModel(bsol=solution['sol'],
berr=error,
measure=measure,
modelcodesource=optm.modelcodesource,
modelexecparams=optm.get_parameters())
pred = model.predict(x_test)
error = mean_squared_error(y_test, pred['y'])
train_list = {'x': x_train.tolist(), 'y': y_train.tolist()}
test_list = {'x': x_test.tolist(), 'y': y_test.tolist()}
return {'name': name,
'train': train_list, 'test': test_list,
'dims': measure_detach['dims'],
'error': error, 'params': solution}
def argsparsevalidation():
"""
Validation of script arguments passed via console.
:return: argparse object with validated arguments.
"""
    parser = argparse.ArgumentParser(description='Script to run multiple '
                                                 'runs with different numbers '
                                                 'of train points to model '
                                                 'the application')
parser.add_argument('--config', required=True,
                        help='Path to the configuration file with the '
                             'run parameters')
parser.add_argument('-p', '--parsecpydatafilepath',
                        help='Path to the input data file from the '
                             'specified Parsec package.')
parser.add_argument('-i', '--initnumber', type=int,
                        help='Initial number of measures')
parser.add_argument('-v', '--verbosity', type=int,
help='verbosity level. 0 = No verbose')
args = parser.parse_args()
return args
def main():
"""
Main function executed from console run.
"""
print("\n***** Processing the Models *****")
args = argsparsevalidation()
if args.config:
if not os.path.isfile(args.config):
            print('Error: please provide a valid config file path.')
sys.exit()
with open(args.config, 'r') as fconfig:
config = json.load(fconfig)
for i, v in vars(args).items():
if v is not None:
config[i] = v
else:
config = vars(args)
if not os.path.isfile(config['parsecpydatafilepath']):
        print('Error: please provide a valid parsecpy measures file')
sys.exit()
parsec_exec = ParsecData(config['parsecpydatafilepath'])
measure = parsec_exec.speedups()
measure_detach = data_detach(measure)
# parsec_model = ParsecData(args.modelcodefilepath)
# config = parsec_model.modelexecparams
# if 'svr' in config['algorithm']:
# tipo_modelo = '5'
# else:
# tipo_modelo = re.search(r'config\d', parsec_model.modelcommand).group()
# tipo_modelo = tipo_modelo[-1]
# measure = parsec_model.measure
# if args.verbosity:
# config['verbosity'] = args.verbosity
#
#
# computed_errors = []
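    # The total number of measured points is the product of the sizes of all
    # coordinate dimensions of the measure.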
samples_n = 1
    for dim_size in [len(measure.coords[c]) for c in measure.coords]:
        samples_n *= dim_size
train_size = config['initnumber']
model_results = {}
for m in config['models']:
with open(m["conf_file"]) as f:
model_config = json.load(f)
model_config["verbosity"] = config["verbosity"]
model_results[m["name"]] = {
"algorithm": model_config['algorithm'],
"configfilepath": m["conf_file"],
"config": model_config,
"data": {}
}
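    # Starting from 'initnumber' measures, fit every configured model on a
    # train/test split and double the train size until it reaches half of the
    # total number of measured points.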
while True:
print('\nSample size: ', train_size)
(train_idx, test_idx) = measures_idx_split_train_test(measure, train_size=train_size)
samples_args = [(m[0], m[1]['config'], measure,
train_idx, test_idx)
for m in model_results.items()]
starttime = time.time()
with futures.ProcessPoolExecutor(max_workers=len(samples_args)) as executor:
results = executor.map(workers, samples_args)
for i in results:
if train_size not in model_results[i["name"]]["data"].keys():
model_results[i["name"]]["data"][train_size] = {
'train': [],
'test': [],
'error': [],
'params': []
}
model_results[i["name"]]["data"][train_size]["train"].append(i['train'])
model_results[i["name"]]["data"][train_size]["test"].append(i['test'])
model_results[i["name"]]["data"][train_size]["error"].append(i['error'])
model_results[i["name"]]["data"][train_size]["params"].append(i['params'])
endtime = time.time()
print(' Execution time = %.2f seconds' % (endtime - starttime))
# sf = ShuffleSplit(n_splits=config["numberofsplits"],
# train_size=train_size,
# test_size=(samples_n - train_size))
# splits = []
# split_n = 1
# for train_idx, test_idx in sf.split(measure_detach['x']):
#
# print(' ** # split = ', split_n)
#
# samples_args = [(m[0], m[1]['config'], measure,
# train_idx, test_idx)
# for m in model_results.items()]
#
# starttime = time.time()
#
# with futures.ProcessPoolExecutor(max_workers=len(samples_args)) \
# as executor:
# results = executor.map(workers, samples_args)
# for i in results:
# if train_size not in model_results[i["name"]]["data"].keys():
# model_results[i["name"]]["data"][train_size] = {
# 'train': [],
# 'test': [],
# 'error': [],
# 'params': []
# }
# model_results[i["name"]]["data"][train_size]["train"].append(i['train'])
# model_results[i["name"]]["data"][train_size]["test"].append(i['test'])
# model_results[i["name"]]["data"][train_size]["error"].append(i['error'])
# model_results[i["name"]]["data"][train_size]["params"].append(i['params'])
#
# endtime = time.time()
# print(' Execution time = %.2f seconds' % (endtime - starttime))
if train_size >= int(samples_n/2):
break
train_size *= 2
print('\n\n***** Final Results *****\n')
for name, m in model_results.items():
print("Model Name: {}".format(name))
for ts, d in m["data"].items():
print(' Train Size: {0:2d}'.format(ts))
print(' * Median of Errors: {}'.format(np.median(d['error'])))
filedate = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
pkgname = os.path.basename(config['parsecpydatafilepath']).split('_')[0]
filename = '%s_%s_errors_%s.errordat' % (pkgname,
name,
filedate)
with open(filename, 'w') as f:
json.dump({"parsecpydatafilepath": config['parsecpydatafilepath'],
"measure_dims": measure_detach['dims'],
"model_config": m["config"],
'data': m["data"]},
f,
ensure_ascii=False)
print('Errors data saved on filename: %s' % filename)
print('\n\n***** ALL DONE! *****\n')
if __name__ == '__main__':
main()
|
|
"""Unit tests for VLAN"""
import unittest
from ipaddress import ip_address, ip_network, ip_interface
from faucet.vlan import VLAN
class FaucetVLANBaseTest(unittest.TestCase): # pytype: disable=module-attr
"""Set up defaults for VLAN tests"""
def setUp(self):
"""Defines the default config - this should match the documentation"""
self.default_config = {
'acl_in': None,
'acls_in': None,
'bgp_as': None,
'bgp_connect_mode': 'both',
'bgp_local_address': None,
'bgp_neighbour_addresses': [],
'bgp_neighbour_as': None,
'bgp_port': 9179,
'bgp_routerid': None,
'bgp_server_addresses': ['0.0.0.0', '::'],
'description': None,
'faucet_mac': '0e:00:00:00:00:01',
'faucet_vips': [],
'max_hosts': 255,
'minimum_ip_size_check': True,
'proactive_arp_limit': 2052,
'proactive_nd_limit': 2052,
'routes': None,
'targeted_gw_resolution': False,
'unicast_flood': True,
}
class FaucetVLANConfigTest(FaucetVLANBaseTest):
"""Test that VLAN serialises config as it receives it"""
default_config = None
def test_basic_config(self):
"""Tests the minimal config"""
input_config = {
'vid': 100
}
expected_config = self.default_config
expected_config.update(input_config)
vlan = VLAN(1, 1, input_config)
output_config = vlan.to_conf()
self.assertEqual(output_config, expected_config)
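        # Attributes that live on the VLAN object but are intentionally not
        # part of the serialised to_conf() output.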
key_exceptions = [
'name',
'tagged',
'dyn_gws_by_ipv',
'dyn_host_cache_by_port',
'dp_id',
'bgp_neighbor_addresses',
'bgp_neighbor_as',
'dyn_routes_by_ipv',
'_id',
'dyn_neigh_cache_by_ipv',
'dyn_ipvs',
'dyn_bgp_ipvs',
'dyn_host_cache',
'dyn_faucet_vips_by_ipv',
'dyn_bgp_neighbor_addresses_by_ipv',
'dyn_bgp_server_addresses_by_ipv',
'untagged'
]
dict_keys = set(vlan.__dict__.keys())
conf_keys = set(vlan.to_conf().keys())
for exception in key_exceptions:
dict_keys.remove(exception)
self.assertEqual(dict_keys, conf_keys)
def test_with_routes(self):
"""Tests a config with routes"""
input_config = {
'routes': [
{'route' : {'ip_dst': '10.99.99.0/24', 'ip_gw': '10.0.0.1'}},
{'route' : {'ip_dst': '10.99.98.0/24', 'ip_gw': '10.0.0.99'}}
],
'vid': 100
}
expected_config = self.default_config
expected_config.update(input_config)
vlan = VLAN(1, 1, input_config)
output_config = vlan.to_conf()
self.assertEqual(output_config, expected_config)
def test_with_vips(self):
"""Tests a config with virtual IPs"""
input_config = {
'faucet_vips': ['10.0.0.254/24'],
'vid': 100
}
expected_config = self.default_config
expected_config.update(input_config)
vlan = VLAN(1, 1, input_config)
output_config = vlan.to_conf()
self.assertEqual(output_config, expected_config)
class FaucetVLANMethodTest(FaucetVLANBaseTest):
"""Initialises VLANs with different configs and sanity checks the associated methods"""
def setUp(self):
"""Use the default config as a base"""
super(FaucetVLANMethodTest, self).setUp()
self.input_config = self.default_config
def test_ipvs_no_ips(self):
"""Tests the ipvs() method with no vips"""
vlan = VLAN(1, 1, self.input_config)
self.assertEqual(vlan.ipvs(), [])
def test_ipvs_ipv4(self):
"""Tests the ipvs() method with an IPv4 vip"""
self.input_config.update({
'faucet_vips': ['10.0.0.254/24']
})
vlan = VLAN(1, 1, self.input_config)
self.assertEqual(vlan.ipvs(), [4])
def test_ipvs_ipv6(self):
"""Tests the ipvs() method with an IPv6 vip"""
self.input_config.update({
'faucet_vips': ['2001::1/16']
})
vlan = VLAN(1, 1, self.input_config)
self.assertEqual(vlan.ipvs(), [6])
def test_ipvs_ipv4_ipv6(self):
"""Tests the ipvs() method with both IPv4 and IPv6 vips"""
self.input_config.update({
'faucet_vips': [
'2001::1/16',
'fe80::1/64',
'10.0.0.254/24'
]
})
vlan = VLAN(1, 1, self.input_config)
self.assertEqual(set(vlan.ipvs()), set([4, 6]))
def test_bgp_servers_change_bgp_ipvs_ipv4(self):
"""Tests the ipvs() method with an IPv4 BGP server"""
self.input_config.update({
'bgp_server_addresses': ['127.0.0.1']
})
vlan = VLAN(1, 1, self.input_config)
self.assertEqual(vlan.bgp_ipvs(), [4])
def test_bgp_servers_change_bgp_ipvs_ipv6(self):
"""Tests the ipvs() method with an IPv4 BGP server"""
self.input_config.update({
'bgp_server_addresses': ['::1']
})
vlan = VLAN(1, 1, self.input_config)
self.assertEqual(vlan.bgp_ipvs(), [6])
def test_bgp_servers_change_bgp_ipvs_both(self):
"""Tests the ipvs() method with an IPv4 BGP server"""
self.input_config.update({
'bgp_server_addresses': ['127.0.0.1', '::1']
})
vlan = VLAN(1, 1, self.input_config)
self.assertEqual(vlan.bgp_ipvs(), [4, 6])
self.assertEqual(vlan.bgp_server_addresses_by_ipv(4), [ip_address('127.0.0.1')])
self.assertEqual(vlan.bgp_server_addresses_by_ipv(6), [ip_address('::1')])
def test_faucet_vips_by_ipv_none(self):
"""Tests the faucet_vips_by_ipv() method when there are no vips"""
vlan = VLAN(1, 1, self.input_config)
self.assertEqual(vlan.faucet_vips_by_ipv(4), [])
self.assertEqual(vlan.faucet_vips_by_ipv(6), [])
def test_faucet_vips_by_ipv_both(self):
"""Tests the faucet_vips_by_ipv() method when there are both IPv4 and IPv6 vips"""
self.input_config.update({
'faucet_vips': [
'2001::1/16',
'fe80::1/64',
'10.0.0.254/24'
]
})
vlan = VLAN(1, 1, self.input_config)
self.assertEqual(set(vlan.faucet_vips_by_ipv(4)), set([
ip_interface('10.0.0.254/24')
]))
self.assertEqual(set(vlan.faucet_vips_by_ipv(6)), set([
ip_interface('2001::1/16'),
ip_interface('fe80::1/64')
]))
def test_routes_by_ipv_none(self):
"""Tests the routes_by_ipv() and route_count_by_ipv() methods with no routes"""
vlan = VLAN(1, 1, self.input_config)
self.assertEqual(vlan.routes_by_ipv(4), {})
self.assertEqual(vlan.routes_by_ipv(6), {})
self.assertEqual(vlan.route_count_by_ipv(4), 0)
self.assertEqual(vlan.route_count_by_ipv(6), 0)
def test_routes_by_ipv_both(self):
"""Tests the routes_by_ipv() and route_count_by_ipv() methods with both
IPv4 and IPv6 routes"""
self.input_config.update({
'routes': [
{'route': {'ip_dst': '10.99.99.0/24', 'ip_gw': '10.0.0.1'}},
{'route': {'ip_dst': '10.99.98.0/24', 'ip_gw': '10.0.0.99'}},
{'route': {'ip_dst': '10.99.97.0/24', 'ip_gw': '10.0.0.99'}},
{'route': {'ip_dst': 'fc00::10:0/112', 'ip_gw': 'fc00::1:1'}},
{'route': {'ip_dst': 'fc00::20:0/112', 'ip_gw': 'fc00::1:99'}}
],
})
vlan = VLAN(1, 1, self.input_config)
self.assertEqual(vlan.routes_by_ipv(4), {
ip_network('10.99.99.0/24'): ip_address('10.0.0.1'),
ip_network('10.99.98.0/24'): ip_address('10.0.0.99'),
ip_network('10.99.97.0/24'): ip_address('10.0.0.99'),
})
self.assertEqual(vlan.routes_by_ipv(6), {
ip_network('fc00::10:0/112'): ip_address('fc00::1:1'),
ip_network('fc00::20:0/112'): ip_address('fc00::1:99'),
})
self.assertEqual(vlan.route_count_by_ipv(4), 3)
self.assertEqual(vlan.route_count_by_ipv(6), 2)
def test_modify_routes_v4(self):
"""Tests the add_route() and remove_route() methods with IPv4 routes"""
vlan = VLAN(1, 1, self.input_config)
self.assertEqual(vlan.routes_by_ipv(4), {})
vlan.add_route(ip_network('10.99.99.0/24'), ip_address('10.0.0.1'))
vlan.add_route(ip_network('10.99.98.0/24'), ip_address('10.0.0.99'))
self.assertEqual(vlan.routes_by_ipv(4), {
ip_network('10.99.99.0/24'): ip_address('10.0.0.1'),
ip_network('10.99.98.0/24'): ip_address('10.0.0.99')
})
self.assertEqual(vlan.route_count_by_ipv(4), 2)
vlan.del_route(ip_network('10.99.99.0/24'))
self.assertEqual(vlan.routes_by_ipv(4), {
ip_network('10.99.98.0/24'): ip_address('10.0.0.99')
})
self.assertEqual(vlan.route_count_by_ipv(4), 1)
vlan.del_route(ip_network('10.99.98.0/24'))
self.assertEqual(vlan.route_count_by_ipv(4), 0)
self.assertEqual(vlan.routes_by_ipv(4), {})
def test_modify_routes_v6(self):
"""Tests the add_route() and remove_route() methods with IPv4 routes"""
vlan = VLAN(1, 1, self.input_config)
self.assertEqual(vlan.routes_by_ipv(6), {})
vlan.add_route(ip_network('fc00::10:0/112'), ip_address('fc00::1:1'))
vlan.add_route(ip_network('fc00::20:0/112'), ip_address('fc00::1:99'))
self.assertEqual(vlan.routes_by_ipv(6), {
ip_network('fc00::10:0/112'): ip_address('fc00::1:1'),
ip_network('fc00::20:0/112'): ip_address('fc00::1:99')
})
self.assertEqual(vlan.route_count_by_ipv(6), 2)
vlan.del_route(ip_network('fc00::10:0/112'))
self.assertEqual(vlan.routes_by_ipv(6), {
ip_network('fc00::20:0/112'): ip_address('fc00::1:99')
})
self.assertEqual(vlan.route_count_by_ipv(6), 1)
vlan.del_route(ip_network('fc00::20:0/112'))
self.assertEqual(vlan.route_count_by_ipv(6), 0)
self.assertEqual(vlan.routes_by_ipv(6), {})
def test_modify_routes_static_v4(self):
"""Tests the add_route() and remove_route() methods,
starting with configured static routes for IPv4"""
self.input_config.update({
'routes': [
{'route': {'ip_dst': '10.99.97.0/24', 'ip_gw': '10.0.0.99'}},
],
})
vlan = VLAN(1, 1, self.input_config)
self.assertEqual(vlan.routes_by_ipv(4), {
ip_network('10.99.97.0/24'): ip_address('10.0.0.99')
})
vlan.add_route(ip_network('10.99.99.0/24'), ip_address('10.0.0.1'))
vlan.add_route(ip_network('10.99.98.0/24'), ip_address('10.0.0.99'))
self.assertEqual(vlan.routes_by_ipv(4), {
ip_network('10.99.99.0/24'): ip_address('10.0.0.1'),
ip_network('10.99.98.0/24'): ip_address('10.0.0.99'),
ip_network('10.99.97.0/24'): ip_address('10.0.0.99')
})
self.assertEqual(vlan.route_count_by_ipv(4), 3)
vlan.del_route(ip_network('10.99.99.0/24'))
self.assertEqual(vlan.routes_by_ipv(4), {
ip_network('10.99.97.0/24'): ip_address('10.0.0.99'),
ip_network('10.99.98.0/24'): ip_address('10.0.0.99')
})
self.assertEqual(vlan.route_count_by_ipv(4), 2)
vlan.del_route(ip_network('10.99.98.0/24'))
self.assertEqual(vlan.route_count_by_ipv(4), 1)
self.assertEqual(vlan.routes_by_ipv(4), {
ip_network('10.99.97.0/24'): ip_address('10.0.0.99')
})
def test_modify_routes_static_v6(self):
"""Tests the add_route() and remove_route() methods,
starting with configured static routes for IPv6"""
self.input_config.update({
'routes': [
{'route': {'ip_dst': 'fc00::30:0/112', 'ip_gw': 'fc00::1:99'}},
],
})
vlan = VLAN(1, 1, self.input_config)
self.assertEqual(vlan.routes_by_ipv(6), {
ip_network('fc00::30:0/112'): ip_address('fc00::1:99')
})
vlan.add_route(ip_network('fc00::10:0/112'), ip_address('fc00::1:1'))
vlan.add_route(ip_network('fc00::20:0/112'), ip_address('fc00::1:99'))
self.assertEqual(vlan.routes_by_ipv(6), {
ip_network('fc00::10:0/112'): ip_address('fc00::1:1'),
ip_network('fc00::20:0/112'): ip_address('fc00::1:99'),
ip_network('fc00::30:0/112'): ip_address('fc00::1:99')
})
self.assertEqual(vlan.route_count_by_ipv(6), 3)
vlan.del_route(ip_network('fc00::10:0/112'))
self.assertEqual(vlan.routes_by_ipv(6), {
ip_network('fc00::30:0/112'): ip_address('fc00::1:99'),
ip_network('fc00::20:0/112'): ip_address('fc00::1:99')
})
self.assertEqual(vlan.route_count_by_ipv(6), 2)
vlan.del_route(ip_network('fc00::20:0/112'))
self.assertEqual(vlan.route_count_by_ipv(6), 1)
self.assertEqual(vlan.routes_by_ipv(6), {
ip_network('fc00::30:0/112'): ip_address('fc00::1:99')
})
|
|
#!/usr/bin/env python
"""This module provides tools for making parallel dirsig runs.
Public Repository:
https://github.com/pavdpr/DIRSIG/
USAGE:
python parallel.py [options] or
parallel.py [options] If execute permissions on parallel.py and it is
in the path.
[options] are:
--path=<path> Set the path to search for sim files from. The
default is the path where this command is
executed from.
--processes=<number> Set the number of processes to run
simultaneously. The default is 2.
--regex=<regex> Set the regular expression to search for sim
files. Quotes may be needed around the
regular expression to properly pass it to
python. The default is r'.+\.sim' (all sim
files).
--exclude=<regex> Trim the list of sim files by not processing
any sim file that matches the regular
expression.
--rmsim=<sim file> Do not process this sim file.
--addsim=<sim file> Add a specific sim file to the list of sims to
run. These sim files will be earlier in the
list to run.
--dirsig=<dirsig version> Set the dirsig executable name. The default is
dirsig.
--logfile=<log file name> Set the logfile name. The default is log.
--option=<option> Set an option to pass to the dirsig executable.
Multiple options need to be passed
                              independently.
--run Run the simulation. Not setting the --run flag
will show the simulations that would be run.
Notes:
- The angle brackets after each of the above options should NOT be included.
- There should not be spaces on either side of the equals.
SAMPLE USAGE:
parallel.py
Shows what settings were used. Does NOT execute any runs. Allows the user
to review what simulations will be run.
parallel.py --run
Runs with all defaults.
parallel.py --path=/some/path --dirsig=dirsig-4.7.0 --processes=8 --run
Searches for all sim files in /some/path and executes dirsig-4.7.0 on 8
cores.
parallel.py --option=--mode=preview --option=--output_prefix=foobar --run
Runs dirsig in preview mode and with an output prefix of foobar. This runs
dirsig --mode=preview --output_prefix=foobar sim.sim &> log
    parallel.py --regex="simulation.*\.sim" --run
Searches for all simulations that match simulation.*\.sim
REQUIRED PACKAGES:
multiprocessing or subprocess
re
os
"""
__author__ = 'Paul Romanczyk'
__copyright__ = "Copyright 2015, Rochester Institute of Technology"
__credits__ = []
__license__ = "MIT"
#__version__ = "1.0"
__maintainer__ = "Paul Romanczyk"
__email__ = "[email protected]"
__status__ = "Production"
try:
import multiprocessing
_HAS_MULTIPROCESSING_ = True
except Exception:
import subprocess
_HAS_MULTIPROCESSING_ = False
import os
import re
def find_sims_by_regex(regex, pth='.'):
"""Finds sim files in a directory tree by using regular expressions.
Args:
regex (_sre.SRE_Pattern): The regular expression to use. This should
be compiled e.g., re.compile(r'.+sim') which will find all sim files.
pth (str, optional): The path to search. The default is '.'.
Returns:
        A list of strings containing all files that match the regex.
If no matches are found, the list will be empty.
"""
output = []
# search the directory tree starting with pth
for root, _, files in os.walk(pth):
for current_file in files:
if regex.search(current_file):
# check if the file alone matches the regex
output.append(os.path.join(root, current_file))
elif regex.search(os.path.join(root, current_file)):
# check if the file and directory matches the regex
output.append(os.path.join(root, current_file))
return output
def exclude_sims_by_regex(sims, regex):
"""Removes sims by using a regular expression.
DESCRIPTION:
Returns all sims that do NOT match the regular expression.
ARGS:
        sims (iterable of strings): An iterable of strings containing candidate
sim files
regex (_sre.SRE_Pattern): The regular expression to use. This should
be compiled e.g., re.compile(r'.+sim') which will find all sim files.
RETURNS:
A list of strings that do not match the regular expression.
"""
output = []
for sim in sims:
if not regex.search(sim):
output.append(sim)
return output
def clean_cmd(cmd):
"""Removes multiple spaces and whitespace at beginning or end of command.
Args:
cmd (str): A string containing the command to clean.
Returns:
A cleaned command string.
"""
    return re.sub(r'\s{2,}', ' ', cmd).strip(' \t\n\r')
def cd_for_run(cmd, pth='.', delim=';', basepath=None):
"""Modifies the DIRSIG command to change directories.
This will add add a cd command to execute before the dirsig call. After the
dirsig call, it will add a second cd to change directories back to the
original one. If the directory (pth) is '.', the original command will be
returned.
Args:
cmd (str): The dirsig command to run in between the cd commands.
pth (str, optional): A string containing the path to change to. The
default is '.'.
        delim (str, optional): The delimiter between the cd's and the command. The
default is ';'.
        basepath (str, optional): A string containing the reference path. If none,
basepath will default to os.getcwd().
Notes:
This should be run from the directory where the main call will be made
to get the paths right.
Exceptions:
RuntimeError: If pth does not exist.
Returns:
A string with the new command including the cd commands.
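    Example (illustrative, assuming 'runs/a' exists and basepath defaults
    to os.getcwd()):
        cd_for_run('dirsig sim.sim &> log', pth='runs/a')
        # returns 'cd runs/a; dirsig sim.sim &> log; cd ../..'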
"""
try:
if not pth:
return cmd
elif not os.path.isdir(pth):
raise RuntimeError("The sim path '" + pth + "' does not exist")
if not basepath:
basepath = os.getcwd()
elif not os.path.isdir(basepath):
raise RuntimeError("The base path '" + basepath + "' does not exist")
if os.path.samefile(basepath, pth):
return cmd
return clean_cmd('cd ' + os.path.relpath(pth, basepath) + delim + ' ' + \
cmd + delim + ' cd ' + os.path.relpath(basepath, pth))
except RuntimeError, error:
raise error
def remove_sim_files(sims, omit=()):
""" Removes sim files.
DESCRIPTION:
Removes sim files. There are 3 sets of sim files that will be removed:
        1. sim files in the iterable omit.
2. sim files that are duplicates of ones already processed.
3. sim files that do not exist on disk.
ARGS:
sims (iterable of str): An iterable of sim files.
        omit (iterable of str): An iterable of sim files to omit.
RETURNS:
list of str: A list of sim files with no duplicates.
"""
tmpset = set()
    # prepopulate with files that we want to omit
    for f in omit:
tmpset.add(os.path.abspath(f))
output = []
# remove sim files
for sim in sims:
if os.path.abspath(sim) not in tmpset:
tmpset.add(os.path.abspath(sim))
if os.path.isfile(sim):
output.append(os.path.relpath(sim))
return output
def make_dirsig_command(sim, options=None, dirsig='dirsig', logfile='log'):
""" Makes a command to rund dirsig.
Args:
sim (str): A string containing the name of the sim file.
        options (str, optional): A string containing options to pass to dirsig.
The default is None.
dirsig (str, optional): A string containing the executable to of dirsig
to use. The default is 'dirsig'.
        logfile (str, optional): A string containing the name of the logfile to
write to. The default is 'log'.
Returns:
A string for the dirsig command to call.
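    Example (illustrative):
        make_dirsig_command('scene.sim', options='--mode=preview')
        # returns 'dirsig --mode=preview scene.sim &> log'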
"""
# which dirsig to use
cmd = dirsig
# set options
if options:
cmd += ' ' + options
# add the sim
cmd += ' ' + sim
# add a log file.
cmd += ' &> ' + logfile
# clean the dirsig command
return clean_cmd(cmd)
def parallel_run_dirsig(cmds, processes=2):
"""Executes dirsig runs in parallel.
Args:
cmds (str): A list of strings, where each string is a dirsig command to
execute.
processes (int, optional): The number of processes to run at a time. The
default is 2.
Returns:
None
"""
if _HAS_MULTIPROCESSING_:
pool = multiprocessing.Pool(processes=processes)
pool.map(os.system, cmds)
else:
"""
Run with subprocess.
NOTE: "Invoking the system shell with shell=True can be a security
        hazard if combined with untrusted input."
"""
if processes != 1:
print "WARNING: multiprocessing package is not installed."
print "\tOnly one process will be exectuted at a time."
for cmd in cmds:
subprocess.call(cmd, shell=True)
return
if __name__ == '__main__':
# set defaults
SEARCH_REGEX = []
EXCLUDE_REGEX = []
EXCLUDE_FILES = []
SIMS = []
DIRSIG = 'dirsig'
PATH = '.'
BASEPATH = None
PROCESSES = 2
LOGFILE = 'log'
OPTIONS = None
RUN = False
import sys
ARGS = sys.argv[1:]
REGEXREGEX1 = re.compile(r'regex="(.*)"', re.IGNORECASE)
REGEXREGEX2 = re.compile(r"regex='(.*)'", re.IGNORECASE)
I = 0
while I < len(ARGS):
ARG = ARGS[I]
if ARG.lower().startswith('--path='):
PATH = ARG[7:]
# elif ARG.lower().startswith('--basepath='):
# BASEPATH = ARG[11:]
elif ARG.lower().startswith('--processes='):
PROCESSES = int(ARG[12:])
elif ARG.lower().startswith('--regex='):
SEARCH_REGEX.append(ARG[8:])#.decode('string_escape')
elif ARG.lower().startswith('--exclude='):
EXCLUDE_REGEX.append(ARG[10:])#.decode('string_escape')
elif ARG.lower().startswith('--dirsig='):
DIRSIG = ARG[9:]
elif ARG.lower().startswith('--logfile='):
LOGFILE = ARG[10:]
elif ARG.lower().startswith('--addsim='):
SIMS.append(ARG[9:])
elif ARG.lower().startswith('--rmsim='):
EXCLUDE_FILES.append(ARG[8:])
elif ARG.lower().startswith('--option='):
if OPTIONS:
OPTIONS += ' ' + ARG[9:]
else:
OPTIONS = ARG[9:]
elif ARG.lower() == '--run':
RUN = True
else:
sys.exit("'" + ARG + "' is an unexpected command line option.")
I += 1
if not SEARCH_REGEX:
SEARCH_REGEX = [r'.+\.sim']
# Find some sim files
for REGEX in SEARCH_REGEX:
SIMS += find_sims_by_regex(re.compile(REGEX), pth=PATH)
# Exclude some sim files
for REGEX in EXCLUDE_REGEX:
SIMS = exclude_sims_by_regex(SIMS, re.compile(REGEX))
# Remove duplicate sim files
SIMS = remove_sim_files(SIMS, EXCLUDE_FILES)
if not RUN:
print "dirsig.parallel.parallel.py"
print
print "Called from {0}".format(os.getcwd())
print "Searching: {0}".format(os.path.abspath(PATH))
print
print "Found {0} sim files:".format(len(SIMS))
for SIM in SIMS:
print "\t{0}".format(SIM)
print
print "To add more simulations add --regex=<regular expression> to " + \
"your python call."
print "To add a specific simulation add --addsim=<sim file> to your " + \
"python call."
print "To remove simulations add --exclude=<regular expression> to " + \
"your python call."
print "To remove a specific simulation add --rmsim=<sim file> to " + \
"your python call."
print
print "The following dirsig call will be performed on each sim file:"
print "\t{0}".format(make_dirsig_command("*.sim", options=OPTIONS, \
dirsig=DIRSIG, logfile = LOGFILE))
print
if not _HAS_MULTIPROCESSING_:
print "WARNING: multiprocessing package is not installed."
print "This will not work right now."
# if PROCESSES != 1 :
# print "\tOnly one process will be exectuted at a time."
else:
print "Executing on {0} cores.".format(PROCESSES)
print "To change the number of processes add --processes=<n> to " + \
"your python call."
print
print "To run with these settings, use:"
print "\tpython parallel.py {0} --run".format(" ".join(ARGS))
else:
# make dirsig commands
CMDS = []
for SIM in SIMS:
(DIR, SIMFILE) = os.path.split(SIM)
CMDS.append(cd_for_run(make_dirsig_command(SIMFILE, options=OPTIONS, \
dirsig=DIRSIG, logfile=LOGFILE), pth=DIR, basepath=BASEPATH))
# run dirsig
parallel_run_dirsig(CMDS, processes=PROCESSES)
|
|
# coding=utf-8
from datetime import date, datetime, timedelta
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django_countries.fields import CountryField
from feincms.templatetags.feincms_thumbnail import thumbnail
from feincms.translations import (TranslatedObjectManager,
TranslatedObjectMixin, Translation)
try:
from django.utils import timezone
now = timezone.now
except ImportError:
now = datetime.now
@python_2_unicode_compatible
class CategoryBase(models.Model):
name = models.CharField(_('name'), max_length=50)
slug = models.SlugField(_('slug'), unique=True)
class Meta:
abstract = True
verbose_name = _('Event category')
verbose_name_plural = _('Event categories')
def __str__(self):
return self.name
class Category(CategoryBase):
class Meta(CategoryBase.Meta):
abstract = False
class CategoryAdmin(admin.ModelAdmin):
list_display = ('name', 'slug')
prepopulated_fields = {'slug': ('name',)}
class EventManager(TranslatedObjectManager):
def get_query_set(self):
return super(EventManager, self).get_query_set().select_related('venue')
def public(self):
return self.filter(privacy='OPEN')
def upcoming(self):
""" returns all upcoming and ongoing events """
today = date.today()
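        # Before 06:00 the previous day still counts as "today", so events
        # running past midnight remain in the upcoming list.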
if now().hour < 6:
today = today - timedelta(days=1)
return self.public().filter(Q(start_time__gte=today) | Q(end_time__gte=today))
def past(self):
""" returns all past events """
today = date.today()
if now().hour < 6:
today = today - timedelta(days=1)
return self.public().filter(Q(start_time__lt=today) & Q(end_time__lt=today))
@python_2_unicode_compatible
class Venue(models.Model):
name = models.CharField(_('Location'), max_length=255)
street = models.CharField(_('Street'), max_length=255, blank=True)
city = models.CharField(_('City'), max_length=50, blank=True)
state = models.CharField(_('State'), max_length=30, blank=True)
zip = models.CharField(_('Zip'), max_length=10, blank=True)
country = CountryField(_('Country'), blank=True, null=True, default='CH')
latitude = models.DecimalField(
_('Latitude'), blank=True, null=True, max_digits=12, decimal_places=9)
longitude = models.DecimalField(
_('Longitude'), blank=True, null=True, max_digits=12, decimal_places=9)
class Meta:
verbose_name = _('Venue')
verbose_name_plural = _('Venues')
def __str__(self):
return u'%s, %s, %s' % (self.name, self.street, self.city)
class VenueAdmin(admin.ModelAdmin):
list_display = ('name', 'street', 'city', 'country')
PRIVACY_CHOICES = (('OPEN', _('open')),
('CLOSED', _('closed')),
('SECRET', _('private')),
)
@python_2_unicode_compatible
class EventBase(models.Model, TranslatedObjectMixin):
def __init__(self, *args, **kwargs):
super(EventBase, self).__init__(*args, **kwargs)
self.cleanse = getattr(settings, 'EVENT_CLEANSE', False)
self.meta = {'uses_medialibrary': True, 'editable': True}
owner = models.ForeignKey(User, blank=True, null=True, verbose_name=_('Owner'),
related_name='owns_%(app_label)s_%(class)s')
name = models.CharField(_('Name'), max_length=255)
short_description = models.TextField(_('Short Description'), blank=True)
description = models.TextField(_('Description'), blank=True)
start_time = models.DateTimeField(
        _('Start time'), help_text=_('Start date and time'))
end_time = models.DateTimeField(_('End time'), blank=True, null=True, help_text=_(
'leave blank for full day events'))
# location = models.CharField(_('Location'), max_length=255)
privacy = models.CharField(
_('Privacy'), max_length=10, choices=PRIVACY_CHOICES)
updated_time = models.DateTimeField(_('updated time'), auto_now=True)
picture = models.ForeignKey(settings.AGENDA_MEDIA_FILE, blank=True, null=True,
related_name="%(app_label)s_%(class)s_events")
# custom fields:
slug = models.SlugField(_('Slug'), max_length=100)
language = models.CharField(
_('Language'), max_length=5, choices=settings.LANGUAGES)
objects = EventManager()
class Meta:
abstract = True
ordering = ['start_time']
verbose_name = _('Event')
verbose_name_plural = _('Events')
#connections = ['feed', 'invited', 'attending', 'maybe', 'noreply', 'declined', 'picture']
def __str__(self):
return u'%s (%s)' % (self.name, self.start_time)
def clean(self):
# an event cannot end before start
if self.end_time and self.end_time <= self.start_time:
raise ValidationError(
                _('The event cannot end before it starts (end time must be after start time)'))
@property
def location(self):
return self.venue.name
@models.permalink
def get_absolute_url(self):
return ('event_detail', (self.slug,), {})
class Event(EventBase):
venue = models.ForeignKey(Venue)
categories = models.ManyToManyField(Category, blank=True,
related_name="%(app_label)s_%(class)s_related")
class Meta(EventBase.Meta):
abstract = False
class EventTranslation(Translation(Event)):
name = models.CharField(_('Name'), max_length=255)
short_description = models.TextField(_('Short Description'), blank=True)
description = models.TextField(_('Description'), blank=True)
class Meta:
verbose_name = _('Event Translation')
verbose_name_plural = _('Event Translations')
class EventAdminForm(forms.ModelForm):
class Meta:
widgets = {
'description': forms.widgets.Textarea(attrs={'class': 'tinymce'}),
'short_description': forms.widgets.Textarea(attrs={'class': 'tinymce'}),
}
class EventTranslationInline(admin.TabularInline):
model = EventTranslation
max_num = len(settings.LANGUAGES)
class EventAdmin(admin.ModelAdmin):
def thumb(self, obj):
try:
return u'<img src="%s" >' % thumbnail(obj.picture, '200x60')
except ValueError:
return u'No Image'
thumb.allow_tags = True
form = EventAdminForm
inlines = [EventTranslationInline]
save_on_top = True
list_display = ('__str__', 'start_time', 'end_time', 'privacy',
'location', 'thumb')
fieldsets = [
(None, {
'fields': ('privacy', 'start_time', 'end_time',
'name', 'slug', 'short_description',
'description', 'language',
'picture', 'venue', 'categories')
}),
]
list_filter = ('start_time', 'privacy')
raw_id_fields = ('picture', 'venue')
prepopulated_fields = {'slug': ('name',)}
|
|
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_serialization import jsonutils
from sahara.plugins.ambari import client as ambari_client
from sahara.plugins import exceptions as p_exc
from sahara.tests.unit import base
class AmbariClientTestCase(base.SaharaTestCase):
def setUp(self):
super(AmbariClientTestCase, self).setUp()
self.http_client = mock.Mock()
self.http_client.get = mock.Mock()
self.http_client.post = mock.Mock()
self.http_client.put = mock.Mock()
self.http_client.delete = mock.Mock()
self.headers = {"X-Requested-By": "sahara"}
self.remote = mock.Mock()
self.remote.get_http_client.return_value = self.http_client
self.instance = mock.Mock()
self.instance.remote.return_value = self.remote
self.instance.management_ip = "1.2.3.4"
self.good_pending_resp = mock.MagicMock()
self.good_pending_resp.status_code = 200
self.good_pending_resp.text = ('{"Requests": '
'{"id": 1, "status": "PENDING"}}')
def test_init_client_default(self):
client = ambari_client.AmbariClient(self.instance)
self.assertEqual(self.http_client, client._http_client)
self.assertEqual("http://1.2.3.4:8080/api/v1", client._base_url)
self.assertEqual("admin", client._auth.username)
self.assertEqual("admin", client._auth.password)
self.remote.get_http_client.assert_called_with("8080")
def test_init_client_manual(self):
client = ambari_client.AmbariClient(self.instance, port="1234",
username="user", password="pass")
self.assertEqual("http://1.2.3.4:1234/api/v1", client._base_url)
self.assertEqual("user", client._auth.username)
self.assertEqual("pass", client._auth.password)
self.remote.get_http_client.assert_called_with("1234")
def test_close_http_session(self):
with ambari_client.AmbariClient(self.instance):
pass
self.remote.close_http_session.assert_called_with("8080")
def test_get_method(self):
client = ambari_client.AmbariClient(self.instance)
client.get("http://spam")
self.http_client.get.assert_called_with(
"http://spam", verify=False, auth=client._auth,
headers=self.headers)
def test_post_method(self):
client = ambari_client.AmbariClient(self.instance)
client.post("http://spam", data="data")
self.http_client.post.assert_called_with(
"http://spam", data="data", verify=False, auth=client._auth,
headers=self.headers)
def test_put_method(self):
client = ambari_client.AmbariClient(self.instance)
client.put("http://spam", data="data")
self.http_client.put.assert_called_with(
"http://spam", data="data", verify=False, auth=client._auth,
headers=self.headers)
def test_delete_method(self):
client = ambari_client.AmbariClient(self.instance)
client.delete("http://spam")
self.http_client.delete.assert_called_with(
"http://spam", verify=False, auth=client._auth,
headers=self.headers)
def test_import_credential(self):
resp = mock.Mock()
resp.text = ""
resp.status_code = 200
self.http_client.post.return_value = resp
client = ambari_client.AmbariClient(self.instance)
client.import_credential("test", alias="credential",
data={"some": "data"})
self.http_client.post.assert_called_once_with(
"http://1.2.3.4:8080/api/v1/clusters/test/credentials/credential",
verify=False, data=jsonutils.dumps({"some": "data"}),
auth=client._auth, headers=self.headers)
def test_get_registered_hosts(self):
client = ambari_client.AmbariClient(self.instance)
resp_data = """{
"href" : "http://1.2.3.4:8080/api/v1/hosts",
"items" : [
{
"href" : "http://1.2.3.4:8080/api/v1/hosts/host1",
"Hosts" : {
"host_name" : "host1"
}
},
{
"href" : "http://1.2.3.4:8080/api/v1/hosts/host2",
"Hosts" : {
"host_name" : "host2"
}
},
{
"href" : "http://1.2.3.4:8080/api/v1/hosts/host3",
"Hosts" : {
"host_name" : "host3"
}
}
]
}"""
resp = mock.Mock()
resp.text = resp_data
resp.status_code = 200
self.http_client.get.return_value = resp
hosts = client.get_registered_hosts()
self.http_client.get.assert_called_with(
"http://1.2.3.4:8080/api/v1/hosts", verify=False,
auth=client._auth, headers=self.headers)
self.assertEqual(3, len(hosts))
self.assertEqual("host1", hosts[0]["Hosts"]["host_name"])
self.assertEqual("host2", hosts[1]["Hosts"]["host_name"])
self.assertEqual("host3", hosts[2]["Hosts"]["host_name"])
def test_update_user_password(self):
client = ambari_client.AmbariClient(self.instance)
resp = mock.Mock()
resp.text = ""
resp.status_code = 200
self.http_client.put.return_value = resp
client.update_user_password("bart", "old_pw", "new_pw")
exp_req = jsonutils.dumps({
"Users": {
"old_password": "old_pw",
"password": "new_pw"
}
})
self.http_client.put.assert_called_with(
"http://1.2.3.4:8080/api/v1/users/bart", data=exp_req,
verify=False, auth=client._auth, headers=self.headers)
def test_create_blueprint(self):
client = ambari_client.AmbariClient(self.instance)
resp = mock.Mock()
resp.text = ""
resp.status_code = 200
self.http_client.post.return_value = resp
client.create_blueprint("cluster_name", {"some": "data"})
self.http_client.post.assert_called_with(
"http://1.2.3.4:8080/api/v1/blueprints/cluster_name",
data=jsonutils.dumps({"some": "data"}), verify=False,
auth=client._auth, headers=self.headers)
def test_create_cluster(self):
client = ambari_client.AmbariClient(self.instance)
resp = mock.Mock()
resp.text = """{
"Requests": {
"id": 1,
"status": "InProgress"
}
}"""
resp.status_code = 200
self.http_client.post.return_value = resp
req_info = client.create_cluster("cluster_name", {"some": "data"})
self.assertEqual(1, req_info["id"])
self.http_client.post.assert_called_with(
"http://1.2.3.4:8080/api/v1/clusters/cluster_name",
data=jsonutils.dumps({"some": "data"}), verify=False,
auth=client._auth, headers=self.headers)
def test_add_host_to_cluster(self):
client = ambari_client.AmbariClient(self.instance)
resp = mock.Mock()
resp.text = ""
resp.status_code = 200
self.http_client.post.return_value = resp
instance = mock.MagicMock()
instance.fqdn.return_value = "i1"
instance.cluster.name = "cl"
client.add_host_to_cluster(instance)
self.http_client.post.assert_called_with(
"http://1.2.3.4:8080/api/v1/clusters/cl/hosts/i1",
verify=False, auth=client._auth, headers=self.headers)
def test_start_process_on_host(self):
client = ambari_client.AmbariClient(self.instance)
self.http_client.put.return_value = self.good_pending_resp
client.wait_ambari_request = mock.MagicMock()
instance = mock.MagicMock()
instance.fqdn.return_value = "i1"
instance.cluster.name = "cl"
client.start_service_on_host(instance, "HDFS", 'STATE')
self.http_client.put.assert_called_with(
"http://1.2.3.4:8080/api/v1/clusters/"
"cl/hosts/i1/host_components/HDFS",
data=jsonutils.dumps(
{
"HostRoles": {"state": "STATE"},
"RequestInfo": {
"context": "Starting service HDFS, "
"moving to state STATE"}
}),
verify=False, auth=client._auth, headers=self.headers)
def test_stop_process_on_host(self):
client = ambari_client.AmbariClient(self.instance)
check_mock = mock.MagicMock()
check_mock.status_code = 200
check_mock.text = '{"HostRoles": {"state": "SOME_STATE"}}'
self.http_client.get.return_value = check_mock
self.http_client.put.return_value = self.good_pending_resp
client.wait_ambari_request = mock.MagicMock()
instance = mock.MagicMock()
instance.fqdn.return_value = "i1"
client.stop_process_on_host("cluster_name", instance, "p1")
self.http_client.put.assert_called_with(
"http://1.2.3.4:8080/api/v1/clusters/"
"cluster_name/hosts/i1/host_components/p1",
data=jsonutils.dumps(
{
"HostRoles": {"state": "INSTALLED"},
"RequestInfo": {"context": "Stopping p1"}
}),
verify=False, auth=client._auth, headers=self.headers)
@mock.patch("sahara.plugins.ambari.client.context")
def test_wait_ambari_request(self, mock_context):
client = ambari_client.AmbariClient(self.instance)
check_mock = mock.MagicMock()
d1 = {"request_context": "r1", "request_status": "PENDING",
"progress_percent": 42}
d2 = {"request_context": "r1", "request_status": "COMPLETED",
"progress_percent": 100}
check_mock.side_effect = [d1, d2]
client.check_request_status = check_mock
client.wait_ambari_request("id1", "c1")
check_mock.assert_has_calls([mock.call("c1", "id1"),
mock.call("c1", "id1")])
@mock.patch("sahara.plugins.ambari.client.context")
def test_wait_ambari_request_error(self, mock_context):
client = ambari_client.AmbariClient(self.instance)
check_mock = mock.MagicMock()
d1 = {"request_context": "r1", "request_status": "ERROR",
"progress_percent": 146}
check_mock.return_value = d1
client.check_request_status = check_mock
self.assertRaises(p_exc.HadoopProvisionError,
client.wait_ambari_request, "id1", "c1")
|
|
"Definitions of fields in APT.txt"
APT_RECORDS = (
('record_type', 3),
('facility_site_number', 11),
('facility_type', 13),
('location_identifier', 4),
('information_effective_date', 10),
('faa_region_code', 3),
('faa_district_office_code', 4),
('associated_state_post_office_code', 2),
('associated_state_name', 20),
('county_name', 21),
('county_state_post_office_code', 2),
('associated_city', 40),
('facility_name', 50),
('ownership_type', 2),
('use_type', 2),
('owners_name', 35),
('owners_address', 72),
('owners_city_state_zip', 45),
('owners_phone_number', 16),
('facility_manager_name', 35),
('facility_manager_address', 72),
('facility_manager_city_state_zip', 45),
('facility_manager_phone_number', 16),
('point_latitude_formatted', 15),
('point_latitude_seconds', 12),
('point_longitude_formatted', 15),
('point_longitude_seconds', 12),
('point_determination_method', 1),
('elevation_msl', 7),
('elevation_determination_method', 1),
('magnetic_variation', 3),
('magnetic_variation_epoch_year', 4),
('traffic_pattern_agl', 4),
('sectional', 30),
('distance_to_central_business_district', 2),
('direction_to_central_business_district', 3),
('land_area_covered_acres', 5),
('boundary_artcc_identifier', 4),
('boundary_artcc_computer_identifier', 3),
('boundary_artcc_name', 30),
('responsible_artcc_identifier', 4),
('responsible_artcc_computer_identifier', 3),
('responsible_artcc_name', 30),
('tie_in_fss_on_airport', 1),
('tie_in_fss_identifier', 4),
('tie_in_fss_name', 30),
('local_phone_from_airport_to_fss_administrative', 16),
('toll_free_from_airport_to_fss_briefing', 16),
('alternate_fss_identifier', 4),
('alternate_fss_name', 30),
('toll_free_from_airport_to_alternate_fss_briefing', 16),
('notam_issuing_fss_identifier', 4),
('notam_d_available', 1),
('activation_date', 7),
('status_code', 2),
('arff_certifcation_type_and_date', 15),
('npias_federal_agreements_code', 7),
('airport_airspace_analysis_determination', 13),
('customs_airport_of_entry', 1),
('customs_landing_rights', 1),
('military_civil_joint_use', 1),
('military_landing_rights', 1),
# ('national_emergency_use_no_longer_maintained', 18),
# ('military_departments_interest_for_emergencies_no_longer_maintained', 6),
('airport_inspection_method', 2),
('agency_group_performing_inspection', 1),
('last_physical_inspection_date', 8),
('last_information_date', 8),
('available_fuels', 40),
('airframe_repair', 5),
('powerplant_repair', 5),
('bottled_oxygen_available', 8),
('bulk_oxygen_available', 8),
('lighting_schedule', 7),
('beacon_lighting_schedule', 7),
('control_tower', 1),
('unicom_frequencies', 7),
('common_traffic_advisory_frequency', 7),
('segmented_circle', 4),
('beacon_color', 3),
('landing_fees', 1),
('medical_use', 1),
('singles_based', 3),
('multis_based', 3),
('jets_based', 3),
('helicopters_based', 3),
('gliders_based', 3),
('military_based', 3),
('ultralights_based', 3),
('commercial_services_operations', 6),
('commuter_services_operations', 6),
('air_taxi_operations', 6),
('ga_local_operations', 6),
('ga_itinerant_operations', 6),
('military_operations', 6),
('operations_ending_date', 10),
('position_source', 16),
('position_source_date', 10),
('elevation_source', 16),
('elevation_source_date', 10),
('contract_fuel_available', 1),
('transient_storage', 12),
('other_services_available', 71),
('wind_indicator', 3),
('icao_identifier', 7),
(None, 312), # filler
)
ATT_RECORDS = (
('record_type', 3),
('facility_site_number', 11),
('state_post_office_code', 2),
('attendace_schedule_sequence', 2),
('attendance_schedule', 108),
(None, 1403), # filler
)
RWY_RECORDS = (
('record_type', 3),
('facility_site_number', 11),
('state_post_office_code', 2),
('runway_identification', 7),
('runway_length', 5),
('runway_width', 4),
('surface_type_condition', 12),
('surface_treatment', 5),
('pavement_classification_number', 11),
('lights_edge_intensity', 5),
('base_end_identifier', 3),
('base_end_true_alignment', 3),
('base_end_ils_type', 10),
('base_end_righthand_traffic', 1),
('base_end_runway_markings_type', 5),
('base_end_runway_markings_condition', 1),
# ('base_end_aircraft_arresting_device', 6),
('base_end_latitude_physical_runway_end_formatted', 15),
('base_end_latitude_physical_runway_end_seconds', 12),
('base_end_longitude_physical_runway_end_formatted', 15),
('base_end_longitude_physical_runway_end_seconds', 12),
('base_end_elevation_physical_runway_end', 7),
('base_end_threshold_crossing_height_agl', 3),
('base_end_visual_glide_path_angle_degrees', 4),
('base_end_latitude_displaced_threshold_formatted', 15),
('base_end_latitude_displaced_threshold_seconds', 12),
('base_end_longitude_displaced_threshold_formatted', 15),
('base_end_longitude_displaced_threshold_seconds', 12),
('base_end_elevation_displaced_threshold', 7),
('base_end_displaced_threshold_length_from_end', 4),
('base_end_elevation_touchdown_zone', 7),
('base_end_visual_glide_slope_indicators', 5),
('base_end_runway_visual_range_equipment_locations', 3),
('base_end_runway_visual_range_equipment', 1),
('base_end_approach_light_system', 8),
('base_end_runway_end_identifer_lights', 1),
('base_end_runway_centerline_lights', 1),
('base_end_runway_end_touchdown_lights', 1),
('base_end_controlling_object_description', 11),
('base_end_controlling_object_marked_lighted', 4),
('base_end_runway_category', 5),
('base_end_controlling_object_clearance_slope', 2),
('base_end_controlling_object_height_above_runway', 5),
('base_end_controlling_object_distance_runway_end', 5),
('base_end_controlling_object_centerline_offset', 7),
('reciprocal_end_identifer', 3),
('reciprocal_end_true_alignment', 3),
('reciprocal_end_ils_type', 10),
('reciprocal_end_righthand_traffic', 1),
('reciprocal_end_runway_markings', 5),
('reciprocal_end_runway_markings_condition', 1),
# ('reciprocal_end_aircraft_arresting_device', 6),
('reciprocal_end_latitude_physical_runway_end_formatted', 15),
('reciprocal_end_latitude_physical_runway_end_seconds', 12),
('reciprocal_end_longitude_physical_runway_end_formatted', 15),
('reciprocal_end_longitude_physical_runway_end_seconds', 12),
('reciprocal_end_elevation_physical_runway_end', 7),
('reciprocal_end_threshold_crossing_height_agl', 3),
('reciprocal_end_visual_glide_path_angle_degrees', 4),
('reciprocal_end_latitude_displaced_threshold_formatted', 15),
('reciprocal_end_latitude_displaced_threshold_seconds', 12),
('reciprocal_end_longitude_displaced_threshold_formatted', 15),
('reciprocal_end_longitude_displaced_threshold_seconds', 12),
('reciprocal_end_elevation_displaced_threshold', 7),
('reciprocal_end_displaced_threshold_length_from_end', 4),
('reciprocal_end_elevation_touchdown_zone', 7),
('reciprocal_end_visual_glide_slope_indicators', 5),
('reciprocal_end_runway_visual_range_equipment_locations', 3),
('reciprocal_end_runway_visual_range_equipment', 1),
('reciprocal_end_approach_light_system', 8),
('reciprocal_end_runway_end_identifer_lights', 1),
('reciprocal_end_runway_centerline_lights', 1),
('reciprocal_end_runway_end_touchdown_lights', 1),
('reciprocal_end_controlling_object_description', 11),
('reciprocal_end_controlling_object_marked_lighted', 4),
('reciprocal_end_runway_category', 5),
('reciprocal_end_controlling_object_clearance_slope', 2),
('reciprocal_end_controlling_object_height_above_runway', 5),
('reciprocal_end_controlling_object_distance_runway_end', 5),
('reciprocal_end_controlling_object_centerline_offset', 7),
('runway_length_source', 16),
('runway_length_source_date', 10),
('runway_weight_bearing_single_wheel', 6),
('runway_weight_bearing_dual_wheel', 6),
('runway_weight_bearing_two_dual_wheels', 6),
('runway_weight_bearing_two_dual_wheels_tandem', 6),
('base_end_runway_end_gradient', 5),
('base_end_runway_end_gradient_direction', 4),
('base_end_runway_end_position_source', 16),
('base_end_runway_end_position_source_date', 10),
('base_end_runway_end_elevation_source', 16),
('base_end_runway_end_elevation_source_date', 10),
('base_end_displaced_threshold_position_source', 16),
('base_end_displaced_threshold_position_source_date', 10),
('base_end_displaced_threshold_elevation_source', 16),
('base_end_displaced_threshold_elevation_source_date', 10),
('base_end_touchdown_zone_elevation_source', 16),
('base_end_touchdown_zone_elevation_source_date', 10),
('base_end_takeoff_run_available_feet', 5),
('base_end_takeoff_distance_available_feet', 5),
('base_end_aclt_stop_distance_available_feet', 5),
('base_end_landing_distance_available_feet', 5),
('base_end_available_landing_distance_lahso_feet', 5),
('base_end_intersecting_runway_lahso_id', 7),
('base_end_intersecting_entity_lahso_description_if_not_runway', 40),
('base_end_latitude_lahso_point_formatted', 15),
('base_end_latitude_lahso_point_seconds', 12),
('base_end_longitude_lahso_point_formatted', 15),
('base_end_longitude_lahso_point_seconds', 12),
('base_end_lahso_point_source', 16),
('base_end_lahso_point_source_date', 10),
('reciprocal_end_runway_end_gradient', 5),
('reciprocal_end_runway_end_gradient_direction', 4),
('reciprocal_end_runway_end_position_source', 16),
('reciprocal_end_runway_end_position_source_date', 10),
('reciprocal_end_runway_end_elevation_source', 16),
('reciprocal_end_runway_end_elevation_source_date', 10),
('reciprocal_end_displaced_threshold_position_source', 16),
('reciprocal_end_displaced_threshold_position_source_date', 10),
('reciprocal_end_displaced_threshold_elevation_source', 16),
('reciprocal_end_displaced_threshold_elevation_source_date', 10),
('reciprocal_end_touchdown_zone_elevation_source', 16),
('reciprocal_end_touchdown_zone_elevation_source_date', 10),
('reciprocal_end_takeoff_run_available_feet', 5),
    ('reciprocal_end_takeoff_distance_available_feet', 5),
('reciprocal_end_aclt_stop_distance_available_feet', 5),
('reciprocal_end_landing_distance_available_feet', 5),
('reciprocal_end_available_landing_distance_lahso_feet', 5),
('reciprocal_end_intersecting_runway_lahso_id', 7),
('reciprocal_end_intersecting_entity_lahso_description_if_not_runway', 40),
('reciprocal_end_latitude_lahso_point_formatted', 15),
('reciprocal_end_latitude_lahso_point_seconds', 12),
('reciprocal_end_longitude_lahso_point_formatted', 15),
('reciprocal_end_longitude_lahso_point_seconds', 12),
('reciprocal_end_lahso_point_source', 16),
('reciprocal_end_lahso_point_source_date', 10),
(None, 388),
)
ARRESTING_RECORDS = (
('record_type', 3),
('facility_site_number', 11),
('state_post_office_code', 2),
('runway_identification', 7),
('runway_end_identifier', 3),
('aircraft_arresting_device_type', 9),
(None, 1494),
)
RMK_RECORDS = (
('record_type', 3),
('facility_site_number', 11),
('state_post_office_code', 2),
('element_name', 13),
('element_text', 1500),
)
# Maps the first field of APT.txt to the record type
APT_RECORD_MAP = {
'APT': APT_RECORDS,
'ARS': ARRESTING_RECORDS,
'ATT': ATT_RECORDS,
'RWY': RWY_RECORDS,
'RMK': RMK_RECORDS
}
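# Hypothetical usage sketch (not part of the original module): each layout above
# is a sequence of (name, width) tuples for a fixed-width line, and the first
# three characters select the layout via APT_RECORD_MAP. The parse_apt_line
# helper and its handling of unknown record types are assumptions added purely
# for illustration.
def parse_apt_line(line):
    """Parse one fixed-width APT.txt line into a dict of stripped field values."""
    fields = APT_RECORD_MAP.get(line[:3].strip())
    if fields is None:
        return None  # unrecognised record type
    parsed, offset = {}, 0
    for name, width in fields:
        if name is not None:  # None entries are filler and are skipped
            parsed[name] = line[offset:offset + width].strip()
        offset += width
    return parsed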
|
|
# -*- coding: utf-8 -*-
import datetime
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.db.models import Count, Q
from django.http import (HttpResponse, HttpResponseNotFound, Http404,
HttpResponseRedirect, HttpResponseForbidden)
from django.shortcuts import (get_object_or_404, get_list_or_404,
render_to_response)
from django.template import Context, RequestContext, loader
from django.utils.translation import ugettext_lazy as _
from forms import ChoiceForm, PollForm, PollVotingForm
from models import Choice, Poll, Vote
def get_sidebar_polls(user):
created_by_user = None
answered_by_user = None
if user.is_authenticated():
created_by_user = Poll.objects.created_by_user(user.id)
answered_by_user = Poll.objects.answered_by_user(user.id)
sidebar_polls = {'created_by_user': created_by_user,
'answered_by_user': answered_by_user,
'recent': Poll.objects.recent()}
return sidebar_polls
def get_form_choices(choices):
form_choices = []
for choice in choices:
form_choices.append((str(choice.id), choice.choice))
return form_choices
def startpage(request):
""" Start page. """
sidebar_polls = get_sidebar_polls(request.user)
t = loader.get_template('polls-index.html')
c = RequestContext(request,
{'sidebar_polls': sidebar_polls,
'navigation': 'polls',
'navigation2': 'polls-all',})
return HttpResponse(t.render(c))
def show_poll(request, year, month, day, slug):
form = None
poll = get_object_or_404(Poll, slug=slug)
choices = Choice.objects.get_choices_and_votes_for_poll(poll.id)
show_results = False
if 'show-results' in request.GET or poll.status == "CLOSED":
show_results = True
if not request.user.is_authenticated():
voted_for_choice_id = None
else:
# Only show form if authenticated
try:
vote = Vote.objects.get(Q(user=request.user.id)&
Q(choice__poll=poll.id))
voted_for_choice_id = vote.choice.id
show_results = True
except Vote.DoesNotExist:
voted_for_choice_id = None
form_choices = get_form_choices(choices)
if request.method == 'POST':
form = PollVotingForm(request.POST,
choices=form_choices,
allow_new_choices=poll.allow_new_choices)
if form.is_valid():
if poll.allow_new_choices:
choice_id, choice_text = form.cleaned_data['choices']
else:
choice_id = form.cleaned_data['choices']
if choice_id == 'OTHER':
# Check for duplicates
choice, created = Choice.objects \
.get_or_create(poll=poll,
choice=choice_text,
defaults={'user': request.user})
# Voted already?
if voted_for_choice_id:
# Yes, change vote
vote.choice = choice
vote.save()
else:
# No, add vote
Vote.objects.create(user=request.user, choice=choice)
else:
# Check that the choice is valid for this poll
choice = get_object_or_404(Choice,
id=choice_id,
poll=poll.id)
# Voted already?
if voted_for_choice_id:
# Yes, change vote
vote.choice = choice
vote.save()
else:
# No, add vote
Vote.objects.create(user=request.user, choice=choice)
voted_for_choice_id = choice.id
choices = Choice.objects.get_choices_and_votes_for_poll(poll.id)
form_choices = get_form_choices(choices)
if poll.allow_new_choices:
poll_form_defaults = {'choices': (str(voted_for_choice_id),
'')}
else:
poll_form_defaults = {'choices': str(voted_for_choice_id)}
form = PollVotingForm(choices=form_choices,
allow_new_choices=poll.allow_new_choices,
initial=poll_form_defaults)
else:
# Form not submitted
if voted_for_choice_id:
if poll.allow_new_choices:
poll_form_defaults = {'choices': (str(voted_for_choice_id),
'')}
else:
poll_form_defaults = {'choices': str(voted_for_choice_id)}
form = PollVotingForm(choices=form_choices,
allow_new_choices=poll.allow_new_choices,
initial=poll_form_defaults)
else:
form = PollVotingForm(choices=form_choices,
allow_new_choices=poll.allow_new_choices)
number_of_votes = Vote.objects.votes_for_poll(poll.id).count()
related_polls = None
sidebar_polls = get_sidebar_polls(request.user)
t = loader.get_template('polls-show-poll.html')
c = RequestContext(request,
{'poll': poll,
'choices': choices,
'form': form,
'vote_id': voted_for_choice_id,
'number_of_votes': number_of_votes,
'related_polls': related_polls,
'sidebar_polls': sidebar_polls,
'show_results': show_results})
return HttpResponse(t.render(c))
@login_required
def create_poll(request):
if request.method == 'POST':
form = PollForm(request, request.POST)
if form.is_valid():
p = form.save()
return HttpResponseRedirect(reverse('molnet-polls-edit-poll',
kwargs={'slug': p.slug}))
else:
form = PollForm(request)
sidebar_polls = get_sidebar_polls(request.user)
t = loader.get_template('polls-create-poll.html')
c = RequestContext(request,
{'form': form,
'sidebar_polls': sidebar_polls,
'navigation': 'polls',
'navigation2': 'polls-create',})
return HttpResponse(t.render(c))
@login_required
def edit_poll(request, slug):
poll = get_object_or_404(Poll, slug=slug)
if request.user != poll.user:
raise PermissionDenied("You must own a poll in order to edit it.")
choices = Choice.objects.get_choices_and_votes_for_poll(poll.id)
poll_form = PollForm(request, instance=poll, prefix='poll')
choice_form = ChoiceForm(request, poll, prefix='choice')
if request.method == 'POST':
if 'poll' in request.POST:
poll_form = PollForm(request,
request.POST,
instance=poll,
prefix='poll')
if poll_form.is_valid():
p = poll_form.save()
return HttpResponseRedirect(reverse('molnet-polls-edit-poll',
kwargs={'slug': p.slug}))
elif 'choice' in request.POST:
choice_form = ChoiceForm(request,
poll,
request.POST,
prefix='choice')
if choice_form.is_valid():
choice, created = Choice.objects \
.get_or_create(poll=poll,
choice=choice_form.cleaned_data['choice'],
defaults={'user': request.user})
return HttpResponseRedirect(reverse('molnet-polls-edit-poll',
kwargs={'slug':
poll.slug}))
elif 'delete-choice' in request.POST and 'choice-id' in request.POST:
try:
                Choice.objects.get(id=request.POST['choice-id'],
                                   poll=poll).delete()
return HttpResponseRedirect(reverse('molnet-polls-edit-poll',
kwargs={'slug': poll.slug}))
except Choice.DoesNotExist:
raise Http404
elif 'delete' in request.POST:
poll.delete()
return HttpResponseRedirect(reverse('molnet-polls-startpage'))
elif 'close' in request.POST:
poll.status="CLOSED"
poll.save()
return HttpResponseRedirect(reverse('molnet-polls-edit-poll',
kwargs={'slug': poll.slug}))
elif 're-open' in request.POST:
poll.status="PUBLISHED"
poll.save()
return HttpResponseRedirect(reverse('molnet-polls-edit-poll',
kwargs={'slug': poll.slug}))
elif 'unpublish' in request.POST:
poll.status="DRAFT"
poll.save()
return HttpResponseRedirect(reverse('molnet-polls-edit-poll',
kwargs={'slug': poll.slug}))
elif 'publish' in request.POST:
poll.status="PUBLISHED"
poll.published_at = datetime.datetime.now()
poll.save()
return HttpResponseRedirect(reverse('molnet-polls-edit-poll',
kwargs={'slug': poll.slug}))
else:
raise Http404
related_polls = None
sidebar_polls = get_sidebar_polls(request.user)
t = loader.get_template('polls-edit-poll.html')
c = RequestContext(request,
{'poll': poll,
'choices': choices,
'choice_form': choice_form,
'poll_form': poll_form,
'related_polls': related_polls,
'sidebar_polls': sidebar_polls})
return HttpResponse(t.render(c))
|
|
# Copyright 2016-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import datetime
from dateutil import tz as tzutil
import json
import logging
import os
import re
import time
import uuid
from collections import OrderedDict
from botocore.exceptions import ClientError
import boto3
from .common import BaseTest, event_data
from c7n.exceptions import PolicyValidationError
from c7n.executor import MainThreadExecutor
from c7n.resources import rds
from c7n import tags
logger = logging.getLogger(name="c7n.tests")
class RDSTest(BaseTest):
def test_rds_config_event(self):
event = event_data("rds-from-rule.json", "config")
p = self.load_policy({"name": "rds", "resource": "rds"})
source = p.resource_manager.get_source("config")
resource_config = json.loads(event["invokingEvent"])["configurationItem"]
resource = source.load_resource(resource_config)
self.assertEqual(
resource["Tags"], [{u"Key": u"workload-type", u"Value": u"other"}]
)
def test_rds_stop(self):
session_factory = self.replay_flight_data("test_rds_stop")
db_instance_id = "rds-test-instance-1"
client = session_factory().client("rds")
p = self.load_policy(
{
"name": "rds-stop",
"resource": "rds",
"filters": [{"DBInstanceIdentifier": db_instance_id}],
"actions": ["stop"],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["DBInstanceStatus"], "available")
result = client.describe_db_instances(DBInstanceIdentifier=db_instance_id)
self.assertEqual(result["DBInstances"][0]["DBInstanceStatus"], "stopping")
def test_rds_start(self):
session_factory = self.replay_flight_data("test_rds_start")
db_instance_id = "rds-test-instance-2"
client = session_factory().client("rds")
p = self.load_policy(
{
"name": "rds-start",
"resource": "rds",
"filters": [{"DBInstanceIdentifier": db_instance_id}],
"actions": ["start"],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["DBInstanceStatus"], "stopped")
result = client.describe_db_instances(DBInstanceIdentifier=db_instance_id)
self.assertEqual(result["DBInstances"][0]["DBInstanceStatus"], "starting")
def test_rds_autopatch(self):
session_factory = self.replay_flight_data("test_rds_auto_patch")
p = self.load_policy(
{
"name": "rds-tags",
"resource": "rds",
"filters": [{"AutoMinorVersionUpgrade": False}],
"actions": ["auto-patch"],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_rds_autopatch_with_window(self):
window = "mon:23:00-tue:01:00"
session_factory = self.replay_flight_data("test_rds_auto_patch_with_window")
p = self.load_policy(
{
"name": "rds-tags",
"resource": "rds",
"filters": [{"AutoMinorVersionUpgrade": False}],
"actions": [{"type": "auto-patch", "minor": True, "window": window}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
rds = session_factory().client("rds")
details = rds.describe_db_instances(
DBInstanceIdentifier=resources[0]["DBInstanceIdentifier"]
)
details = details["DBInstances"][0]
self.assertTrue(details["AutoMinorVersionUpgrade"])
self.assertEqual(details["PreferredMaintenanceWindow"], window)
def test_rds_tags(self):
session_factory = self.replay_flight_data("test_rds_tags")
p = self.load_policy(
{
"name": "rds-tags",
"resource": "rds",
"filters": [{"tag:Platform": "postgres"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_rds_tag_trim(self):
self.patch(tags.TagTrim, "max_tag_count", 1)
session_factory = self.replay_flight_data("test_rds_tag_trim")
p = self.load_policy(
{
"name": "rds-tags",
"resource": "rds",
"filters": [{"tag:Platform": "postgres"}],
"actions": [{"type": "tag-trim", "preserve": ["Name", "Owner"]}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_rds_tag_and_remove(self):
self.patch(rds.RDS, "executor_factory", MainThreadExecutor)
session_factory = self.replay_flight_data("test_rds_tag_and_remove")
client = session_factory().client("rds")
p = self.load_policy(
{
"name": "rds-tag",
"resource": "rds",
"filters": [{"tag:Platform": "postgres"}],
"actions": [{"type": "tag", "key": "xyz", "value": "hello world"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
arn = p.resource_manager.generate_arn(resources[0]["DBInstanceIdentifier"])
tags = client.list_tags_for_resource(ResourceName=arn)
tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
self.assertTrue("xyz" in tag_map)
policy = self.load_policy(
{
"name": "rds-remove-tag",
"resource": "rds",
"filters": [{"tag:xyz": "not-null"}],
"actions": [{"type": "remove-tag", "tags": ["xyz"]}],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
tags = client.list_tags_for_resource(ResourceName=arn)
tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
self.assertFalse("xyz" in tag_map)
def test_rds_mark_and_match(self):
session_factory = self.replay_flight_data("test_rds_mark_and_match")
p = self.load_policy(
{
"name": "rds-mark",
"resource": "rds",
"filters": [{"tag:Platform": "postgres"}],
"actions": [
{
"type": "mark-for-op",
"tag": "custodian_next",
"days": 1,
"op": "delete",
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
policy = self.load_policy(
{
"name": "rds-mark-filter",
"resource": "rds",
"filters": [
{
"type": "marked-for-op",
"tag": "custodian_next",
"op": "delete",
"skew": 1,
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
def test_rds_mark_hours(self):
localtz = tzutil.gettz("Etc/UTC")
dt = datetime.datetime.now(localtz)
dt = dt.replace(
year=2018, month=5, day=9, hour=21, minute=20, second=0, microsecond=0
)
session_factory = self.replay_flight_data("test_rds_mark_hours")
session = session_factory(region="us-east-1")
rds = session.client("rds")
policy = self.load_policy(
{
"name": "rds-mark-5-hours",
"resource": "rds",
"filters": [{"tag:CreatorName": "absent"}],
"actions": [{"type": "mark-for-op", "hours": 5, "op": "delete"}],
},
config={"account_id": "123456789012"},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
resource = rds.list_tags_for_resource(
ResourceName=resources[0]["DBInstanceArn"]
)
tags = [t["Value"] for t in resource["TagList"] if t["Key"] == "maid_status"]
result = datetime.datetime.strptime(
tags[0].strip().split("@", 1)[-1], "%Y/%m/%d %H%M %Z"
).replace(
tzinfo=localtz
)
self.assertEqual(result, dt)
def test_rds_marked_hours(self):
session_factory = self.replay_flight_data("test_rds_marked_hours")
policy = self.load_policy(
{
"name": "rds-marked-for-op-hours",
"resource": "rds",
"filters": [{"type": "marked-for-op", "op": "delete"}],
},
config={"account_id": "123456789012"},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["DBInstanceIdentifier"], "db1")
def test_rds_default_vpc(self):
session_factory = self.replay_flight_data("test_rds_default_vpc")
p = self.load_policy(
{
"name": "rds-default-filters",
"resource": "rds",
"filters": [{"type": "default-vpc"}],
},
config={"region": "us-west-2"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 2)
def test_rds_kms_alias(self):
session_factory = self.replay_flight_data("test_rds_kms_alias")
p = self.load_policy(
{
"name": "rds-aws-managed-kms-keys-filters",
"resource": "rds",
"filters": [
{
"type": "kms-alias",
"key": "AliasName",
"value": "^(alias/aws/)",
"op": "regex",
}
],
},
config={"region": "us-west-2"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_rds_snapshot(self):
session_factory = self.replay_flight_data("test_rds_snapshot")
dt = datetime.datetime.now().replace(
year=2017, month=12, day=11, hour=14, minute=9
)
suffix = dt.strftime("%Y-%m-%d-%H-%M")
p = self.load_policy(
{
"name": "rds-snapshot",
"resource": "rds",
"filters": [{"DBInstanceIdentifier": "c7n-snapshot-test"}],
"actions": [{"type": "snapshot"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory(region="us-east-1").client("rds")
snapshot = client.describe_db_snapshots(
DBInstanceIdentifier=resources[0]["DBInstanceIdentifier"]
)[
"DBSnapshots"
][
0
]
self.assertEqual(
snapshot["DBSnapshotIdentifier"],
"backup-%s-%s" % (resources[0]["DBInstanceIdentifier"], suffix),
)
def test_rds_retention(self):
session_factory = self.replay_flight_data("test_rds_retention")
p = self.load_policy(
{
"name": "rds-snapshot",
"resource": "rds",
"actions": [{"type": "retention", "days": 21}],
},
config={"region": "us-west-2"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 5)
def test_rds_retention_copy_tags(self):
session_factory = self.replay_flight_data("test_rds_retention")
p = self.load_policy(
{
"name": "rds-snapshot",
"resource": "rds",
"actions": [{"type": "retention", "days": 21, "copy-tags": True}],
},
config={"region": "us-west-2"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 5)
def test_rds_restore(self):
self.patch(rds.RestoreInstance, "executor_factory", MainThreadExecutor)
self.change_environment(AWS_DEFAULT_REGION="us-east-2")
session_factory = self.replay_flight_data("test_rds_restore")
client = session_factory().client("rds")
instance_id = "mxtest"
self.assertRaises(
ClientError, client.describe_db_instances, DBInstanceIdentifier=instance_id
)
p = self.load_policy(
{
"name": "rds-restore",
"resource": "rds-snapshot",
"filters": [{"MasterUsername": "mxtester"}, "latest"],
"actions": [
{
"type": "restore",
"restore_options": {"DBInstanceIdentifier": instance_id},
}
],
},
config=dict(region="us-east-2"),
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
try:
client.describe_db_instances(DBInstanceIdentifier=instance_id)
except ClientError:
self.fail("DB Not found")
def test_rds_delete_copy(self):
session_factory = self.replay_flight_data("test_rds_delete_copy_restore")
client = session_factory().client("rds")
instance_id = "mxtest"
db_info = client.describe_db_instances(DBInstanceIdentifier=instance_id)
self.assertFalse(db_info["DBInstances"][0]["CopyTagsToSnapshot"])
p = self.load_policy(
{
"name": "rds-delete",
"resource": "rds",
"filters": [{"DBInstanceIdentifier": instance_id}],
"actions": [{"type": "delete", "copy-restore-info": True}],
},
config=dict(region="us-east-2"),
session_factory=session_factory,
)
p.run()
db_info = client.describe_db_instances(DBInstanceIdentifier=instance_id)
self.assertTrue(db_info["DBInstances"][0]["CopyTagsToSnapshot"])
snaps = p.resource_manager.get_resource_manager("rds-snapshot").get_resources(
("final-mxtest-2017-05-25",)
)
snap_keys = {t["Key"] for t in snaps[0]["Tags"]}
self.assertTrue(snap_keys.issuperset(rds.RestoreInstance.restore_keys))
def test_rds_delete(self):
session_factory = self.replay_flight_data("test_rds_delete")
p = self.load_policy(
{
"name": "rds-delete",
"resource": "rds",
"filters": [{"tag:Owner": "test"}],
"actions": [{"type": "delete", "skip-snapshot": True}],
},
config=dict(region="us-west-2"),
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_rds_available_engine_upgrades(self):
session_factory = self.replay_flight_data(
"test_rds_available_engine_upgrades", zdata=True
)
client = session_factory().client("rds")
upgrades = rds._get_available_engine_upgrades(client)
self.assertEqual(upgrades["postgres"]["9.3.1"], "9.3.14")
self.assertEqual(
upgrades["sqlserver-ex"]["10.50.6000.34.v1"], "10.50.6529.0.v1"
)
upgrades = rds._get_available_engine_upgrades(client, major=True)
self.assertEqual(upgrades["postgres"]["9.3.1"], "9.4.9")
self.assertEqual(upgrades["postgres"]["9.4.9"], "9.5.4")
self.assertEqual(upgrades["sqlserver-ex"]["10.50.2789.0.v1"], "12.00.5000.0.v1")
def test_rds_upgrade_available(self):
session_factory = self.replay_flight_data("test_rds_minor_upgrade_available")
p = self.load_policy(
{
"name": "rds-upgrade-available",
"resource": "rds",
"filters": [{"type": "upgrade-available", "major": True}],
"actions": []
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
{r["EngineVersion"]: r.get("c7n-rds-engine-upgrade") for r in resources},
{u"5.6.27": u"5.7.11"},
)
def test_rds_minor_upgrade_unavailable(self):
session_factory = self.replay_flight_data("test_rds_minor_upgrade_unavailable")
p = self.load_policy(
{
"name": "rds-upgrade-done",
"resource": "rds",
"filters": [{"type": "upgrade-available", "value": False}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
self.assertEqual(
{r["EngineVersion"]: r.get("c7n-rds-engine-upgrade") for r in resources},
{u"5.5.41": u"5.5.46", u"5.6.29": None, u"5.7.11": None},
)
def test_rds_minor_upgrade_do(self):
session_factory = self.replay_flight_data("test_rds_minor_upgrade_do")
p = self.load_policy(
{
"name": "rds-upgrade-do",
"resource": "rds",
"filters": [
{
"type": "marked-for-op",
"tag": "custodian_upgrade",
"op": "upgrade",
"skew": 4,
}
],
"actions": [{"type": "upgrade", "immediate": False}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
{r["EngineVersion"]: r.get("c7n-rds-engine-upgrade") for r in resources},
{u"5.6.29": u"5.6.35"},
)
self.assertEqual(resources[0]["DBInstanceIdentifier"], "c7n-mysql-test-03")
self.assertEqual(resources[0]["EngineVersion"], "5.6.29")
self.assertEqual(resources[0]["c7n-rds-engine-upgrade"], "5.6.35")
def test_rds_eligible_start_stop(self):
resource = {"DBInstanceIdentifier": "ABC",
"DBInstanceStatus": "available",
"Engine": "mysql"}
self.assertTrue(rds._eligible_start_stop(resource, "available"))
resource = {"DBInstanceIdentifier": "ABC",
"DBInstanceStatus": "stopped",
"Engine": "mysql"}
self.assertFalse(rds._eligible_start_stop(resource, "available"))
resource = {
"DBInstanceIdentifier": "ABC",
"DBInstanceStatus": "available",
"Engine": "postgres",
"MultiAZ": True,
}
self.assertTrue(rds._eligible_start_stop(resource))
resource = {
"DBInstanceIdentifier": "ABC",
"DBInstanceStatus": "available",
"Engine": "sqlserver-ee",
"MultiAZ": True,
}
self.assertFalse(rds._eligible_start_stop(resource))
resource = {
"DBInstanceIdentifier": "ABC",
"DBInstanceStatus": "available",
"Engine": "docdb"
}
self.assertFalse(rds._eligible_start_stop(resource))
resource = {
"DBInstanceIdentifier": "ABC",
"DBInstanceStatus": "available",
"Engine": "postgres",
"ReadReplicaDBInstanceIdentifiers": ["sbbdevslave"],
}
self.assertFalse(rds._eligible_start_stop(resource))
resource = {
"DBInstanceIdentifier": "ABC",
"DBInstanceStatus": "available",
"Engine": "mysql",
"ReadReplicaSourceDBInstanceIdentifier": "sbbdev",
}
self.assertFalse(rds._eligible_start_stop(resource))
def test_rds_db_instance_eligible_for_backup(self):
resource = {"DBInstanceIdentifier": "ABC"}
self.assertFalse(rds._db_instance_eligible_for_backup(resource))
resource = {"DBInstanceIdentifier": "ABC", "DBInstanceStatus": "funky"}
self.assertFalse(rds._db_instance_eligible_for_backup(resource))
resource = {"DBInstanceIdentifier": "ABC", "DBInstanceStatus": "available"}
self.assertTrue(rds._db_instance_eligible_for_backup(resource))
resource = {
"DBInstanceIdentifier": "ABC",
"DBInstanceStatus": "available",
"DBClusterIdentifier": "C1",
}
self.assertFalse(rds._db_instance_eligible_for_backup(resource))
resource = {
"DBInstanceIdentifier": "ABC",
"DBInstanceStatus": "available",
"ReadReplicaSourceDBInstanceIdentifier": "R1",
"Engine": "postgres",
}
self.assertFalse(rds._db_instance_eligible_for_backup(resource))
resource = {
"DBInstanceIdentifier": "ABC",
"DBInstanceStatus": "available",
"Engine": "postgres",
}
self.assertTrue(rds._db_instance_eligible_for_backup(resource))
resource = {
"DBInstanceIdentifier": "ABC",
"DBInstanceStatus": "available",
"Engine": "mysql",
"EngineVersion": "5.5.1",
}
self.assertTrue(rds._db_instance_eligible_for_backup(resource))
resource = {
"DBInstanceIdentifier": "ABC",
"DBInstanceStatus": "available",
"ReadReplicaSourceDBInstanceIdentifier": "R1",
"Engine": "mysql",
"EngineVersion": "5.5.1",
}
self.assertFalse(rds._db_instance_eligible_for_backup(resource))
resource = {
"DBInstanceIdentifier": "ABC",
"DBInstanceStatus": "available",
"ReadReplicaSourceDBInstanceIdentifier": "R1",
"Engine": "mysql",
"EngineVersion": "5.7.1",
}
self.assertTrue(rds._db_instance_eligible_for_backup(resource))
resource = {
"DBInstanceIdentifier": "ABC",
"DBInstanceStatus": "available",
"ReadReplicaSourceDBInstanceIdentifier": "R1",
"Engine": "mysql",
"EngineVersion": "6.1.1",
}
self.assertTrue(rds._db_instance_eligible_for_backup(resource))
def test_rds_db_instance_eligible_for_final_snapshot(self):
resource = {"DBInstanceIdentifier": "ABC"}
self.assertTrue(rds._db_instance_eligible_for_final_snapshot(resource))
resource = {"DBInstanceIdentifier": "ABC", "DBInstanceStatus": "available"}
self.assertTrue(rds._db_instance_eligible_for_final_snapshot(resource))
resource = {"DBInstanceIdentifier": "ABC", "DBInstanceStatus": "creating"}
self.assertFalse(rds._db_instance_eligible_for_final_snapshot(resource))
resource = {"DBInstanceIdentifier": "ABC", "DBInstanceStatus": "failed"}
self.assertFalse(rds._db_instance_eligible_for_final_snapshot(resource))
resource = {
"DBInstanceIdentifier": "ABC", "DBInstanceStatus": "incompatible-restore"
}
self.assertFalse(rds._db_instance_eligible_for_final_snapshot(resource))
resource = {
"DBInstanceIdentifier": "ABC", "DBInstanceStatus": "incompatible-network"
}
self.assertFalse(rds._db_instance_eligible_for_final_snapshot(resource))
resource = {
"DBInstanceIdentifier": "ABC",
"DBInstanceStatus": "available",
"ReadReplicaSourceDBInstanceIdentifier": "R1",
"Engine": "mysql",
"EngineVersion": "5.7.1",
}
self.assertFalse(rds._db_instance_eligible_for_final_snapshot(resource))
resource = {
"DBInstanceIdentifier": "ABC",
"DBInstanceStatus": "available",
"ReadReplicaSourceDBInstanceIdentifier": "",
"Engine": "mysql",
"EngineVersion": "5.7.1",
}
self.assertTrue(rds._db_instance_eligible_for_final_snapshot(resource))
def test_rds_db_subnetgroup_delete(self):
session_factory = self.replay_flight_data("test_rdssubnetgroup_delete")
policy = self.load_policy(
{
"name": "db-subnet-group-delete",
"resource": "rds-subnet-group",
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertGreater(len(resources), 0, "Test should delete db subnetgroup")
def test_rds_db_subnetgroup_unused(self):
session_factory = self.replay_flight_data("test_rdssubnetgroup_unused")
policy = self.load_policy(
{
"name": "db-subnet-group-unused",
"resource": "rds-subnet-group",
"filters": [
{'DBSubnetGroupName': 'not-used'},
{"type": "unused"}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1, "Resources should be unused")
self.assertEqual(resources[0]['DBSubnetGroupName'], "not-used")
def test_rds_modify_db(self):
session_factory = self.replay_flight_data("test_rds_modify_db")
p = self.load_policy(
{
"name": "rds-modify-db",
"resource": "rds",
"filters": [
{"DeletionProtection": True},
{"MasterUsername": "testtest"}
],
"actions": [
{
"type": "modify-db",
"update": [
{
"property": 'DeletionProtection',
"value": False
}
],
"immediate": True
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("rds")
db_info = client.describe_db_instances(DBInstanceIdentifier="testtest")
self.assertFalse(db_info["DBInstances"][0]["DeletionProtection"])
def test_rds_modify_db_enable_cloudwatch(self):
session_factory = self.replay_flight_data("test_rds_modify_db_enable_cloudwatch")
p = self.load_policy(
{
"name": "rds-modify-enable-cloudwatch",
"resource": "rds",
"filters": [
{
"type": "value",
"key": "DBInstanceIdentifier",
"value": "database-2"
},
{
"type": "value",
"key": "EnabledCloudwatchLogsExports[]",
"value": [
"error"
],
"op": "ni"
}
],
"actions": [
{
"type": "modify-db",
"update": [
{
"property": 'CloudwatchLogsExportConfiguration',
"value": {
'EnableLogTypes': [
"error"
]
}
}
],
"immediate": True
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("rds")
db_info = client.describe_db_instances(DBInstanceIdentifier="database-2")
self.assertIn('error', db_info["DBInstances"][0]["EnabledCloudwatchLogsExports"])
def test_rds_modify_db_validation_monitoring_error(self):
with self.assertRaises(PolicyValidationError) as err:
self.load_policy({
'name': 'enable-monitoring',
'resource': 'rds',
"actions": [
{
"type": "modify-db",
"update": [
{
"property": 'MonitoringInterval',
"value": 60
}
],
"immediate": True
}
]})
self.assertIn((
'A MonitoringRoleARN value is required'),
str(err.exception))
def test_rds_modify_db_validation_cloudwatch_error(self):
with self.assertRaises(PolicyValidationError) as err:
self.load_policy({
'name': 'enable-cloudwatch',
'resource': 'rds',
"actions": [
{
"type": "modify-db",
"update": [
{
"property": 'CloudwatchLogsExportConfiguration',
"value": [
"error"
]
}
],
"immediate": True
}
]})
self.assertIn((
'EnableLogTypes or DisableLogTypes input list is required'),
str(err.exception))
def test_rds_modify_db_enable_perfinsights(self):
session_factory = self.replay_flight_data("test_rds_modify_db_enable_perfinsights")
p = self.load_policy(
{
"name": "rds-modify-enable-perfinsights",
"resource": "rds",
"filters": [
{
"type": "value",
"key": "DBInstanceIdentifier",
"value": "database-4"
},
{
"type": "value",
"key": "PerformanceInsightsEnabled",
"value": False
}
],
"actions": [
{
"type": "modify-db",
"update": [
{
"property": "EnablePerformanceInsights",
"value": True
}
],
"immediate": True
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("rds")
db_info = client.describe_db_instances(DBInstanceIdentifier="database-4")
self.assertTrue(db_info["DBInstances"][0]["PerformanceInsightsEnabled"])
class RDSSnapshotTest(BaseTest):
def test_rds_snapshot_copy_tags_enable(self):
session_factory = self.replay_flight_data("test_rds_snapshot_copy_tags_enable")
client = session_factory(region="us-east-1").client("rds")
self.assertFalse(
client.describe_db_instances(DBInstanceIdentifier="mydbinstance")[
"DBInstances"
][
0
][
"CopyTagsToSnapshot"
]
)
p = self.load_policy(
{
"name": "rds-enable-snapshot-tag-copy",
"resource": "rds",
"filters": [
{"type": "value", "key": "Engine", "value": "mysql", "op": "eq"}
],
"actions": [{"type": "set-snapshot-copy-tags", "enable": True}],
},
session_factory=session_factory,
)
resources = p.run()
        self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["DBInstanceIdentifier"], "mydbinstance")
self.assertTrue(
client.describe_db_instances(DBInstanceIdentifier="mydbinstance")[
"DBInstances"
][
0
][
"CopyTagsToSnapshot"
]
)
def test_rds_snapshot_copy_tags_disable(self):
session_factory = self.replay_flight_data("test_rds_snapshot_copy_tags_disable")
client = session_factory(region="us-east-1").client("rds")
self.assertTrue(
client.describe_db_instances(DBInstanceIdentifier="mydbinstance")[
"DBInstances"
][
0
][
"CopyTagsToSnapshot"
]
)
p = self.load_policy(
{
"name": "rds-enable-snapshot-tag-copy",
"resource": "rds",
"filters": [
{"type": "value", "key": "Engine", "value": "mysql", "op": "eq"}
],
"actions": [{"type": "set-snapshot-copy-tags", "enable": False}],
},
session_factory=session_factory,
)
resources = p.run()
        self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["DBInstanceIdentifier"], "mydbinstance")
self.assertFalse(
client.describe_db_instances(DBInstanceIdentifier="mydbinstance")[
"DBInstances"
][
0
][
"CopyTagsToSnapshot"
]
)
def test_rds_snapshot_access(self):
factory = self.replay_flight_data("test_rds_snapshot_access")
p = self.load_policy(
{
"name": "rds-snap-access",
"resource": "rds-snapshot",
"filters": ["cross-account"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 2)
self.assertEqual(
{
r["DBSnapshotIdentifier"]: r["c7n:CrossAccountViolations"]
for r in resources
},
{"tidx-pub": ["all"], "tidx-rdx": ["619193117841"]},
)
def test_rds_latest_manual(self):
# preconditions
# one db with manual and automatic snapshots
factory = self.replay_flight_data("test_rds_snapshot_latest")
p = self.load_policy(
{
"name": "rds-latest-snaps",
"resource": "rds-snapshot",
"filters": [{"type": "latest", "automatic": False}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["DBSnapshotIdentifier"], "originb")
def test_rds_latest(self):
# preconditions
# one db with manual and automatic snapshots
factory = self.replay_flight_data("test_rds_snapshot_latest")
p = self.load_policy(
{
"name": "rds-latest-snaps",
"resource": "rds-snapshot",
"filters": ["latest"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]["DBSnapshotIdentifier"], "rds:originb-2016-12-28-09-15"
)
def test_rds_cross_region_copy_lambda(self):
self.assertRaises(
PolicyValidationError,
self.load_policy,
{
"name": "rds-copy-fail",
"resource": "rds-snapshot",
"mode": {"type": "config-rule"},
"actions": [{"type": "region-copy", "target_region": "us-east-2"}],
},
)
def test_rds_cross_region_copy_skip_same_region(self):
factory = self.replay_flight_data("test_rds_snapshot_latest")
output = self.capture_logging("custodian.actions")
p = self.load_policy(
{
"name": "rds-copy-skip",
"resource": "rds-snapshot",
"actions": [{"type": "region-copy", "target_region": "us-east-2"}],
},
config={'region': 'us-east-2'},
session_factory=factory,
)
resources = p.run()
self.assertFalse([r for r in resources if "c7n:CopiedSnapshot" in r])
self.assertIn("Source and destination region are the same", output.getvalue())
def test_rds_cross_region_copy_many(self):
# preconditions
# rds snapshot, encrypted in region with kms, and tags
# in this scenario we have 9 snapshots in source region,
# 3 snaps already in target region, 6 to copy, which means
# we will hit transfer limits.
factory = self.replay_flight_data("test_rds_snapshot_region_copy_many")
# no sleep till, beastie boys ;-)
def brooklyn(delay):
return
output = self.capture_logging("c7n.retry", level=logging.DEBUG)
self.patch(time, "sleep", brooklyn)
self.change_environment(AWS_DEFAULT_REGION="us-east-1")
p = self.load_policy(
{
"name": "rds-snapshot-region-copy",
"resource": "rds-snapshot",
"filters": [{"DBInstanceIdentifier": "originb"}],
"actions": [
{
"type": "region-copy",
"target_region": "us-east-2",
"tags": {"migrated_from": "us-east-1"},
"target_key": "cb291f53-f3ab-4e64-843e-47b0a7c9cf61",
}
],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 9)
self.assertEqual(6, len([r for r in resources if "c7n:CopiedSnapshot" in r]))
self.assertEqual(output.getvalue().count("retrying"), 2)
def test_rds_cross_region_copy(self):
# preconditions
# rds snapshot, encrypted in region with kms, and tags
factory = self.replay_flight_data("test_rds_snapshot_region_copy")
client = factory().client("rds", region_name="us-east-2")
self.change_environment(AWS_DEFAULT_REGION="us-east-1")
p = self.load_policy(
{
"name": "rds-snapshot-region-copy",
"resource": "rds-snapshot",
"filters": [{"DBSnapshotIdentifier": "rds:originb-2016-12-28-09-15"}],
"actions": [
{
"type": "region-copy",
"target_region": "us-east-2",
"tags": {"migrated_from": "us-east-1"},
"target_key": "cb291f53-f3ab-4e64-843e-47b0a7c9cf61",
}
],
},
config=dict(region="us-east-1"),
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
snapshots = client.describe_db_snapshots(
DBSnapshotIdentifier=resources[0]["c7n:CopiedSnapshot"].rsplit(":", 1)[1]
)[
"DBSnapshots"
]
self.assertEqual(len(snapshots), 1)
self.assertEqual(snapshots[0]["DBInstanceIdentifier"], "originb")
tags = {
t["Key"]: t["Value"]
for t in client.list_tags_for_resource(
ResourceName=resources[0]["c7n:CopiedSnapshot"]
)[
"TagList"
]
}
self.assertEqual(
{
"migrated_from": "us-east-1",
"app": "mgmt-portal",
"env": "staging",
"workload-type": "other",
},
tags,
)
def test_rds_snapshot_tag_filter(self):
factory = self.replay_flight_data("test_rds_snapshot_tag_filter")
client = factory().client("rds")
p = self.load_policy(
{
"name": "rds-snapshot-tag-filter",
"resource": "rds-snapshot",
"filters": [{"type": "marked-for-op", "op": "delete"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
arn = p.resource_manager.generate_arn(resources[0]["DBSnapshotIdentifier"])
tags = client.list_tags_for_resource(ResourceName=arn)
tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
self.assertTrue("maid_status" in tag_map)
self.assertTrue("delete@" in tag_map["maid_status"])
def test_rds_snapshot_age_filter(self):
factory = self.replay_flight_data("test_rds_snapshot_age_filter")
p = self.load_policy(
{
"name": "rds-snapshot-age-filter",
"resource": "rds-snapshot",
"filters": [{"type": "age", "days": 7}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_rds_snapshot_trim(self):
factory = self.replay_flight_data("test_rds_snapshot_delete")
p = self.load_policy(
{
"name": "rds-snapshot-trim",
"resource": "rds-snapshot",
"actions": ["delete"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_rds_snapshot_tag(self):
factory = self.replay_flight_data("test_rds_snapshot_mark")
client = factory().client("rds")
p = self.load_policy(
{
"name": "rds-snapshot-tag",
"resource": "rds-snapshot",
"actions": [{"type": "tag", "key": "test-key", "value": "test-value"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
arn = p.resource_manager.generate_arn(resources[0]["DBSnapshotIdentifier"])
tags = client.list_tags_for_resource(ResourceName=arn)
tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
self.assertTrue("test-key" in tag_map)
self.assertTrue("test-value" in tag_map["test-key"])
def test_rds_snapshot_mark(self):
factory = self.replay_flight_data("test_rds_snapshot_mark")
client = factory().client("rds")
p = self.load_policy(
{
"name": "rds-snapshot-mark",
"resource": "rds-snapshot",
"actions": [{"type": "mark-for-op", "op": "delete", "days": 1}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
arn = p.resource_manager.generate_arn(resources[0]["DBSnapshotIdentifier"])
tags = client.list_tags_for_resource(ResourceName=arn)
tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
self.assertTrue("maid_status" in tag_map)
def test_rds_snapshot_unmark(self):
factory = self.replay_flight_data("test_rds_snapshot_unmark")
client = factory().client("rds")
p = self.load_policy(
{
"name": "rds-snapshot-unmark",
"resource": "rds-snapshot",
"actions": [{"type": "unmark"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
arn = p.resource_manager.generate_arn(resources[0]["DBSnapshotIdentifier"])
tags = client.list_tags_for_resource(ResourceName=arn)
tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
self.assertFalse("maid_status" in tag_map)
def test_rds_public_accessible_disable(self):
session_factory = self.replay_flight_data("test_rds_public_accessible_disable")
client = session_factory(region="us-east-1").client("rds")
policy = self.load_policy(
{
"name": "disable-publicly-accessibility",
"resource": "rds",
"filters": [
{"DBInstanceIdentifier": "c7n-test-pa"},
{"PubliclyAccessible": True},
],
"actions": [{"type": "set-public-access", "state": False}],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["DBInstanceIdentifier"], "c7n-test-pa")
self.assertFalse(
client.describe_db_instances(DBInstanceIdentifier="c7n-test-pa")[
"DBInstances"
][
0
][
"PubliclyAccessible"
]
)
class TestModifyVpcSecurityGroupsAction(BaseTest):
def test_rds_remove_matched_security_groups(self):
#
# Test conditions:
# - running 2 Aurora DB clusters in default VPC with 2 instances
# each.
# - translates to 4 actual instances
# - a default security group with id 'sg-7a3fcb13' exists
# - security group named PROD-ONLY-Test-Security-Group exists in
# VPC and is attached to one set of DB instances
# - translates to 2 instances marked non-compliant
#
# Results in 4 DB Instances with default Security Group attached
session_factory = self.replay_flight_data(
"test_rds_remove_matched_security_groups"
)
p = self.load_policy(
{
"name": "rds-remove-matched-security-groups",
"resource": "rds",
"filters": [
{
"type": "security-group",
"key": "GroupName",
"value": "(.*PROD-ONLY.*)",
"op": "regex",
}
],
"actions": [
{
"type": "modify-security-groups",
"remove": "matched",
"isolation-group": "sg-7a3fcb13",
}
],
},
session_factory=session_factory,
)
clean_p = self.load_policy(
{
"name": "rds-verify-remove-matched-security-groups",
"resource": "rds",
"filters": [
{"type": "security-group", "key": "GroupName", "value": "default"}
],
},
session_factory=session_factory,
)
resources = p.run()
clean_resources = clean_p.run()
# clusters autoscale across AZs, so they get -001, -002, etc appended
self.assertIn("test-sg-fail", resources[0]["DBInstanceIdentifier"])
self.assertEqual(len(resources), 2)
self.assertEqual(len(resources[0]["VpcSecurityGroups"]), 1)
# show that it was indeed a replacement of security groups
self.assertEqual(len(clean_resources[0]["VpcSecurityGroups"]), 1)
self.assertEqual(len(clean_resources), 4)
def test_rds_add_security_group(self):
#
# Test conditions:
# - running 2 Aurora DB clusters in default VPC with 2 instances each
# - translates to 4 actual instances
# - a default security group with id 'sg-7a3fcb13' exists -
# attached to all instances
# - security group named PROD-ONLY-Test-Security-Group exists in
# VPC and is attached to 2/4 instances
# - translates to 2 instances marked to get new group attached
#
# Results in 4 instances with default Security Group and
# PROD-ONLY-Test-Security-Group
session_factory = self.replay_flight_data("test_rds_add_security_group")
p = self.load_policy(
{
"name": "add-sg-to-prod-rds",
"resource": "rds",
"filters": [
{"type": "security-group", "key": "GroupName", "value": "default"},
{
"type": "value",
"key": "DBInstanceIdentifier",
"value": "test-sg-fail.*",
"op": "regex",
},
],
"actions": [{"type": "modify-security-groups", "add": "sg-6360920a"}],
},
session_factory=session_factory,
)
clean_p = self.load_policy(
{
"name": "validate-add-sg-to-prod-rds",
"resource": "rds",
"filters": [
{"type": "security-group", "key": "GroupName", "value": "default"},
{
"type": "security-group",
"key": "GroupName",
"value": "PROD-ONLY-Test-Security-Group",
},
],
},
session_factory=session_factory,
)
resources = p.run()
clean_resources = clean_p.run()
self.assertEqual(len(resources), 2)
self.assertIn("test-sg-fail", resources[0]["DBInstanceIdentifier"])
self.assertEqual(len(resources[0]["VpcSecurityGroups"]), 1)
self.assertEqual(len(clean_resources[0]["VpcSecurityGroups"]), 2)
self.assertEqual(len(clean_resources), 4)
def test_rds_filter_by_vpcid(self):
#
# Test conditions:
# Purpose of this test is only to validate that the vpc filter matches on DBSubnetGroup.VpcId
# Reuses the add_security_group flight data -- should match 4 DB instances (all in the filtered VPC)
# Checks that the expected VPC is present in the results
session_factory = self.replay_flight_data("test_rds_add_security_group")
p = self.load_policy(
{
"name": "filter-by-vpcid",
"resource": "rds",
"filters": [
{
"type": "vpc",
"key": "VpcId",
"value": "vpc-09b75e60",
"op": "eq",
},
],
"actions": [{"type": "modify-security-groups", "add": "sg-6360920a"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 4)
self.assertEqual("vpc-09b75e60", resources[0]["DBSubnetGroup"]["VpcId"])
class TestHealthEventsFilter(BaseTest):
def test_rds_health_events_filter(self):
session_factory = self.replay_flight_data("test_rds_health_events_filter")
policy = self.load_policy(
{
"name": "rds-health-events-filter",
"resource": "rds",
"filters": [{"type": "health-event"}],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 0)
class TestRDSParameterGroupFilter(BaseTest):
PARAMGROUP_PARAMETER_FILTER_TEST_CASES = [
# filter_struct, test_func, err_message
(
{"key": "log_destination", "op": "eq", "value": "stderr"},
lambda r: len(r) == 1,
"instances with log_destination == stderr should be 1",
),
(
{"key": "log_destination", "op": "eq", "value": "s3"},
lambda r: len(r) == 0,
"instances with log_destination == s3 should be 0",
),
(
{"key": "log_destination", "op": "ne", "value": "stderr"},
lambda r: len(r) == 0,
"instances with log_destination != stderr should be 0",
),
(
{"key": "log_destination", "op": "ne", "value": "s3"},
lambda r: len(r) == 1,
"instances with log_destination != s3 should be 1",
),
(
{"key": "full_page_writes", "op": "eq", "value": True},
lambda r: len(r) == 1,
"full_page_writes ( a boolean ) should be on",
),
]
def test_param_value_cases(self):
session_factory = self.replay_flight_data("test_rds_param_filter")
policy = self.load_policy(
{"name": "rds-pg-filter", "resource": "rds"},
session_factory=session_factory,
)
resources = policy.resource_manager.resources()
for testcase in self.PARAMGROUP_PARAMETER_FILTER_TEST_CASES:
fdata, assertion, err_msg = testcase
f = policy.resource_manager.filter_registry.get("db-parameter")(
fdata, policy.resource_manager
)
f_resources = f.process(resources)
if not assertion(f_resources):
print(len(f_resources), fdata, assertion)
self.fail(err_msg)
class Resize(BaseTest):
def get_waiting_client(self, session_factory, session, name):
if session_factory.__name__ == "<lambda>": # replaying
return None
else: # recording
return boto3.Session(region_name=session.region_name).client(name)
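# When replaying, the session factory is a placebo-generated lambda, so there
# is nothing real to wait on and callers get None back; when recording, a
# plain boto3 client for the same region is used solely to wait on state.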
def get_dbid(self, recording, flight_data):
if recording:
return "test-" + str(uuid.uuid4())
else:
pill_path = os.path.join(
os.path.dirname(__file__),
"data",
"placebo",
flight_data,
"rds.CreateDBInstance_1.json",
)
pill = json.load(open(pill_path))
return pill["data"]["DBInstance"]["DBInstanceIdentifier"]
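# When recording, a fresh random identifier is generated; when replaying, the
# identifier is read back out of the recorded CreateDBInstance placebo
# response so the policy filter below matches the captured data.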
def install_modification_pending_waiter(self, waiters):
if "DBInstanceModificationPending" in waiters:
return
pattern = waiters["DBInstanceAvailable"]
acceptors = [OrderedDict(eg) for eg in pattern["acceptors"][1:]]
acceptors.insert(
0,
OrderedDict(
expected=True,
matcher="path",
state="success",
argument="!!length(DBInstances[].PendingModifiedValues)",
),
)
waiter = OrderedDict(pattern)
waiter["acceptors"] = acceptors
waiters["DBInstanceModificationPending"] = waiter
def install_modifying_waiter(self, waiters):
if "DBInstanceModifying" in waiters:
return
pattern = waiters["DBInstanceAvailable"]
acceptors = [OrderedDict(eg) for eg in pattern["acceptors"]]
acceptors[0]["expected"] = "modifying"
waiter = OrderedDict(pattern)
waiter["acceptors"] = acceptors
waiters["DBInstanceModifying"] = waiter
def install_waiters(self, client):
# Not provided by boto out of the box; splice our custom waiters
# into the waiter config that get_waiter() reads.
waiters = client._get_waiter_config()["waiters"]
self.install_modification_pending_waiter(waiters)
self.install_modifying_waiter(waiters)
def wait_until(self, client, dbid, status):
if client is None:
return # We're in replay mode. Don't bother waiting.
self.install_waiters(client)
waiter = client.get_waiter("db_instance_" + status)
waiter.wait(Filters=[{"Name": "db-instance-id", "Values": [dbid]}])
def create_instance(self, client, dbid, gb=5):
client.create_db_instance(
Engine="mariadb",
DBInstanceIdentifier=dbid,
DBInstanceClass="db.r3.large",
MasterUsername="eric",
MasterUserPassword="cheese42",
StorageType="gp2",
AllocatedStorage=gb,
BackupRetentionPeriod=0,
) # disable automatic backups
def delete():
client.delete_db_instance(DBInstanceIdentifier=dbid, SkipFinalSnapshot=True)
self.addCleanup(delete)
return dbid
@staticmethod
def get_window_now():
start = datetime.datetime.utcnow()
end = start + datetime.timedelta(seconds=60 * 60) # hour long
fmt = "%a:%H:%M"
return "{}-{}".format(start.strftime(fmt), end.strftime(fmt))
def test_can_get_a_window_now(self):
assert re.match(r"[A-Za-z]{3}:\d\d:\d\d", self.get_window_now())
def start(self, flight_data):
session_factory = self.replay_flight_data(flight_data)
session = session_factory(region="us-west-2")
client = session.client("rds")
waiting_client = self.get_waiting_client(session_factory, session, "rds")
dbid = self.get_dbid(bool(waiting_client), flight_data)
self.create_instance(client, dbid)
wait_until = lambda state: self.wait_until(waiting_client, dbid, state) # NOQA
wait_until("available")
describe = lambda: client.describe_db_instances(DBInstanceIdentifier=dbid)[ # NOQA
"DBInstances"
][
0
]
def resize(**kw):
action = {"type": "resize", "percent": 10}
action.update(kw)
policy = self.load_policy(
{
"name": "rds-resize-up",
"resource": "rds",
"filters": [
{"type": "value", "key": "DBInstanceIdentifier", "value": dbid}
],
"actions": [action],
},
config={"region": "us-west-2"},
session_factory=session_factory,
)
policy.run()
return client, dbid, resize, wait_until, describe
def test_can_resize_up_asynchronously(self):
flight = "test_rds_resize_up_asynchronously"
client, dbid, resize, wait_until, describe = self.start(flight)
resize()
wait_until("modification_pending")
client.modify_db_instance(
DBInstanceIdentifier=dbid, PreferredMaintenanceWindow=self.get_window_now()
)
wait_until("modifying")
wait_until("available")
self.assertEqual(describe()["AllocatedStorage"], 6) # nearest gigabyte
def test_can_resize_up_immediately(self):
flight = "test_rds_resize_up_immediately"
_, _, resize, wait_until, describe = self.start(flight)
resize(immediate=True)
wait_until("modifying")
wait_until("available")
self.assertEqual(describe()["AllocatedStorage"], 6) # nearest gigabyte
class TestReservedRDSInstance(BaseTest):
def test_reserved_rds_instance_query(self):
session_factory = self.replay_flight_data("test_reserved_rds_instance_query")
p = self.load_policy(
{
"name": "filter-rds-reserved-instances",
"resource": "aws.rds-reserved"
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["ReservedDBInstanceId"], "ri-2019-05-06-14-19-06-332")
class RDSEventSubscription(BaseTest):
def test_rds_event_subscription_delete(self):
session_factory = self.replay_flight_data("test_rds_event_subscription_delete")
p = self.load_policy(
{
"name": "rds-event-subscription-delete",
"resource": "aws.rds-subscription",
"filters": [{"type": "value", "key": "tag:name", "value": "pratyush"}],
"actions": [{"type": "delete"}]
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["CustSubscriptionId"], "c7n-test-pratyush")
client = session_factory().client("rds")
response = client.describe_event_subscriptions()
self.assertEqual(len(response.get('EventSubscriptionsList')), 0)
|
|
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from collections import deque
import sys
import traceback
import fixtures
import mock
import netaddr
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import timeutils
import six
from nova.compute import manager
from nova.console import type as ctype
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_block_device
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit import utils as test_utils
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.virt import block_device as driver_block_device
from nova.virt import event as virtevent
from nova.virt import fake
from nova.virt import hardware
from nova.virt import libvirt
from nova.virt.libvirt import imagebackend
LOG = logging.getLogger(__name__)
def catch_notimplementederror(f):
"""Decorator to simplify catching drivers raising NotImplementedError
If a particular call makes a driver raise NotImplementedError, we
log it so that we can extract this information afterwards as needed.
"""
def wrapped_func(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except NotImplementedError:
frame = traceback.extract_tb(sys.exc_info()[2])[-1]
LOG.error("%(driver)s does not implement %(method)s "
"required for test %(test)s" %
{'driver': type(self.connection),
'method': frame[2], 'test': f.__name__})
wrapped_func.__name__ = f.__name__
wrapped_func.__doc__ = f.__doc__
return wrapped_func
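# Typical usage (as on the driver test methods below): decorating a test with
# @catch_notimplementederror means a driver raising NotImplementedError gets
# logged and the test returns early instead of erroring out.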
class _FakeDriverBackendTestCase(object):
def _setup_fakelibvirt(self):
# So that the _supports_direct_io check runs against the current
# working directory, instead of the default instances_path, which
# doesn't exist
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
# Put fakelibvirt in place
if 'libvirt' in sys.modules:
self.saved_libvirt = sys.modules['libvirt']
else:
self.saved_libvirt = None
import nova.tests.unit.virt.libvirt.fake_imagebackend as \
fake_imagebackend
import nova.tests.unit.virt.libvirt.fake_libvirt_utils as \
fake_libvirt_utils
import nova.tests.unit.virt.libvirt.fakelibvirt as fakelibvirt
import nova.tests.unit.virt.libvirt.fake_os_brick_connector as \
fake_os_brick_connector
sys.modules['libvirt'] = fakelibvirt
import nova.virt.libvirt.driver
import nova.virt.libvirt.firewall
import nova.virt.libvirt.host
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.imagebackend',
fake_imagebackend))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt',
fakelibvirt))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt_utils',
fake_libvirt_utils))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.host.libvirt',
fakelibvirt))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.firewall.libvirt',
fakelibvirt))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.connector',
fake_os_brick_connector))
fakelibvirt.disable_event_thread(self)
self.flags(rescue_image_id="2",
rescue_kernel_id="3",
rescue_ramdisk_id=None,
snapshots_directory='./',
sysinfo_serial='none',
group='libvirt')
def fake_extend(image, size):
pass
def fake_migrate(_self, destination, params=None, flags=0,
domain_xml=None, bandwidth=0):
pass
def fake_make_drive(_self, _path):
pass
def fake_get_instance_disk_info(_self, instance, xml=None,
block_device_info=None):
return '[]'
def fake_delete_instance_files(_self, _instance):
pass
def fake_wait():
pass
def fake_detach_device_with_retry(_self, get_device_conf_func, device,
persistent, live,
max_retry_count=7,
inc_sleep_time=2,
max_sleep_time=30):
# Still calling detach, but instead of returning a function that
# actually checks whether the device is gone from the XML, just
# continue, because the XML never gets updated in these tests
_self.detach_device(get_device_conf_func(device),
persistent=persistent,
live=live)
return fake_wait
self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
'_get_instance_disk_info',
fake_get_instance_disk_info)
self.stubs.Set(nova.virt.libvirt.driver.disk,
'extend', fake_extend)
self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
'delete_instance_files',
fake_delete_instance_files)
self.stubs.Set(nova.virt.libvirt.guest.Guest,
'detach_device_with_retry',
fake_detach_device_with_retry)
self.stubs.Set(nova.virt.libvirt.guest.Guest,
'migrate', fake_migrate)
# We can't actually make a config drive v2 because ensure_tree has
# been faked out
self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
'make_drive', fake_make_drive)
def _teardown_fakelibvirt(self):
# Restore libvirt
if self.saved_libvirt:
sys.modules['libvirt'] = self.saved_libvirt
def setUp(self):
super(_FakeDriverBackendTestCase, self).setUp()
# TODO(sdague): it would be nice to do this in a way that only
# the relevant backends were replaced for tests, though this
# should not harm anything by doing it for all backends
fake_image.stub_out_image_service(self)
self._setup_fakelibvirt()
def tearDown(self):
fake_image.FakeImageService_reset()
self._teardown_fakelibvirt()
super(_FakeDriverBackendTestCase, self).tearDown()
class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
"""Test that ComputeManager can successfully load both
old style and new style drivers and end up with the correct
final class.
"""
# if your driver supports being tested in a fake way, it can go here
new_drivers = {
'fake.FakeDriver': 'FakeDriver',
'libvirt.LibvirtDriver': 'LibvirtDriver'
}
def test_load_new_drivers(self):
for cls, driver in six.iteritems(self.new_drivers):
self.flags(compute_driver=cls)
# NOTE(sdague) the try block is to make it easier to debug a
# failure by knowing which driver broke
try:
cm = manager.ComputeManager()
except Exception as e:
self.fail("Couldn't load driver %s - %s" % (cls, e))
self.assertEqual(cm.driver.__class__.__name__, driver,
"Couldn't load driver %s" % cls)
def test_fail_to_load_new_drivers(self):
self.flags(compute_driver='nova.virt.amiga')
def _fake_exit(error):
raise test.TestingException()
self.stubs.Set(sys, 'exit', _fake_exit)
self.assertRaises(test.TestingException, manager.ComputeManager)
class _VirtDriverTestCase(_FakeDriverBackendTestCase):
def setUp(self):
super(_VirtDriverTestCase, self).setUp()
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
self.connection = importutils.import_object(self.driver_module,
fake.FakeVirtAPI())
self.ctxt = test_utils.get_test_admin_context()
self.image_service = fake_image.FakeImageService()
# NOTE(dripton): resolve_driver_format does some file reading and
# writing and chowning that complicate testing too much by requiring
# using real directories with proper permissions. Just stub it out
# here; we test it in test_imagebackend.py
self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
imagebackend.Image._get_driver_format)
def _get_running_instance(self, obj=True):
instance_ref = test_utils.get_test_instance(obj=obj)
network_info = test_utils.get_test_network_info()
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
'1.1.1.1'
image_meta = test_utils.get_test_image_object(None, instance_ref)
self.connection.spawn(self.ctxt, instance_ref, image_meta,
[], 'herp', network_info=network_info)
return instance_ref, network_info
@catch_notimplementederror
def test_init_host(self):
self.connection.init_host('myhostname')
@catch_notimplementederror
def test_list_instances(self):
self.connection.list_instances()
@catch_notimplementederror
def test_list_instance_uuids(self):
self.connection.list_instance_uuids()
@catch_notimplementederror
def test_spawn(self):
instance_ref, network_info = self._get_running_instance()
domains = self.connection.list_instances()
self.assertIn(instance_ref['name'], domains)
num_instances = self.connection.get_num_instances()
self.assertEqual(1, num_instances)
@catch_notimplementederror
def test_snapshot_not_running(self):
instance_ref = test_utils.get_test_instance()
img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
self.assertRaises(exception.InstanceNotRunning,
self.connection.snapshot,
self.ctxt, instance_ref, img_ref['id'],
lambda *args, **kwargs: None)
@catch_notimplementederror
def test_snapshot_running(self):
img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
instance_ref, network_info = self._get_running_instance()
self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'],
lambda *args, **kwargs: None)
@catch_notimplementederror
def test_post_interrupted_snapshot_cleanup(self):
instance_ref, network_info = self._get_running_instance()
self.connection.post_interrupted_snapshot_cleanup(self.ctxt,
instance_ref)
@catch_notimplementederror
def test_reboot(self):
reboot_type = "SOFT"
instance_ref, network_info = self._get_running_instance()
self.connection.reboot(self.ctxt, instance_ref, network_info,
reboot_type)
@catch_notimplementederror
def test_get_host_ip_addr(self):
host_ip = self.connection.get_host_ip_addr()
# Will raise an exception if it's not a valid IP at all
ip = netaddr.IPAddress(host_ip)
# For now, assume IPv4.
self.assertEqual(ip.version, 4)
@catch_notimplementederror
def test_set_admin_password(self):
instance, network_info = self._get_running_instance(obj=True)
self.connection.set_admin_password(instance, 'p4ssw0rd')
@catch_notimplementederror
def test_inject_file(self):
instance_ref, network_info = self._get_running_instance()
self.connection.inject_file(instance_ref,
base64.b64encode('/testfile'),
base64.b64encode('testcontents'))
@catch_notimplementederror
def test_resume_state_on_host_boot(self):
instance_ref, network_info = self._get_running_instance()
self.connection.resume_state_on_host_boot(self.ctxt, instance_ref,
network_info)
@catch_notimplementederror
def test_rescue(self):
image_meta = objects.ImageMeta.from_dict({})
instance_ref, network_info = self._get_running_instance()
self.connection.rescue(self.ctxt, instance_ref, network_info,
image_meta, '')
@catch_notimplementederror
def test_unrescue_unrescued_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.unrescue(instance_ref, network_info)
@catch_notimplementederror
def test_unrescue_rescued_instance(self):
image_meta = objects.ImageMeta.from_dict({})
instance_ref, network_info = self._get_running_instance()
self.connection.rescue(self.ctxt, instance_ref, network_info,
image_meta, '')
self.connection.unrescue(instance_ref, network_info)
@catch_notimplementederror
def test_poll_rebooting_instances(self):
instances = [self._get_running_instance()]
self.connection.poll_rebooting_instances(10, instances)
@catch_notimplementederror
def test_migrate_disk_and_power_off(self):
instance_ref, network_info = self._get_running_instance()
flavor_ref = test_utils.get_test_flavor()
self.connection.migrate_disk_and_power_off(
self.ctxt, instance_ref, 'dest_host', flavor_ref,
network_info)
@catch_notimplementederror
def test_power_off(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_off(instance_ref)
@catch_notimplementederror
def test_power_on_running(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_on(self.ctxt, instance_ref,
network_info, None)
@catch_notimplementederror
def test_power_on_powered_off(self):
instance_ref, network_info = self._get_running_instance()
self.connection.power_off(instance_ref)
self.connection.power_on(self.ctxt, instance_ref, network_info, None)
@catch_notimplementederror
def test_trigger_crash_dump(self):
instance_ref, network_info = self._get_running_instance()
self.connection.trigger_crash_dump(instance_ref)
@catch_notimplementederror
def test_soft_delete(self):
instance_ref, network_info = self._get_running_instance(obj=True)
self.connection.soft_delete(instance_ref)
@catch_notimplementederror
def test_restore_running(self):
instance_ref, network_info = self._get_running_instance()
self.connection.restore(instance_ref)
@catch_notimplementederror
def test_restore_soft_deleted(self):
instance_ref, network_info = self._get_running_instance()
self.connection.soft_delete(instance_ref)
self.connection.restore(instance_ref)
@catch_notimplementederror
def test_pause(self):
instance_ref, network_info = self._get_running_instance()
self.connection.pause(instance_ref)
@catch_notimplementederror
def test_unpause_unpaused_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.unpause(instance_ref)
@catch_notimplementederror
def test_unpause_paused_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.pause(instance_ref)
self.connection.unpause(instance_ref)
@catch_notimplementederror
def test_suspend(self):
instance_ref, network_info = self._get_running_instance()
self.connection.suspend(self.ctxt, instance_ref)
@catch_notimplementederror
def test_resume_unsuspended_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.resume(self.ctxt, instance_ref, network_info)
@catch_notimplementederror
def test_resume_suspended_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.suspend(self.ctxt, instance_ref)
self.connection.resume(self.ctxt, instance_ref, network_info)
@catch_notimplementederror
def test_destroy_instance_nonexistent(self):
fake_instance = test_utils.get_test_instance(obj=True)
network_info = test_utils.get_test_network_info()
self.connection.destroy(self.ctxt, fake_instance, network_info)
@catch_notimplementederror
def test_destroy_instance(self):
instance_ref, network_info = self._get_running_instance()
self.assertIn(instance_ref['name'],
self.connection.list_instances())
self.connection.destroy(self.ctxt, instance_ref, network_info)
self.assertNotIn(instance_ref['name'],
self.connection.list_instances())
@catch_notimplementederror
def test_get_volume_connector(self):
result = self.connection.get_volume_connector({'id': 'fake'})
self.assertIn('ip', result)
self.assertIn('initiator', result)
self.assertIn('host', result)
@catch_notimplementederror
def test_get_volume_connector_storage_ip(self):
ip = 'my_ip'
storage_ip = 'storage_ip'
self.flags(my_block_storage_ip=storage_ip, my_ip=ip)
result = self.connection.get_volume_connector({'id': 'fake'})
self.assertIn('ip', result)
self.assertIn('initiator', result)
self.assertIn('host', result)
self.assertEqual(storage_ip, result['ip'])
@catch_notimplementederror
def test_attach_detach_volume(self):
instance_ref, network_info = self._get_running_instance()
connection_info = {
"driver_volume_type": "fake",
"serial": "fake_serial",
"data": {}
}
self.assertIsNone(
self.connection.attach_volume(None, connection_info, instance_ref,
'/dev/sda'))
self.assertIsNone(
self.connection.detach_volume(connection_info, instance_ref,
'/dev/sda'))
@catch_notimplementederror
def test_swap_volume(self):
instance_ref, network_info = self._get_running_instance()
self.assertIsNone(
self.connection.attach_volume(None, {'driver_volume_type': 'fake',
'data': {}},
instance_ref,
'/dev/sda'))
self.assertIsNone(
self.connection.swap_volume({'driver_volume_type': 'fake',
'data': {}},
{'driver_volume_type': 'fake',
'data': {}},
instance_ref,
'/dev/sda', 2))
@catch_notimplementederror
def test_attach_detach_different_power_states(self):
instance_ref, network_info = self._get_running_instance()
connection_info = {
"driver_volume_type": "fake",
"serial": "fake_serial",
"data": {}
}
self.connection.power_off(instance_ref)
self.connection.attach_volume(None, connection_info, instance_ref,
'/dev/sda')
bdm = {
'root_device_name': None,
'swap': None,
'ephemerals': [],
'block_device_mapping': driver_block_device.convert_volumes([
objects.BlockDeviceMapping(
self.ctxt,
**fake_block_device.FakeDbBlockDeviceDict(
{'id': 1, 'instance_uuid': instance_ref['uuid'],
'device_name': '/dev/sda',
'source_type': 'volume',
'destination_type': 'volume',
'delete_on_termination': False,
'snapshot_id': None,
'volume_id': 'abcdedf',
'volume_size': None,
'no_device': None
})),
])
}
bdm['block_device_mapping'][0]['connection_info'] = (
{'driver_volume_type': 'fake', 'data': {}})
with mock.patch.object(
driver_block_device.DriverVolumeBlockDevice, 'save'):
self.connection.power_on(
self.ctxt, instance_ref, network_info, bdm)
self.connection.detach_volume(connection_info,
instance_ref,
'/dev/sda')
@catch_notimplementederror
def test_get_info(self):
instance_ref, network_info = self._get_running_instance()
info = self.connection.get_info(instance_ref)
self.assertIsInstance(info, hardware.InstanceInfo)
@catch_notimplementederror
def test_get_info_for_unknown_instance(self):
fake_instance = test_utils.get_test_instance(obj=True)
self.assertRaises(exception.NotFound,
self.connection.get_info,
fake_instance)
@catch_notimplementederror
def test_get_diagnostics(self):
instance_ref, network_info = self._get_running_instance(obj=True)
self.connection.get_diagnostics(instance_ref)
@catch_notimplementederror
def test_get_instance_diagnostics(self):
instance_ref, network_info = self._get_running_instance(obj=True)
instance_ref['launched_at'] = timeutils.utcnow()
self.connection.get_instance_diagnostics(instance_ref)
@catch_notimplementederror
def test_block_stats(self):
instance_ref, network_info = self._get_running_instance()
stats = self.connection.block_stats(instance_ref, 'someid')
self.assertEqual(len(stats), 5)
@catch_notimplementederror
def test_get_console_output(self):
fake_libvirt_utils.files['dummy.log'] = ''
instance_ref, network_info = self._get_running_instance()
console_output = self.connection.get_console_output(self.ctxt,
instance_ref)
self.assertIsInstance(console_output, six.string_types)
@catch_notimplementederror
def test_get_vnc_console(self):
instance, network_info = self._get_running_instance(obj=True)
vnc_console = self.connection.get_vnc_console(self.ctxt, instance)
self.assertIsInstance(vnc_console, ctype.ConsoleVNC)
@catch_notimplementederror
def test_get_spice_console(self):
instance_ref, network_info = self._get_running_instance()
spice_console = self.connection.get_spice_console(self.ctxt,
instance_ref)
self.assertIsInstance(spice_console, ctype.ConsoleSpice)
@catch_notimplementederror
def test_get_rdp_console(self):
instance_ref, network_info = self._get_running_instance()
rdp_console = self.connection.get_rdp_console(self.ctxt, instance_ref)
self.assertIsInstance(rdp_console, ctype.ConsoleRDP)
@catch_notimplementederror
def test_get_serial_console(self):
instance_ref, network_info = self._get_running_instance()
serial_console = self.connection.get_serial_console(self.ctxt,
instance_ref)
self.assertIsInstance(serial_console, ctype.ConsoleSerial)
@catch_notimplementederror
def test_get_mks_console(self):
instance_ref, network_info = self._get_running_instance()
mks_console = self.connection.get_mks_console(self.ctxt,
instance_ref)
self.assertIsInstance(mks_console, ctype.ConsoleMKS)
@catch_notimplementederror
def test_get_console_pool_info(self):
instance_ref, network_info = self._get_running_instance()
console_pool = self.connection.get_console_pool_info(instance_ref)
self.assertIn('address', console_pool)
self.assertIn('username', console_pool)
self.assertIn('password', console_pool)
@catch_notimplementederror
def test_refresh_security_group_rules(self):
# FIXME: Create security group and add the instance to it
instance_ref, network_info = self._get_running_instance()
self.connection.refresh_security_group_rules(1)
@catch_notimplementederror
def test_refresh_instance_security_rules(self):
# FIXME: Create security group and add the instance to it
instance_ref, network_info = self._get_running_instance()
self.connection.refresh_instance_security_rules(instance_ref)
@catch_notimplementederror
def test_ensure_filtering_for_instance(self):
instance = test_utils.get_test_instance(obj=True)
network_info = test_utils.get_test_network_info()
self.connection.ensure_filtering_rules_for_instance(instance,
network_info)
@catch_notimplementederror
def test_unfilter_instance(self):
instance_ref = test_utils.get_test_instance()
network_info = test_utils.get_test_network_info()
self.connection.unfilter_instance(instance_ref, network_info)
def test_live_migration(self):
instance_ref, network_info = self._get_running_instance()
fake_context = context.RequestContext('fake', 'fake')
migration = objects.Migration(context=fake_context, id=1)
migrate_data = objects.LibvirtLiveMigrateData(
migration=migration, bdms=[], block_migration=False)
self.connection.live_migration(self.ctxt, instance_ref, 'otherhost',
lambda *a: None, lambda *a: None,
migrate_data=migrate_data)
@catch_notimplementederror
def test_live_migration_force_complete(self):
instance_ref, network_info = self._get_running_instance()
self.connection.active_migrations[instance_ref.uuid] = deque()
self.connection.live_migration_force_complete(instance_ref)
@catch_notimplementederror
def test_live_migration_abort(self):
instance_ref, network_info = self._get_running_instance()
self.connection.live_migration_abort(instance_ref)
@catch_notimplementederror
def _check_available_resource_fields(self, host_status):
keys = ['vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'hypervisor_type', 'hypervisor_version',
'hypervisor_hostname', 'cpu_info', 'disk_available_least',
'supported_instances']
for key in keys:
self.assertIn(key, host_status)
self.assertIsInstance(host_status['hypervisor_version'], int)
@catch_notimplementederror
def test_get_available_resource(self):
available_resource = self.connection.get_available_resource(
'myhostname')
self._check_available_resource_fields(available_resource)
@catch_notimplementederror
def test_get_available_nodes(self):
self.connection.get_available_nodes(False)
@catch_notimplementederror
def _check_host_cpu_status_fields(self, host_cpu_status):
self.assertIn('kernel', host_cpu_status)
self.assertIn('idle', host_cpu_status)
self.assertIn('user', host_cpu_status)
self.assertIn('iowait', host_cpu_status)
self.assertIn('frequency', host_cpu_status)
@catch_notimplementederror
def test_get_host_cpu_stats(self):
host_cpu_status = self.connection.get_host_cpu_stats()
self._check_host_cpu_status_fields(host_cpu_status)
@catch_notimplementederror
def test_set_host_enabled(self):
self.connection.set_host_enabled(True)
@catch_notimplementederror
def test_get_host_uptime(self):
self.connection.get_host_uptime()
@catch_notimplementederror
def test_host_power_action_reboot(self):
self.connection.host_power_action('reboot')
@catch_notimplementederror
def test_host_power_action_shutdown(self):
self.connection.host_power_action('shutdown')
@catch_notimplementederror
def test_host_power_action_startup(self):
self.connection.host_power_action('startup')
@catch_notimplementederror
def test_add_to_aggregate(self):
self.connection.add_to_aggregate(self.ctxt, 'aggregate', 'host')
@catch_notimplementederror
def test_remove_from_aggregate(self):
self.connection.remove_from_aggregate(self.ctxt, 'aggregate', 'host')
def test_events(self):
got_events = []
def handler(event):
got_events.append(event)
self.connection.register_event_listener(handler)
event1 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STARTED)
event2 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_PAUSED)
self.connection.emit_event(event1)
self.connection.emit_event(event2)
want_events = [event1, event2]
self.assertEqual(want_events, got_events)
event3 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_RESUMED)
event4 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STOPPED)
self.connection.emit_event(event3)
self.connection.emit_event(event4)
want_events = [event1, event2, event3, event4]
self.assertEqual(want_events, got_events)
def test_event_bad_object(self):
# Passing in something which does not inherit
# from virtevent.Event
def handler(event):
pass
self.connection.register_event_listener(handler)
badevent = {
"foo": "bar"
}
self.assertRaises(ValueError,
self.connection.emit_event,
badevent)
def test_event_bad_callback(self):
# Check that if a callback raises an exception,
# it does not propagate back out of the
# 'emit_event' call
def handler(event):
raise Exception("Hit Me!")
self.connection.register_event_listener(handler)
event1 = virtevent.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
virtevent.EVENT_LIFECYCLE_STARTED)
self.connection.emit_event(event1)
def test_set_bootable(self):
self.assertRaises(NotImplementedError, self.connection.set_bootable,
'instance', True)
@catch_notimplementederror
def test_get_instance_disk_info(self):
# This should be implemented by any driver that supports live migrate.
instance_ref, network_info = self._get_running_instance()
self.connection.get_instance_disk_info(instance_ref,
block_device_info={})
@catch_notimplementederror
def test_get_device_name_for_instance(self):
instance, _ = self._get_running_instance()
self.connection.get_device_name_for_instance(
instance, [], mock.Mock(spec=objects.BlockDeviceMapping))
def test_network_binding_host_id(self):
# NOTE(jroll) self._get_running_instance calls spawn(), so we can't
# use it to test this method. Make a simple object instead; we just
# need instance.host.
instance = objects.Instance(self.ctxt, host='somehost')
self.assertEqual(instance.host,
self.connection.network_binding_host_id(self.ctxt, instance))
class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
self.driver_module = "nova.virt.driver.ComputeDriver"
super(AbstractDriverTestCase, self).setUp()
def test_live_migration(self):
self.skipTest('Live migration is not implemented in the base '
'virt driver.')
class FakeConnectionTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
self.driver_module = 'nova.virt.fake.FakeDriver'
fake.set_nodes(['myhostname'])
super(FakeConnectionTestCase, self).setUp()
def _check_available_resource_fields(self, host_status):
super(FakeConnectionTestCase, self)._check_available_resource_fields(
host_status)
hypervisor_type = host_status['hypervisor_type']
supported_instances = host_status['supported_instances']
try:
# supported_instances could be JSON wrapped
supported_instances = jsonutils.loads(supported_instances)
except TypeError:
pass
self.assertTrue(any(hypervisor_type in x for x in supported_instances))
class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
# Point _VirtDriverTestCase at the right module
self.driver_module = 'nova.virt.libvirt.LibvirtDriver'
super(LibvirtConnTestCase, self).setUp()
self.stubs.Set(self.connection,
'_set_host_enabled', mock.MagicMock())
self.useFixture(fixtures.MonkeyPatch(
'nova.context.get_admin_context',
self._fake_admin_context))
# This is needed for the live migration tests which spawn off the
# operation for monitoring.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
def _fake_admin_context(self, *args, **kwargs):
return self.ctxt
def test_force_hard_reboot(self):
self.flags(wait_soft_reboot_seconds=0, group='libvirt')
self.test_reboot()
def test_migrate_disk_and_power_off(self):
# There is a lack of fake infrastructure to exercise this method, so skip.
self.skipTest("Nothing to test, but this method is needed"
" to override the superclass.")
def test_internal_set_host_enabled(self):
self.mox.UnsetStubs()
service_mock = mock.MagicMock()
# Previous status of the service: disabled: False
service_mock.configure_mock(disabled_reason='None',
disabled=False)
with mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock):
self.connection._set_host_enabled(False, 'ERROR!')
self.assertTrue(service_mock.disabled)
self.assertEqual(service_mock.disabled_reason, 'AUTO: ERROR!')
def test_set_host_enabled_when_auto_disabled(self):
self.mox.UnsetStubs()
service_mock = mock.MagicMock()
# Previous status of the service: disabled: True, 'AUTO: ERROR'
service_mock.configure_mock(disabled_reason='AUTO: ERROR',
disabled=True)
with mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock):
self.connection._set_host_enabled(True)
self.assertFalse(service_mock.disabled)
self.assertIsNone(service_mock.disabled_reason)
def test_set_host_enabled_when_manually_disabled(self):
self.mox.UnsetStubs()
service_mock = mock.MagicMock()
# Previous status of the service: disabled: True, 'Manually disabled'
service_mock.configure_mock(disabled_reason='Manually disabled',
disabled=True)
with mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock):
self.connection._set_host_enabled(True)
self.assertTrue(service_mock.disabled)
self.assertEqual(service_mock.disabled_reason, 'Manually disabled')
def test_set_host_enabled_dont_override_manually_disabled(self):
self.mox.UnsetStubs()
service_mock = mock.MagicMock()
# Previous status of the service: disabled: True, 'Manually disabled'
service_mock.configure_mock(disabled_reason='Manually disabled',
disabled=True)
with mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock):
self.connection._set_host_enabled(False, 'ERROR!')
self.assertTrue(service_mock.disabled)
self.assertEqual(service_mock.disabled_reason, 'Manually disabled')
@catch_notimplementederror
@mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs')
def test_unplug_vifs_with_destroy_vifs_false(self, unplug_vifs_mock):
instance_ref, network_info = self._get_running_instance()
self.connection.cleanup(self.ctxt, instance_ref, network_info,
destroy_vifs=False)
self.assertEqual(unplug_vifs_mock.call_count, 0)
@catch_notimplementederror
@mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs')
def test_unplug_vifs_with_destroy_vifs_true(self, unplug_vifs_mock):
instance_ref, network_info = self._get_running_instance()
self.connection.cleanup(self.ctxt, instance_ref, network_info,
destroy_vifs=True)
self.assertEqual(unplug_vifs_mock.call_count, 1)
unplug_vifs_mock.assert_called_once_with(instance_ref,
network_info, True)
def test_get_device_name_for_instance(self):
self.skipTest("Tested by the nova.tests.unit.virt.libvirt suite")
@catch_notimplementederror
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch("nova.virt.libvirt.host.Host.has_min_version")
def test_set_admin_password(self, ver, mock_image):
self.flags(virt_type='kvm', group='libvirt')
mock_image.return_value = {"properties": {
"hw_qemu_guest_agent": "yes"}}
instance, network_info = self._get_running_instance(obj=True)
self.connection.set_admin_password(instance, 'p4ssw0rd')
|
|
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import operator
import time
import uuid
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
import tenacity
from neutron._i18n import _, _LE, _LI, _LW
from neutron.agent.common import utils
from neutron.agent.linux import ip_lib
from neutron.agent.ovsdb import api as ovsdb
from neutron.conf.agent import ovs_conf
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants
UINT64_BITMASK = (1 << 64) - 1
# Special return value for an invalid OVS ofport
INVALID_OFPORT = -1
UNASSIGNED_OFPORT = []
# OVS bridge fail modes
FAILMODE_SECURE = 'secure'
FAILMODE_STANDALONE = 'standalone'
ovs_conf.register_ovs_agent_opts()
LOG = logging.getLogger(__name__)
OVS_DEFAULT_CAPS = {
'datapath_types': [],
'iface_types': [],
}
def _ofport_result_pending(result):
"""Return True if ovs-vsctl indicates the result is still pending."""
# ovs-vsctl can return '[]' for an ofport that has not yet been assigned
try:
int(result)
return False
except (ValueError, TypeError):
return True
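# Worked example: _ofport_result_pending('[]') -> True (still pending),
# whereas _ofport_result_pending('5') -> False (ofport assigned).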
def _ofport_retry(fn):
"""Decorator for retrying when OVS has yet to assign an ofport.
The instance's vsctl_timeout is used as the max waiting time. This relies
on the fact that instance methods receive self as the first argument.
"""
@six.wraps(fn)
def wrapped(*args, **kwargs):
self = args[0]
new_fn = tenacity.retry(
reraise=True,
retry=tenacity.retry_if_result(_ofport_result_pending),
wait=tenacity.wait_exponential(multiplier=0.01, max=1),
stop=tenacity.stop_after_delay(
self.vsctl_timeout))(fn)
return new_fn(*args, **kwargs)
return wrapped
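# The retry backs off exponentially (0.01s multiplier, capped at 1s between
# attempts) and gives up after self.vsctl_timeout seconds; get_port_ofport()
# below then catches the resulting tenacity.RetryError and falls back to
# INVALID_OFPORT.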
class VifPort(object):
def __init__(self, port_name, ofport, vif_id, vif_mac, switch):
self.port_name = port_name
self.ofport = ofport
self.vif_id = vif_id
self.vif_mac = vif_mac
self.switch = switch
def __str__(self):
return ("iface-id=%s, vif_mac=%s, port_name=%s, ofport=%s, "
"bridge_name=%s") % (
self.vif_id, self.vif_mac,
self.port_name, self.ofport,
self.switch.br_name)
class BaseOVS(object):
def __init__(self):
self.vsctl_timeout = cfg.CONF.ovs_vsctl_timeout
self.ovsdb = ovsdb.API.get(self)
def add_bridge(self, bridge_name,
datapath_type=constants.OVS_DATAPATH_SYSTEM):
self.ovsdb.add_br(bridge_name,
datapath_type).execute()
return OVSBridge(bridge_name)
def delete_bridge(self, bridge_name):
self.ovsdb.del_br(bridge_name).execute()
def bridge_exists(self, bridge_name):
return self.ovsdb.br_exists(bridge_name).execute()
def port_exists(self, port_name):
cmd = self.ovsdb.db_get('Port', port_name, 'name')
return bool(cmd.execute(check_error=False, log_errors=False))
def get_bridge_for_iface(self, iface):
return self.ovsdb.iface_to_br(iface).execute()
def get_bridges(self):
return self.ovsdb.list_br().execute(check_error=True)
def get_bridge_external_bridge_id(self, bridge):
return self.ovsdb.br_get_external_id(bridge, 'bridge-id').execute()
def set_db_attribute(self, table_name, record, column, value,
check_error=False, log_errors=True):
self.ovsdb.db_set(table_name, record, (column, value)).execute(
check_error=check_error, log_errors=log_errors)
def clear_db_attribute(self, table_name, record, column):
self.ovsdb.db_clear(table_name, record, column).execute()
def db_get_val(self, table, record, column, check_error=False,
log_errors=True):
return self.ovsdb.db_get(table, record, column).execute(
check_error=check_error, log_errors=log_errors)
@property
def config(self):
"""A dict containing the only row from the root Open_vSwitch table
This row contains several columns describing the Open vSwitch install
and the system on which it is installed. Useful keys include:
datapath_types: a list of supported datapath types
iface_types: a list of supported interface types
ovs_version: the OVS version
"""
return self.ovsdb.db_list("Open_vSwitch").execute()[0]
@property
def capabilities(self):
_cfg = self.config
return {k: _cfg.get(k, OVS_DEFAULT_CAPS[k]) for k in OVS_DEFAULT_CAPS}
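# Illustrative result shape (actual values come from the running OVS):
#   {'datapath_types': ['netdev', 'system'], 'iface_types': ['geneve', ...]}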
class OVSBridge(BaseOVS):
def __init__(self, br_name, datapath_type=constants.OVS_DATAPATH_SYSTEM):
super(OVSBridge, self).__init__()
self.br_name = br_name
self.datapath_type = datapath_type
self._default_cookie = generate_random_cookie()
@property
def default_cookie(self):
return self._default_cookie
def set_agent_uuid_stamp(self, val):
self._default_cookie = val
def set_controller(self, controllers):
self.ovsdb.set_controller(self.br_name,
controllers).execute(check_error=True)
def del_controller(self):
self.ovsdb.del_controller(self.br_name).execute(check_error=True)
def get_controller(self):
return self.ovsdb.get_controller(self.br_name).execute(
check_error=True)
def _set_bridge_fail_mode(self, mode):
self.ovsdb.set_fail_mode(self.br_name, mode).execute(check_error=True)
def set_secure_mode(self):
self._set_bridge_fail_mode(FAILMODE_SECURE)
def set_standalone_mode(self):
self._set_bridge_fail_mode(FAILMODE_STANDALONE)
def set_protocols(self, protocols):
self.set_db_attribute('Bridge', self.br_name, 'protocols', protocols,
check_error=True)
def create(self, secure_mode=False):
with self.ovsdb.transaction() as txn:
txn.add(
self.ovsdb.add_br(self.br_name,
datapath_type=self.datapath_type))
if secure_mode:
txn.add(self.ovsdb.set_fail_mode(self.br_name,
FAILMODE_SECURE))
def destroy(self):
self.delete_bridge(self.br_name)
def add_port(self, port_name, *interface_attr_tuples):
with self.ovsdb.transaction() as txn:
txn.add(self.ovsdb.add_port(self.br_name, port_name))
if interface_attr_tuples:
txn.add(self.ovsdb.db_set('Interface', port_name,
*interface_attr_tuples))
return self.get_port_ofport(port_name)
def replace_port(self, port_name, *interface_attr_tuples):
"""Replace existing port or create it, and configure port interface."""
# NOTE(xiaohhui): If del_port is inside the transaction, there will
# only be one command for replace_port. This will cause the new port
# not to be found by the system, which will lead to Bug #1519926.
self.ovsdb.del_port(port_name).execute()
with self.ovsdb.transaction() as txn:
txn.add(self.ovsdb.add_port(self.br_name, port_name,
may_exist=False))
if interface_attr_tuples:
txn.add(self.ovsdb.db_set('Interface', port_name,
*interface_attr_tuples))
def delete_port(self, port_name):
self.ovsdb.del_port(port_name, self.br_name).execute()
def run_ofctl(self, cmd, args, process_input=None):
full_args = ["ovs-ofctl", cmd, self.br_name] + args
# TODO(kevinbenton): This error handling is really brittle and only
# detects one specific type of failure. The callers of this need to
# be refactored to expect errors so we can re-raise and they can
# take appropriate action based on the type of error.
for i in range(1, 11):
try:
return utils.execute(full_args, run_as_root=True,
process_input=process_input)
except Exception as e:
if "failed to connect to socket" in str(e):
LOG.debug("Failed to connect to OVS. Retrying "
"in 1 second. Attempt: %s/10", i)
time.sleep(1)
continue
LOG.error(_LE("Unable to execute %(cmd)s. Exception: "
"%(exception)s"),
{'cmd': full_args, 'exception': e})
break
def count_flows(self):
flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:]
return len(flow_list) - 1
def remove_all_flows(self):
self.run_ofctl("del-flows", [])
@_ofport_retry
def _get_port_ofport(self, port_name):
return self.db_get_val("Interface", port_name, "ofport")
def get_port_ofport(self, port_name):
"""Get the port's assigned ofport, retrying if not yet assigned."""
ofport = INVALID_OFPORT
try:
ofport = self._get_port_ofport(port_name)
except tenacity.RetryError:
LOG.exception(_LE("Timed out retrieving ofport on port %s."),
port_name)
return ofport
def get_datapath_id(self):
return self.db_get_val('Bridge',
self.br_name, 'datapath_id')
def do_action_flows(self, action, kwargs_list):
if action != 'del':
for kw in kwargs_list:
if 'cookie' not in kw:
kw['cookie'] = self._default_cookie
flow_strs = [_build_flow_expr_str(kw, action) for kw in kwargs_list]
self.run_ofctl('%s-flows' % action, ['-'], '\n'.join(flow_strs))
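# Rough shape of what gets piped to ovs-ofctl on stdin for, say,
# add_flow(table=0, priority=1, actions='normal'): a single line such as
# "cookie=<default_cookie>,table=0,priority=1,actions=normal" (the exact
# ordering and formatting are up to _build_flow_expr_str, defined elsewhere
# in this module).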
def add_flow(self, **kwargs):
self.do_action_flows('add', [kwargs])
def mod_flow(self, **kwargs):
self.do_action_flows('mod', [kwargs])
def delete_flows(self, **kwargs):
self.do_action_flows('del', [kwargs])
def dump_flows_for_table(self, table):
return self.dump_flows_for(table=table)
def dump_flows_for(self, **kwargs):
retval = None
if "cookie" in kwargs:
kwargs["cookie"] = check_cookie_mask(str(kwargs["cookie"]))
flow_str = ",".join("=".join([key, str(val)])
for key, val in kwargs.items())
flows = self.run_ofctl("dump-flows", [flow_str])
if flows:
retval = '\n'.join(item for item in flows.splitlines()
if 'NXST' not in item)
return retval
def dump_all_flows(self):
return [f for f in self.run_ofctl("dump-flows", []).splitlines()
if 'NXST' not in f]
def deferred(self, **kwargs):
return DeferredOVSBridge(self, **kwargs)
def add_tunnel_port(self, port_name, remote_ip, local_ip,
tunnel_type=p_const.TYPE_GRE,
vxlan_udp_port=p_const.VXLAN_UDP_PORT,
dont_fragment=True,
tunnel_csum=False):
attrs = [('type', tunnel_type)]
# TODO(twilson) This is an OrderedDict solely to make a test happy
options = collections.OrderedDict()
vxlan_uses_custom_udp_port = (
tunnel_type == p_const.TYPE_VXLAN and
vxlan_udp_port != p_const.VXLAN_UDP_PORT
)
if vxlan_uses_custom_udp_port:
options['dst_port'] = vxlan_udp_port
options['df_default'] = str(dont_fragment).lower()
options['remote_ip'] = remote_ip
options['local_ip'] = local_ip
options['in_key'] = 'flow'
options['out_key'] = 'flow'
if tunnel_csum:
options['csum'] = str(tunnel_csum).lower()
attrs.append(('options', options))
return self.add_port(port_name, *attrs)
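# Illustrative options dict for a VXLAN tunnel on a non-default UDP port
# (actual values depend on the arguments passed in):
#   {'dst_port': 4790, 'df_default': 'true', 'remote_ip': '192.0.2.1',
#    'local_ip': '192.0.2.2', 'in_key': 'flow', 'out_key': 'flow'}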
def add_patch_port(self, local_name, remote_name):
attrs = [('type', 'patch'),
('options', {'peer': remote_name})]
return self.add_port(local_name, *attrs)
def get_iface_name_list(self):
# get the interface name list for this bridge
return self.ovsdb.list_ifaces(self.br_name).execute(check_error=True)
def get_port_name_list(self):
# get the port name list for this bridge
return self.ovsdb.list_ports(self.br_name).execute(check_error=True)
def get_port_stats(self, port_name):
return self.db_get_val("Interface", port_name, "statistics")
def get_xapi_iface_id(self, xs_vif_uuid):
args = ["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid]
try:
return utils.execute(args, run_as_root=True).strip()
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to execute %(cmd)s. "
"Exception: %(exception)s"),
{'cmd': args, 'exception': e})
def get_ports_attributes(self, table, columns=None, ports=None,
check_error=True, log_errors=True,
if_exists=False):
port_names = ports or self.get_port_name_list()
if not port_names:
return []
return (self.ovsdb.db_list(table, port_names, columns=columns,
if_exists=if_exists).
execute(check_error=check_error, log_errors=log_errors))
# returns a VIF object for each VIF port
def get_vif_ports(self, ofport_filter=None):
edge_ports = []
port_info = self.get_ports_attributes(
'Interface', columns=['name', 'external_ids', 'ofport'],
if_exists=True)
for port in port_info:
name = port['name']
external_ids = port['external_ids']
ofport = port['ofport']
if ofport_filter and ofport in ofport_filter:
continue
if "iface-id" in external_ids and "attached-mac" in external_ids:
p = VifPort(name, ofport, external_ids["iface-id"],
external_ids["attached-mac"], self)
edge_ports.append(p)
elif ("xs-vif-uuid" in external_ids and
"attached-mac" in external_ids):
# if this is a xenserver and iface-id is not automatically
# synced to OVS from XAPI, we grab it from XAPI directly
iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"])
p = VifPort(name, ofport, iface_id,
external_ids["attached-mac"], self)
edge_ports.append(p)
return edge_ports
def get_vif_port_to_ofport_map(self):
results = self.get_ports_attributes(
'Interface', columns=['name', 'external_ids', 'ofport'],
if_exists=True)
port_map = {}
for r in results:
# fall back to basic interface name
key = self.portid_from_external_ids(r['external_ids']) or r['name']
try:
port_map[key] = int(r['ofport'])
except TypeError:
# port doesn't yet have an ofport entry so we ignore it
pass
return port_map
def get_vif_port_set(self):
edge_ports = set()
results = self.get_ports_attributes(
'Interface', columns=['name', 'external_ids', 'ofport'],
if_exists=True)
for result in results:
if result['ofport'] == UNASSIGNED_OFPORT:
LOG.warning(_LW("Found not yet ready openvswitch port: %s"),
result['name'])
elif result['ofport'] == INVALID_OFPORT:
LOG.warning(_LW("Found failed openvswitch port: %s"),
result['name'])
elif 'attached-mac' in result['external_ids']:
port_id = self.portid_from_external_ids(result['external_ids'])
if port_id:
edge_ports.add(port_id)
return edge_ports
def portid_from_external_ids(self, external_ids):
if 'iface-id' in external_ids:
return external_ids['iface-id']
if 'xs-vif-uuid' in external_ids:
iface_id = self.get_xapi_iface_id(
external_ids['xs-vif-uuid'])
return iface_id
def get_port_tag_dict(self):
"""Get a dict of port names and associated vlan tags.
e.g. the returned dict is of the following form::
{u'int-br-eth2': [],
u'patch-tun': [],
u'qr-76d9e6b6-21': 1,
u'tapce5318ff-78': 1,
u'tape1400310-e6': 1}
The TAG ID is only available in the "Port" table and is not available
in the "Interface" table queried by the get_vif_port_set() method.
"""
results = self.get_ports_attributes(
'Port', columns=['name', 'tag'], if_exists=True)
return {p['name']: p['tag'] for p in results}
def get_vifs_by_ids(self, port_ids):
interface_info = self.get_ports_attributes(
"Interface", columns=["name", "external_ids", "ofport"],
if_exists=True)
by_id = {x['external_ids'].get('iface-id'): x for x in interface_info}
result = {}
for port_id in port_ids:
result[port_id] = None
if port_id not in by_id:
LOG.info(_LI("Port %(port_id)s not present in bridge "
"%(br_name)s"),
{'port_id': port_id, 'br_name': self.br_name})
continue
pinfo = by_id[port_id]
if not self._check_ofport(port_id, pinfo):
continue
mac = pinfo['external_ids'].get('attached-mac')
result[port_id] = VifPort(pinfo['name'], pinfo['ofport'],
port_id, mac, self)
return result
@staticmethod
def _check_ofport(port_id, port_info):
if port_info['ofport'] in [UNASSIGNED_OFPORT, INVALID_OFPORT]:
LOG.warning(_LW("ofport: %(ofport)s for VIF: %(vif)s "
"is not a positive integer"),
{'ofport': port_info['ofport'], 'vif': port_id})
return False
return True
def get_vif_port_by_id(self, port_id):
ports = self.ovsdb.db_find(
'Interface', ('external_ids', '=', {'iface-id': port_id}),
('external_ids', '!=', {'attached-mac': ''}),
columns=['external_ids', 'name', 'ofport']).execute()
for port in ports:
if self.br_name != self.get_bridge_for_iface(port['name']):
continue
if not self._check_ofport(port_id, port):
continue
mac = port['external_ids'].get('attached-mac')
return VifPort(port['name'], port['ofport'], port_id, mac, self)
LOG.info(_LI("Port %(port_id)s not present in bridge %(br_name)s"),
{'port_id': port_id, 'br_name': self.br_name})
def delete_ports(self, all_ports=False):
if all_ports:
port_names = self.get_port_name_list()
else:
port_names = (port.port_name for port in self.get_vif_ports())
for port_name in port_names:
self.delete_port(port_name)
def get_local_port_mac(self):
"""Retrieve the mac of the bridge's local port."""
address = ip_lib.IPDevice(self.br_name).link.address
if address:
return address
else:
msg = _('Unable to determine mac address for %s') % self.br_name
raise Exception(msg)
def set_controllers_connection_mode(self, connection_mode):
"""Set bridge controllers connection mode.
:param connection_mode: "out-of-band" or "in-band"
"""
attr = [('connection_mode', connection_mode)]
controllers = self.db_get_val('Bridge', self.br_name, 'controller')
controllers = [controllers] if isinstance(
controllers, uuid.UUID) else controllers
with self.ovsdb.transaction(check_error=True) as txn:
for controller_uuid in controllers:
txn.add(self.ovsdb.db_set('Controller',
controller_uuid, *attr))
def _set_egress_bw_limit_for_port(self, port_name, max_kbps,
max_burst_kbps):
with self.ovsdb.transaction(check_error=True) as txn:
txn.add(self.ovsdb.db_set('Interface', port_name,
('ingress_policing_rate', max_kbps)))
txn.add(self.ovsdb.db_set('Interface', port_name,
('ingress_policing_burst',
max_burst_kbps)))
def create_egress_bw_limit_for_port(self, port_name, max_kbps,
max_burst_kbps):
self._set_egress_bw_limit_for_port(
port_name, max_kbps, max_burst_kbps)
def get_egress_bw_limit_for_port(self, port_name):
max_kbps = self.db_get_val('Interface', port_name,
'ingress_policing_rate')
max_burst_kbps = self.db_get_val('Interface', port_name,
'ingress_policing_burst')
max_kbps = max_kbps or None
max_burst_kbps = max_burst_kbps or None
return max_kbps, max_burst_kbps
def delete_egress_bw_limit_for_port(self, port_name):
self._set_egress_bw_limit_for_port(
port_name, 0, 0)
def __enter__(self):
self.create()
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.destroy()
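# Hedged usage sketch (added; not part of the original module): the bridge
# class above supports the context-manager protocol, creating the bridge on
# __enter__ and destroying it on __exit__. The flow values are made up.
def _example_bridge_lifecycle(bridge):
    """Create the bridge, install and dump a flow, then destroy it.

    ``bridge`` is expected to be an instance of the bridge class defined
    above; it is passed in rather than constructed here because its
    constructor lives earlier in the module.
    """
    with bridge:
        bridge.add_flow(table=0, priority=10, proto='arp', actions='normal')
        return bridge.dump_flows_for(table=0)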
class DeferredOVSBridge(object):
'''Deferred OVSBridge.
    This class wraps add_flow, mod_flow and delete_flows calls to an OVSBridge
    and defers their application until apply_flows is called, so that they can
    be issued in bulk. It also passes the ALLOWED_PASSTHROUGHS calls straight
    through, to avoid mixing OVSBridge and DeferredOVSBridge uses.
    This class can be used as a context manager, in which case apply_flows is
    called on __exit__ unless an exception was raised.
    This class is not thread-safe; a new instance must be created for every
    use.
'''
ALLOWED_PASSTHROUGHS = 'add_port', 'add_tunnel_port', 'delete_port'
def __init__(self, br, full_ordered=False,
order=('add', 'mod', 'del')):
'''Constructor.
:param br: wrapped bridge
:param full_ordered: Optional, disable flow reordering (slower)
        :param order: Optional, define in which order flows are applied
'''
self.br = br
self.full_ordered = full_ordered
self.order = order
if not self.full_ordered:
self.weights = dict((y, x) for x, y in enumerate(self.order))
self.action_flow_tuples = []
def __getattr__(self, name):
if name in self.ALLOWED_PASSTHROUGHS:
return getattr(self.br, name)
raise AttributeError(name)
def add_flow(self, **kwargs):
self.action_flow_tuples.append(('add', kwargs))
def mod_flow(self, **kwargs):
self.action_flow_tuples.append(('mod', kwargs))
def delete_flows(self, **kwargs):
self.action_flow_tuples.append(('del', kwargs))
def apply_flows(self):
action_flow_tuples = self.action_flow_tuples
self.action_flow_tuples = []
if not action_flow_tuples:
return
if not self.full_ordered:
action_flow_tuples.sort(key=lambda af: self.weights[af[0]])
grouped = itertools.groupby(action_flow_tuples,
key=operator.itemgetter(0))
itemgetter_1 = operator.itemgetter(1)
for action, action_flow_list in grouped:
flows = list(map(itemgetter_1, action_flow_list))
self.br.do_action_flows(action, flows)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
self.apply_flows()
else:
LOG.exception(_LE("OVS flows could not be applied on bridge %s"),
self.br.br_name)
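# Hedged usage sketch (added; not part of the original module): how the
# deferred bridge above is meant to be used. The table/priority/action values
# are made up for illustration.
def _example_deferred_flow_update(bridge):
    """Queue several flow changes and apply them in bulk on context exit.

    ``bridge`` is expected to be an OVSBridge-like object exposing
    ``deferred()``.
    """
    with bridge.deferred(full_ordered=False) as deferred_br:
        deferred_br.add_flow(table=0, priority=10, actions='normal')
        deferred_br.mod_flow(table=0, priority=5, actions='drop')
        deferred_br.delete_flows(table=1)
    # Leaving the context without an exception calls apply_flows(), which
    # groups the queued tuples by action ('add', 'mod', 'del') and issues one
    # do_action_flows() call per group.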
def _build_flow_expr_str(flow_dict, cmd):
flow_expr_arr = []
actions = None
if cmd == 'add':
flow_expr_arr.append("hard_timeout=%s" %
flow_dict.pop('hard_timeout', '0'))
flow_expr_arr.append("idle_timeout=%s" %
flow_dict.pop('idle_timeout', '0'))
flow_expr_arr.append("priority=%s" %
flow_dict.pop('priority', '1'))
elif 'priority' in flow_dict:
msg = _("Cannot match priority on flow deletion or modification")
raise exceptions.InvalidInput(error_message=msg)
if cmd != 'del':
if "actions" not in flow_dict:
msg = _("Must specify one or more actions on flow addition"
" or modification")
raise exceptions.InvalidInput(error_message=msg)
actions = "actions=%s" % flow_dict.pop('actions')
for key, value in six.iteritems(flow_dict):
if key == 'proto':
flow_expr_arr.append(value)
else:
flow_expr_arr.append("%s=%s" % (key, str(value)))
if actions:
flow_expr_arr.append(actions)
return ','.join(flow_expr_arr)
def generate_random_cookie():
return uuid.uuid4().int & UINT64_BITMASK
def check_cookie_mask(cookie):
if '/' not in cookie:
return cookie + '/-1'
else:
return cookie
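# Hedged illustration (added, not in the original file): what the two helpers
# above produce for typical inputs. The flow values are made up.
def _example_flow_expr_strings():
    """Return sample outputs of _build_flow_expr_str and check_cookie_mask."""
    add_expr = _build_flow_expr_str(
        {'priority': 10, 'table': 0, 'actions': 'normal'}, 'add')
    # -> 'hard_timeout=0,idle_timeout=0,priority=10,table=0,actions=normal'
    masked = check_cookie_mask('1234')
    # -> '1234/-1' (an exact-match mask is appended when none was given)
    return add_expr, masked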
|
|
import socket
from .BTcrypto import Crypto, CRYPTO_OK, padding
from .Encrypter import protocol_name, option_pattern
CHECK_PEER_ID_ENCRYPTED = True
# header, reserved, download id, my id, [length, message]
class NatCheck(object):
def __init__(self, resultfunc, downloadid, peerid, ip, port, rawserver,
encrypted=False):
self.resultfunc = resultfunc
self.downloadid = downloadid
self.peerid = peerid
self.ip = ip
self.port = port
self.encrypted = encrypted
self.closed = False
self.buffer = b''
self.read = self._read
self.write = self._write
try:
self.connection = rawserver.start_connection((ip, port), self)
if encrypted:
self._dc = not(CRYPTO_OK and CHECK_PEER_ID_ENCRYPTED)
self.encrypter = Crypto(True, disable_crypto=self._dc)
self.write(self.encrypter.padded_pubkey())
else:
self.encrypter = None
self.write(protocol_name + bytes(8) + downloadid)
except socket.error:
self.answer(False)
except IOError:
self.answer(False)
self.next_len = len(protocol_name)
self.next_func = self.read_header
def answer(self, result):
self.closed = True
try:
self.connection.close()
except AttributeError:
pass
self.resultfunc(result, self.downloadid, self.peerid, self.ip,
self.port)
def _read_header(self, s):
if s == protocol_name:
return 8, self.read_options
return None
def read_header(self, s):
if self._read_header(s):
if self.encrypted:
return None
return 8, self.read_options
if not self.encrypted:
return None
self._write_buffer(s)
return self.encrypter.keylength, self.read_crypto_header
################## ENCRYPTION SUPPORT ######################
def _start_crypto(self):
self.encrypter.setrawaccess(self._read, self._write)
self.write = self.encrypter.write
self.read = self.encrypter.read
if self.buffer:
self.buffer = self.encrypter.decrypt(self.buffer)
def read_crypto_header(self, s):
self.encrypter.received_key(s)
self.encrypter.set_skey(self.downloadid)
cryptmode = b'\x00\x00\x00\x02' # full stream encryption
padc = padding()
self.write(self.encrypter.block3a +
self.encrypter.block3b +
self.encrypter.encrypt(
bytes(8) # VC
+ cryptmode # acceptable crypto modes
+ len(padc).to_bytes(2, 'big')
+ padc # PadC
+ bytes(2))) # no initial payload data
self._max_search = 520
return 1, self.read_crypto_block4a
def _search_for_pattern(self, s, pat):
p = s.find(pat)
if p < 0:
if len(s) >= len(pat):
self._max_search -= len(s) + 1 - len(pat)
if self._max_search < 0:
                    # NatCheck has no close(); abort via answer(False).
                    self.answer(False)
return False
self._write_buffer(s[1 - len(pat):])
return False
self._write_buffer(s[p + len(pat):])
return True
### OUTGOING CONNECTION ###
def read_crypto_block4a(self, s):
if not self._search_for_pattern(s, self.encrypter.VC_pattern()):
return -1, self.read_crypto_block4a # wait for more data
if self._dc: # can't or won't go any further
self.answer(True)
return None
self._start_crypto()
return 6, self.read_crypto_block4b
def read_crypto_block4b(self, s):
self.cryptmode = int.from_bytes(s[:4], 'big') % 4
if self.cryptmode != 2:
return None # unknown encryption
        padlen = int.from_bytes(s[4:6], 'big')
if padlen > 512:
return None
if padlen:
return padlen, self.read_crypto_pad4
return self.read_crypto_block4done()
def read_crypto_pad4(self, s):
# discard data
return self.read_crypto_block4done()
def read_crypto_block4done(self):
if self.cryptmode == 1: # only handshake encryption
if not self.buffer: # oops; check for exceptions to this
return None
self._end_crypto()
        self.write(protocol_name + option_pattern + self.downloadid)
return len(protocol_name), self.read_encrypted_header
### START PROTOCOL OVER ENCRYPTED CONNECTION ###
def read_encrypted_header(self, s):
return self._read_header(s)
################################################
def read_options(self, s):
return 20, self.read_download_id
def read_download_id(self, s):
if s != self.downloadid:
return None
return 20, self.read_peer_id
def read_peer_id(self, s):
if s != self.peerid:
return None
self.answer(True)
return None
def _write(self, message):
if not self.closed:
self.connection.write(message)
def data_came_in(self, connection, s):
self.read(s)
def _write_buffer(self, s):
self.buffer = s + self.buffer
def _read(self, s):
self.buffer += s
while True:
if self.closed:
return
# self.next_len = # of characters function expects
# or 0 = all characters in the buffer
# or -1 = wait for next read, then all characters in the buffer
# not compatible w/ keepalives, switch out after all negotiation
# complete
if self.next_len <= 0:
m = self.buffer
self.buffer = b''
elif len(self.buffer) >= self.next_len:
m = self.buffer[:self.next_len]
self.buffer = self.buffer[self.next_len:]
else:
return
try:
x = self.next_func(m)
except Exception:
if not self.closed:
self.answer(False)
return
if x is None:
if not self.closed:
self.answer(False)
return
self.next_len, self.next_func = x
if self.next_len < 0: # already checked buffer
return # wait for additional data
            # NatCheck never sets bufferlen (unlike Encrypter's Connection,
            # which this read loop was adapted from), so guard the lookup to
            # avoid an AttributeError on every parsed message.
            if getattr(self, 'bufferlen', None) is not None:
                self._read2(b'')
return
def connection_lost(self, connection):
if not self.closed:
self.closed = True
self.resultfunc(False, self.downloadid, self.peerid, self.ip,
self.port)
def connection_flushed(self, connection):
pass
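# Hedged sketch (added for illustration; not part of the original protocol
# code): a standalone miniature of the next_len/next_func buffering pattern
# used by NatCheck._read above. The two parser steps are invented: a one-byte
# length followed by that many payload bytes (lengths are assumed >= 1).
def _example_length_prefixed_reader(chunks):
    """Return the payloads parsed from an iterable of byte chunks.

    e.g. _example_length_prefixed_reader([b'\\x03abc', b'\\x02', b'hi'])
    returns [b'abc', b'hi'].
    """
    payloads = []
    buffer = b''

    def read_len(s):
        return s[0], read_payload

    def read_payload(s):
        payloads.append(s)
        return 1, read_len

    next_len, next_func = 1, read_len
    for chunk in chunks:
        buffer += chunk
        while len(buffer) >= next_len:
            message, buffer = buffer[:next_len], buffer[next_len:]
            next_len, next_func = next_func(message)
    return payloads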
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities."""
import binascii
import json
import os
import re
from typing import Any, List, Dict, Optional, TYPE_CHECKING, Tuple
from azure.identity import DefaultAzureCredential
from libcloudforensics import logging_utils
from libcloudforensics import errors
if TYPE_CHECKING:
# TYPE_CHECKING is always False at runtime, therefore it is safe to ignore
  # the following cyclic import, as it is only used for type hints
from libcloudforensics.providers.azure.internal import compute # pylint: disable=cyclic-import
logging_utils.SetUpLogger(__name__)
logger = logging_utils.GetLogger(__name__)
# pylint: disable=line-too-long
# See https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/resource-name-rules
# pylint: enable=line-too-long
REGEX_DISK_NAME = re.compile('^[\\w]{1,80}$')
REGEX_SNAPSHOT_NAME = re.compile('^(?=.{1,80}$)[a-zA-Z0-9]([\\w,-]*[\\w])?$')
REGEX_ACCOUNT_STORAGE_NAME = re.compile('^[a-z0-9]{1,24}$')
REGEX_COMPUTE_RESOURCE_ID = re.compile(
'/subscriptions/.+/resourceGroups/.+/providers/Microsoft.Compute/.+/.+')
DEFAULT_DISK_COPY_PREFIX = 'evidence'
UBUNTU_1804_SKU = '18.04-LTS'
def _ParseCredentialsFile(profile_name: str) -> Dict[str, Any]:
"""Parse Azure credentials.json file.
Args:
profile_name (str): A name for the Azure account information to retrieve.
The .json file should have the following format:
{
'profile_name': {
'subscriptionId': xxx,
'tenantId': xxx,
'clientId': xxx,
'clientSecret': xxx
},
'other_profile_name': {
'subscriptionId': yyy,
'tenantId': yyy,
'clientId': yyy,
'clientSecret': yyy
},
...
}
Note that you can specify several profiles that use the same tenantId,
clientId and clientSecret but a different subscriptionId.
If you set the environment variable AZURE_CREDENTIALS_PATH to an
absolute path to the credentials file, then the library will look
there instead of in ~/.azure/credentials.json.
Returns:
Dict[str, str]: A dict containing the required account_info fields.
Raises:
CredentialsConfigurationError: If there are environment variables that
are not set or if the credentials file has missing entries/profiles.
FileNotFoundError: If the credentials file is not found.
InvalidFileFormatError: If the credentials file couldn't be parsed.
"""
path = os.getenv('AZURE_CREDENTIALS_PATH')
if not path:
path = os.path.expanduser('~/.azure/credentials.json')
if not os.path.exists(path):
raise FileNotFoundError(
'Credentials file not found. Please place it in '
'"~/.azure/credentials.json" or specify an absolute path to it in '
'the AZURE_CREDENTIALS_PATH environment variable.')
with open(path, encoding='utf-8') as profiles:
try:
account_info: Dict[str, Any] = json.load(profiles).get(profile_name)
except ValueError as exception:
raise errors.InvalidFileFormatError(
'Could not decode JSON file. Please verify the file format:'
' {0!s}'.format(exception), __name__) from exception
if not account_info:
raise errors.CredentialsConfigurationError(
'Profile name {0:s} not found in credentials file {1:s}'.format(
profile_name, path), __name__)
required_entries = ['subscriptionId', 'clientId', 'clientSecret',
'tenantId']
if not all(account_info.get(entry) for entry in required_entries):
raise errors.CredentialsConfigurationError(
'Please make sure that your JSON file has the required entries. The '
'file should contain at least the following: {0:s}'.format(
', '.join(required_entries)), __name__)
return account_info
def _CheckAzureCliCredentials() -> Optional[str]:
"""Test if AzureCliCredentials are configured, returning the subscription
id if successful.
Returns:
str: the subscription_id of the credentials if properly configured or else
None.
Raises:
CredentialsConfigurationError: If AzureCliCredentials are configured but
the active subscription could not be determined.
"""
tokens = None
config_dir = os.getenv('AZURE_CONFIG_DIR')
if not config_dir:
config_dir = os.path.expanduser('~/.azure/')
tokens_path = os.path.join(config_dir, 'accessTokens.json')
profile_path = os.path.join(config_dir, 'azureProfile.json')
if not os.path.exists(tokens_path) or not os.path.exists(profile_path):
return None
with open(tokens_path, encoding='utf-8-sig') as tokens_fd:
tokens = json.load(tokens_fd)
# If tokens are not found then Azure CLI auth is not configured.
if not tokens:
return None
with open(profile_path, encoding='utf-8-sig') as profile_fd:
profile = json.load(profile_fd)
for subscription in profile['subscriptions']:
if subscription['isDefault']:
return str(subscription["id"])
raise errors.CredentialsConfigurationError(
'AzureCliCredentials tokens found but could not determine active '
'subscription. No "isDefault" set in "{0:s}"'.format(config_dir),
__name__)
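# Hedged illustration (added, not part of the original module): the minimal
# azureProfile.json shape that _CheckAzureCliCredentials() looks at. The
# subscription IDs are placeholders.
_EXAMPLE_AZURE_PROFILE = {
    'subscriptions': [
        {'id': '00000000-0000-0000-0000-000000000000', 'isDefault': True},
        {'id': '11111111-1111-1111-1111-111111111111', 'isDefault': False},
    ]
}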
def GetCredentials(profile_name: Optional[str] = None
) -> Tuple[str, DefaultAzureCredential]:
# pylint: disable=line-too-long
"""Get Azure credentials, trying three different methods:
1. If profile_name is provided it will attempt to parse credentials from a
credentials.json file, failing if this raises an exception.
2. Environment variables as per
https://docs.microsoft.com/en-us/azure/developer/python/azure-sdk-authenticate
3. Azure CLI credentials.
Args:
profile_name (str): A name for the Azure account information to retrieve.
If provided, then the library will look into ~/.azure/credentials.json
for the account information linked to profile_name.
Returns:
Tuple[str, DefaultAzureCredential]: Subscription ID and
corresponding Azure credentials.
Raises:
CredentialsConfigurationError: If none of the credential methods work.
"""
# pylint: enable=line-too-long
if profile_name:
account_info = _ParseCredentialsFile(profile_name)
# Set environment variables for DefaultAzureCredentials.
os.environ['AZURE_SUBSCRIPTION_ID'] = account_info['subscriptionId']
os.environ['AZURE_CLIENT_ID'] = account_info['clientId']
os.environ['AZURE_CLIENT_SECRET'] = account_info['clientSecret']
os.environ['AZURE_TENANT_ID'] = account_info['tenantId']
# Check if environment variables are already set for DefaultAzureCredentials.
subscription_id = os.getenv('AZURE_SUBSCRIPTION_ID')
client_id = os.getenv("AZURE_CLIENT_ID")
secret = os.getenv("AZURE_CLIENT_SECRET")
tenant = os.getenv("AZURE_TENANT_ID")
if not (subscription_id and client_id and secret and tenant):
logger.info('EnvironmentCredentials unavailable, falling back to '
'AzureCliCredentials.')
# Will be automatically picked up by DefaultAzureCredential if configured.
subscription_id = _CheckAzureCliCredentials()
if not subscription_id:
raise errors.CredentialsConfigurationError(
'No supported credentials found. If using environment variables '
'please make sure to define: [AZURE_SUBSCRIPTION_ID, AZURE_CLIENT_ID, '
'AZURE_CLIENT_SECRET, AZURE_TENANT_ID].', __name__)
return subscription_id, DefaultAzureCredential()
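# Hedged usage sketch (added; not part of the original module). The profile
# name 'forensics' below is an assumption used purely for illustration.
def _ExampleGetCredentials():
  """Resolve Azure credentials using the lookup order documented above."""
  # 1. Named profile from credentials.json, 2. AZURE_* environment variables,
  # 3. Azure CLI tokens; the first method that works wins.
  subscription_id, credentials = GetCredentials(profile_name='forensics')
  return subscription_id, credentials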
def ExecuteRequest(
client: Any,
func: str,
kwargs: Optional[Dict[str, str]] = None) -> List[Any]:
"""Execute a request to the Azure API.
Args:
client (Any): An Azure operation client object.
func (str): An Azure function to query from the client.
kwargs (Dict): Optional. A dictionary of parameters for the function func.
Returns:
List[Any]: A List of Azure response objects (VirtualMachines, Disks, etc).
Raises:
RuntimeError: If the request to the Azure API could not complete.
"""
if not kwargs:
kwargs = {}
responses = []
next_link = ''
while True:
if next_link:
kwargs['next_link'] = next_link
request = getattr(client, func)
response = request(**kwargs)
responses.append(response)
next_link = response.next_link if hasattr(response, 'next_link') else None
if not next_link:
return responses
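# Hedged usage sketch (added for illustration): how ExecuteRequest is meant to
# be called. 'compute_client.disks' and its 'list' method mirror the
# azure-mgmt-compute SDK layout, but they are assumptions here, not something
# this module guarantees.
def _ExampleListDisks(compute_client: Any) -> List[Any]:
  """Collect every page of a disks.list() call through ExecuteRequest."""
  responses = ExecuteRequest(compute_client.disks, 'list')
  disks: List[Any] = []
  for response in responses:
    disks.extend(list(response))
  return disks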
def GenerateDiskName(snapshot: 'compute.AZComputeSnapshot',
disk_name_prefix: Optional[str] = None) -> str:
"""Generate a new disk name for the disk to be created from the Snapshot.
The disk name must comply with the following RegEx:
    - ^[\\w]{1,80}$
i.e., it must be between 1 and 80 chars, and can only contain alphanumeric
characters and underscores.
Args:
snapshot (AZComputeSnapshot): A disk's Snapshot.
disk_name_prefix (str): Optional. A prefix for the disk name.
Returns:
str: A name for the disk.
Raises:
InvalidNameError: If the disk name does not comply with the RegEx.
"""
# Max length of disk names in Azure is 80 characters
subscription_id = snapshot.az_account.subscription_id
disk_id = subscription_id + snapshot.disk.resource_id
disk_id_crc32 = '{0:08x}'.format(
binascii.crc32(disk_id.encode()) & 0xffffffff)
truncate_at = 80 - len(disk_id_crc32) - len('_copy') - 1
if disk_name_prefix:
disk_name_prefix += '_'
if len(disk_name_prefix) > truncate_at:
# The disk name prefix is too long
disk_name_prefix = disk_name_prefix[:truncate_at]
truncate_at -= len(disk_name_prefix)
disk_name = '{0:s}{1:s}_{2:s}_copy'.format(
disk_name_prefix, snapshot.name[:truncate_at], disk_id_crc32)
else:
disk_name = '{0:s}_{1:s}_copy'.format(
snapshot.name[:truncate_at], disk_id_crc32)
# Azure doesn't allow dashes in disk names, only underscores. If the
# name of the source snapshot contained dashes, we need to replace them.
disk_name = disk_name.replace('-', '_')
if not REGEX_DISK_NAME.match(disk_name):
raise errors.InvalidNameError(
'Disk name {0:s} does not comply with '
'{1:s}'.format(disk_name, REGEX_DISK_NAME.pattern), __name__)
return disk_name
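# Hedged illustration (added): the length budget GenerateDiskName works with,
# reproduced on made-up strings so the truncation arithmetic is easy to follow.
def _ExampleDiskNameBudget() -> str:
  """Show how the 80-character disk name limit is split up."""
  disk_id = 'fake-subscription-id' + '/fake/resource/id'
  disk_id_crc32 = '{0:08x}'.format(binascii.crc32(disk_id.encode()) & 0xffffffff)
  # 8 chars of CRC32, 5 for '_copy', 1 for the separating underscore.
  truncate_at = 80 - len(disk_id_crc32) - len('_copy') - 1  # = 66
  disk_name = '{0:s}_{1:s}_copy'.format('my-snapshot'[:truncate_at],
                                        disk_id_crc32)
  # Dashes are replaced so the result matches REGEX_DISK_NAME above.
  return disk_name.replace('-', '_')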
|
|
"""Crust combines the shell and filling into one control."""
__author__ = "Patrick K. O'Brien <[email protected]>"
__cvsid__ = "$Id$"
__revision__ = "$Revision$"[11:-2]
import wx
import os
import pprint
import re
import sys
import dispatcher
import editwindow
from filling import Filling
import frame
from shell import Shell
from version import VERSION
class Crust(wx.SplitterWindow):
"""Crust based on SplitterWindow."""
name = 'Crust'
revision = __revision__
sashoffset = 300
def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.SP_3D|wx.SP_LIVE_UPDATE,
name='Crust Window', rootObject=None, rootLabel=None,
rootIsNamespace=True, intro='', locals=None,
InterpClass=None,
startupScript=None, execStartupScript=True,
*args, **kwds):
"""Create Crust instance."""
wx.SplitterWindow.__init__(self, parent, id, pos, size, style, name)
# Turn off the tab-traversal style that is automatically
# turned on by wx.SplitterWindow. We do this because on
# Windows the event for Ctrl-Enter is stolen and used as a
# navigation key, but the Shell window uses it to insert lines.
style = self.GetWindowStyle()
self.SetWindowStyle(style & ~wx.TAB_TRAVERSAL)
self.shell = Shell(parent=self, introText=intro,
locals=locals, InterpClass=InterpClass,
startupScript=startupScript,
execStartupScript=execStartupScript,
*args, **kwds)
self.editor = self.shell
if rootObject is None:
rootObject = self.shell.interp.locals
self.notebook = wx.Notebook(parent=self, id=-1)
self.shell.interp.locals['notebook'] = self.notebook
self.filling = Filling(parent=self.notebook,
rootObject=rootObject,
rootLabel=rootLabel,
rootIsNamespace=rootIsNamespace)
# Add 'filling' to the interpreter's locals.
self.shell.interp.locals['filling'] = self.filling
self.notebook.AddPage(page=self.filling, text='Namespace', select=True)
self.display = Display(parent=self.notebook)
self.notebook.AddPage(page=self.display, text='Display')
# Add 'pp' (pretty print) to the interpreter's locals.
self.shell.interp.locals['pp'] = self.display.setItem
self.display.nbTab = self.notebook.GetPageCount()-1
self.calltip = Calltip(parent=self.notebook)
self.notebook.AddPage(page=self.calltip, text='Calltip')
self.sessionlisting = SessionListing(parent=self.notebook)
self.notebook.AddPage(page=self.sessionlisting, text='History')
self.dispatcherlisting = DispatcherListing(parent=self.notebook)
self.notebook.AddPage(page=self.dispatcherlisting, text='Dispatcher')
# Initialize in an unsplit mode, and check later after loading
# settings if we should split or not.
self.shell.Hide()
self.notebook.Hide()
self.Initialize(self.shell)
self._shouldsplit = True
wx.CallAfter(self._CheckShouldSplit)
self.SetMinimumPaneSize(100)
self.Bind(wx.EVT_SIZE, self.SplitterOnSize)
self.Bind(wx.EVT_SPLITTER_SASH_POS_CHANGED, self.OnChanged)
self.Bind(wx.EVT_SPLITTER_DCLICK, self.OnSashDClick)
def _CheckShouldSplit(self):
if self._shouldsplit:
self.SplitHorizontally(self.shell, self.notebook, -self.sashoffset)
self.lastsashpos = self.GetSashPosition()
else:
self.lastsashpos = -1
self.issplit = self.IsSplit()
def ToggleTools(self):
"""Toggle the display of the filling and other tools"""
if self.issplit:
self.Unsplit()
else:
self.SplitHorizontally(self.shell, self.notebook, -self.sashoffset)
self.lastsashpos = self.GetSashPosition()
self.issplit = self.IsSplit()
def ToolsShown(self):
return self.issplit
def OnChanged(self, event):
"""update sash offset from the bottom of the window"""
self.sashoffset = self.GetSize().height - event.GetSashPosition()
self.lastsashpos = event.GetSashPosition()
event.Skip()
def OnSashDClick(self, event):
self.Unsplit()
self.issplit = False
# Make the splitter expand the top window when resized
def SplitterOnSize(self, event):
splitter = event.GetEventObject()
sz = splitter.GetSize()
splitter.SetSashPosition(sz.height - self.sashoffset, True)
event.Skip()
def LoadSettings(self, config):
self.shell.LoadSettings(config)
self.filling.LoadSettings(config)
pos = config.ReadInt('Sash/CrustPos', 400)
wx.CallAfter(self.SetSashPosition, pos)
def _updateSashPosValue():
sz = self.GetSize()
self.sashoffset = sz.height - self.GetSashPosition()
wx.CallAfter(_updateSashPosValue)
zoom = config.ReadInt('View/Zoom/Display', -99)
if zoom != -99:
self.display.SetZoom(zoom)
self.issplit = config.ReadInt('Sash/IsSplit', True)
if not self.issplit:
self._shouldsplit = False
def SaveSettings(self, config):
self.shell.SaveSettings(config)
self.filling.SaveSettings(config)
if self.lastsashpos != -1:
config.WriteInt('Sash/CrustPos', self.lastsashpos)
config.WriteInt('Sash/IsSplit', self.issplit)
config.WriteInt('View/Zoom/Display', self.display.GetZoom())
class Display(editwindow.EditWindow):
"""STC used to display an object using Pretty Print."""
def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=wx.CLIP_CHILDREN | wx.SUNKEN_BORDER,
static=False):
"""Create Display instance."""
editwindow.EditWindow.__init__(self, parent, id, pos, size, style)
# Configure various defaults and user preferences.
self.SetReadOnly(True)
self.SetWrapMode(False)
if not static:
dispatcher.connect(receiver=self.push, signal='Interpreter.push')
def push(self, command, more):
"""Receiver for Interpreter.push signal."""
self.Refresh()
def Refresh(self):
if not hasattr(self, "item"):
return
self.SetReadOnly(False)
text = pprint.pformat(self.item)
self.SetText(text)
self.SetReadOnly(True)
def setItem(self, item):
"""Set item to pretty print in the notebook Display tab."""
self.item = item
self.Refresh()
if self.GetParent().GetSelection() != self.nbTab:
focus = wx.Window.FindFocus()
self.GetParent().SetSelection(self.nbTab)
wx.CallAfter(focus.SetFocus)
# TODO: Switch this to an editwindow.EditWindow
class Calltip(wx.TextCtrl):
"""Text control containing the most recent shell calltip."""
    def __init__(self, parent=None, id=-1, ShellClassName='Shell'):
style = (wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH2)
wx.TextCtrl.__init__(self, parent, id, style=style)
self.SetBackgroundColour(wx.Colour(255, 255, 208))
        self.ShellClassName = ShellClassName
dispatcher.connect(receiver=self.display, signal=self.ShellClassName+'.calltip')
df = self.GetFont()
font = wx.Font(df.GetPointSize(), wx.TELETYPE, wx.NORMAL, wx.NORMAL)
self.SetFont(font)
def display(self, calltip):
"""Receiver for """+self.ShellClassName+""".calltip signal."""
## self.SetValue(calltip) # Caused refresh problem on Windows.
self.Clear()
self.AppendText(calltip)
self.SetInsertionPoint(0)
# TODO: Switch this to an editwindow.EditWindow
class SessionListing(wx.TextCtrl):
"""Text control containing all commands for session."""
    def __init__(self, parent=None, id=-1, ShellClassName='Shell'):
style = (wx.TE_MULTILINE | wx.TE_READONLY |
wx.TE_RICH2 | wx.TE_DONTWRAP)
wx.TextCtrl.__init__(self, parent, id, style=style)
dispatcher.connect(receiver=self.addHistory,
signal=ShellClassName+".addHistory")
dispatcher.connect(receiver=self.clearHistory,
signal=ShellClassName+".clearHistory")
dispatcher.connect(receiver=self.loadHistory,
signal=ShellClassName+".loadHistory")
df = self.GetFont()
font = wx.Font(df.GetPointSize(), wx.TELETYPE, wx.NORMAL, wx.NORMAL)
self.SetFont(font)
def loadHistory(self, history):
# preload the existing history, if any
hist = history[:]
hist.reverse()
self.SetValue('\n'.join(hist) + '\n')
self.SetInsertionPointEnd()
def addHistory(self, command):
if command:
self.SetInsertionPointEnd()
self.AppendText(command + '\n')
def clearHistory(self):
self.SetValue("")
class DispatcherListing(wx.TextCtrl):
"""Text control containing all dispatches for session."""
def __init__(self, parent=None, id=-1):
style = (wx.TE_MULTILINE | wx.TE_READONLY |
wx.TE_RICH2 | wx.TE_DONTWRAP)
wx.TextCtrl.__init__(self, parent, id, style=style)
dispatcher.connect(receiver=self.spy)
df = self.GetFont()
font = wx.Font(df.GetPointSize(), wx.TELETYPE, wx.NORMAL, wx.NORMAL)
self.SetFont(font)
def spy(self, signal, sender):
"""Receiver for Any signal from Any sender."""
text = '%r from %s' % (signal, sender)
self.SetInsertionPointEnd()
start, end = self.GetSelection()
if start != end:
self.SetSelection(0, 0)
self.AppendText(text + '\n')
class CrustFrame(frame.Frame, frame.ShellFrameMixin):
"""Frame containing all the PyCrust components."""
name = 'CrustFrame'
revision = __revision__
def __init__(self, parent=None, id=-1, title='PyCrust',
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.DEFAULT_FRAME_STYLE,
rootObject=None, rootLabel=None, rootIsNamespace=True,
locals=None, InterpClass=None,
config=None, dataDir=None,
*args, **kwds):
"""Create CrustFrame instance."""
frame.Frame.__init__(self, parent, id, title, pos, size, style,
shellName='PyCrust')
frame.ShellFrameMixin.__init__(self, config, dataDir)
if size == wx.DefaultSize:
self.SetSize((800, 600))
intro = 'PyCrust %s - The Flakiest Python Shell' % VERSION
self.SetStatusText(intro.replace('\n', ', '))
self.crust = Crust(parent=self, intro=intro,
rootObject=rootObject,
rootLabel=rootLabel,
rootIsNamespace=rootIsNamespace,
locals=locals,
InterpClass=InterpClass,
startupScript=self.startupScript,
execStartupScript=self.execStartupScript,
*args, **kwds)
self.shell = self.crust.shell
# Override the filling so that status messages go to the status bar.
self.crust.filling.tree.setStatusText = self.SetStatusText
# Override the shell so that status messages go to the status bar.
self.shell.setStatusText = self.SetStatusText
self.shell.SetFocus()
self.LoadSettings()
def OnClose(self, event):
"""Event handler for closing."""
self.SaveSettings()
self.crust.shell.destroy()
self.Destroy()
def OnAbout(self, event):
"""Display an About window."""
title = 'About PyCrust'
text = 'PyCrust %s\n\n' % VERSION + \
'Yet another Python shell, only flakier.\n\n' + \
'Half-baked by Patrick K. O\'Brien,\n' + \
'the other half is still in the oven.\n\n' + \
'Shell Revision: %s\n' % self.shell.revision + \
'Interpreter Revision: %s\n\n' % self.shell.interp.revision + \
'Platform: %s\n' % sys.platform + \
'Python Version: %s\n' % sys.version.split()[0] + \
'wxPython Version: %s\n' % wx.VERSION_STRING + \
('\t(%s)\n' % ", ".join(wx.PlatformInfo[1:]))
dialog = wx.MessageDialog(self, text, title,
wx.OK | wx.ICON_INFORMATION)
dialog.ShowModal()
dialog.Destroy()
def ToggleTools(self):
"""Toggle the display of the filling and other tools"""
return self.crust.ToggleTools()
def ToolsShown(self):
return self.crust.ToolsShown()
def OnHelp(self, event):
"""Show a help dialog."""
frame.ShellFrameMixin.OnHelp(self, event)
def LoadSettings(self):
if self.config is not None:
frame.ShellFrameMixin.LoadSettings(self)
frame.Frame.LoadSettings(self, self.config)
self.crust.LoadSettings(self.config)
def SaveSettings(self, force=False):
if self.config is not None:
            frame.ShellFrameMixin.SaveSettings(self, force)
if self.autoSaveSettings or force:
frame.Frame.SaveSettings(self, self.config)
self.crust.SaveSettings(self.config)
def DoSaveSettings(self):
if self.config is not None:
self.SaveSettings(force=True)
self.config.Flush()
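# Hedged usage sketch (added; not part of the original module): the usual way
# to run a CrustFrame standalone. Guarded by __main__ so importing this module
# is unaffected.
if __name__ == '__main__':
    app = wx.App(False)
    frame_ = CrustFrame()
    frame_.Show()
    app.MainLoop()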
|
|
"""Support for Xiaomi Mi Air Purifier and Xiaomi Mi Air Humidifier."""
import asyncio
from enum import Enum
from functools import partial
import logging
import voluptuous as vol
from homeassistant.components.fan import (
FanEntity,
PLATFORM_SCHEMA,
SUPPORT_SET_SPEED,
DOMAIN,
)
from homeassistant.const import CONF_NAME, CONF_HOST, CONF_TOKEN, ATTR_ENTITY_ID
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Miio Device"
DATA_KEY = "fan.xiaomi_miio"
CONF_MODEL = "model"
MODEL_AIRPURIFIER_V1 = "zhimi.airpurifier.v1"
MODEL_AIRPURIFIER_V2 = "zhimi.airpurifier.v2"
MODEL_AIRPURIFIER_V3 = "zhimi.airpurifier.v3"
MODEL_AIRPURIFIER_V5 = "zhimi.airpurifier.v5"
MODEL_AIRPURIFIER_PRO = "zhimi.airpurifier.v6"
MODEL_AIRPURIFIER_PRO_V7 = "zhimi.airpurifier.v7"
MODEL_AIRPURIFIER_M1 = "zhimi.airpurifier.m1"
MODEL_AIRPURIFIER_M2 = "zhimi.airpurifier.m2"
MODEL_AIRPURIFIER_MA1 = "zhimi.airpurifier.ma1"
MODEL_AIRPURIFIER_MA2 = "zhimi.airpurifier.ma2"
MODEL_AIRPURIFIER_SA1 = "zhimi.airpurifier.sa1"
MODEL_AIRPURIFIER_SA2 = "zhimi.airpurifier.sa2"
MODEL_AIRPURIFIER_2S = "zhimi.airpurifier.mc1"
MODEL_AIRHUMIDIFIER_V1 = "zhimi.humidifier.v1"
MODEL_AIRHUMIDIFIER_CA = "zhimi.humidifier.ca1"
MODEL_AIRFRESH_VA2 = "zhimi.airfresh.va2"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MODEL): vol.In(
[
MODEL_AIRPURIFIER_V1,
MODEL_AIRPURIFIER_V2,
MODEL_AIRPURIFIER_V3,
MODEL_AIRPURIFIER_V5,
MODEL_AIRPURIFIER_PRO,
MODEL_AIRPURIFIER_PRO_V7,
MODEL_AIRPURIFIER_M1,
MODEL_AIRPURIFIER_M2,
MODEL_AIRPURIFIER_MA1,
MODEL_AIRPURIFIER_MA2,
MODEL_AIRPURIFIER_SA1,
MODEL_AIRPURIFIER_SA2,
MODEL_AIRPURIFIER_2S,
MODEL_AIRHUMIDIFIER_V1,
MODEL_AIRHUMIDIFIER_CA,
MODEL_AIRFRESH_VA2,
]
),
}
)
ATTR_MODEL = "model"
# Air Purifier
ATTR_TEMPERATURE = "temperature"
ATTR_HUMIDITY = "humidity"
ATTR_AIR_QUALITY_INDEX = "aqi"
ATTR_MODE = "mode"
ATTR_FILTER_HOURS_USED = "filter_hours_used"
ATTR_FILTER_LIFE = "filter_life_remaining"
ATTR_FAVORITE_LEVEL = "favorite_level"
ATTR_BUZZER = "buzzer"
ATTR_CHILD_LOCK = "child_lock"
ATTR_LED = "led"
ATTR_LED_BRIGHTNESS = "led_brightness"
ATTR_MOTOR_SPEED = "motor_speed"
ATTR_AVERAGE_AIR_QUALITY_INDEX = "average_aqi"
ATTR_PURIFY_VOLUME = "purify_volume"
ATTR_BRIGHTNESS = "brightness"
ATTR_LEVEL = "level"
ATTR_MOTOR2_SPEED = "motor2_speed"
ATTR_ILLUMINANCE = "illuminance"
ATTR_FILTER_RFID_PRODUCT_ID = "filter_rfid_product_id"
ATTR_FILTER_RFID_TAG = "filter_rfid_tag"
ATTR_FILTER_TYPE = "filter_type"
ATTR_LEARN_MODE = "learn_mode"
ATTR_SLEEP_TIME = "sleep_time"
ATTR_SLEEP_LEARN_COUNT = "sleep_mode_learn_count"
ATTR_EXTRA_FEATURES = "extra_features"
ATTR_FEATURES = "features"
ATTR_TURBO_MODE_SUPPORTED = "turbo_mode_supported"
ATTR_AUTO_DETECT = "auto_detect"
ATTR_SLEEP_MODE = "sleep_mode"
ATTR_VOLUME = "volume"
ATTR_USE_TIME = "use_time"
ATTR_BUTTON_PRESSED = "button_pressed"
# Air Humidifier
ATTR_TARGET_HUMIDITY = "target_humidity"
ATTR_TRANS_LEVEL = "trans_level"
ATTR_HARDWARE_VERSION = "hardware_version"
# Air Humidifier CA
ATTR_MOTOR_SPEED = "motor_speed"
ATTR_DEPTH = "depth"
ATTR_DRY = "dry"
# Air Fresh
ATTR_CO2 = "co2"
# Map attributes to properties of the state object
AVAILABLE_ATTRIBUTES_AIRPURIFIER_COMMON = {
ATTR_TEMPERATURE: "temperature",
ATTR_HUMIDITY: "humidity",
ATTR_AIR_QUALITY_INDEX: "aqi",
ATTR_MODE: "mode",
ATTR_FILTER_HOURS_USED: "filter_hours_used",
ATTR_FILTER_LIFE: "filter_life_remaining",
ATTR_FAVORITE_LEVEL: "favorite_level",
ATTR_CHILD_LOCK: "child_lock",
ATTR_LED: "led",
ATTR_MOTOR_SPEED: "motor_speed",
ATTR_AVERAGE_AIR_QUALITY_INDEX: "average_aqi",
ATTR_LEARN_MODE: "learn_mode",
ATTR_EXTRA_FEATURES: "extra_features",
ATTR_TURBO_MODE_SUPPORTED: "turbo_mode_supported",
ATTR_BUTTON_PRESSED: "button_pressed",
}
AVAILABLE_ATTRIBUTES_AIRPURIFIER = {
**AVAILABLE_ATTRIBUTES_AIRPURIFIER_COMMON,
ATTR_PURIFY_VOLUME: "purify_volume",
ATTR_SLEEP_TIME: "sleep_time",
ATTR_SLEEP_LEARN_COUNT: "sleep_mode_learn_count",
ATTR_AUTO_DETECT: "auto_detect",
ATTR_USE_TIME: "use_time",
ATTR_BUZZER: "buzzer",
ATTR_LED_BRIGHTNESS: "led_brightness",
ATTR_SLEEP_MODE: "sleep_mode",
}
AVAILABLE_ATTRIBUTES_AIRPURIFIER_PRO = {
**AVAILABLE_ATTRIBUTES_AIRPURIFIER_COMMON,
ATTR_PURIFY_VOLUME: "purify_volume",
ATTR_USE_TIME: "use_time",
ATTR_FILTER_RFID_PRODUCT_ID: "filter_rfid_product_id",
ATTR_FILTER_RFID_TAG: "filter_rfid_tag",
ATTR_FILTER_TYPE: "filter_type",
ATTR_ILLUMINANCE: "illuminance",
ATTR_MOTOR2_SPEED: "motor2_speed",
ATTR_VOLUME: "volume",
# perhaps supported but unconfirmed
ATTR_AUTO_DETECT: "auto_detect",
ATTR_SLEEP_TIME: "sleep_time",
ATTR_SLEEP_LEARN_COUNT: "sleep_mode_learn_count",
}
AVAILABLE_ATTRIBUTES_AIRPURIFIER_PRO_V7 = {
**AVAILABLE_ATTRIBUTES_AIRPURIFIER_COMMON,
ATTR_FILTER_RFID_PRODUCT_ID: "filter_rfid_product_id",
ATTR_FILTER_RFID_TAG: "filter_rfid_tag",
ATTR_FILTER_TYPE: "filter_type",
ATTR_ILLUMINANCE: "illuminance",
ATTR_MOTOR2_SPEED: "motor2_speed",
ATTR_VOLUME: "volume",
}
AVAILABLE_ATTRIBUTES_AIRPURIFIER_2S = {
**AVAILABLE_ATTRIBUTES_AIRPURIFIER_COMMON,
ATTR_BUZZER: "buzzer",
ATTR_FILTER_RFID_PRODUCT_ID: "filter_rfid_product_id",
ATTR_FILTER_RFID_TAG: "filter_rfid_tag",
ATTR_FILTER_TYPE: "filter_type",
ATTR_ILLUMINANCE: "illuminance",
}
AVAILABLE_ATTRIBUTES_AIRPURIFIER_V3 = {
# Common set isn't used here. It's a very basic version of the device.
ATTR_AIR_QUALITY_INDEX: "aqi",
ATTR_MODE: "mode",
ATTR_LED: "led",
ATTR_BUZZER: "buzzer",
ATTR_CHILD_LOCK: "child_lock",
ATTR_ILLUMINANCE: "illuminance",
ATTR_FILTER_HOURS_USED: "filter_hours_used",
ATTR_FILTER_LIFE: "filter_life_remaining",
ATTR_MOTOR_SPEED: "motor_speed",
# perhaps supported but unconfirmed
ATTR_AVERAGE_AIR_QUALITY_INDEX: "average_aqi",
ATTR_VOLUME: "volume",
ATTR_MOTOR2_SPEED: "motor2_speed",
ATTR_FILTER_RFID_PRODUCT_ID: "filter_rfid_product_id",
ATTR_FILTER_RFID_TAG: "filter_rfid_tag",
ATTR_FILTER_TYPE: "filter_type",
ATTR_PURIFY_VOLUME: "purify_volume",
ATTR_LEARN_MODE: "learn_mode",
ATTR_SLEEP_TIME: "sleep_time",
ATTR_SLEEP_LEARN_COUNT: "sleep_mode_learn_count",
ATTR_EXTRA_FEATURES: "extra_features",
ATTR_AUTO_DETECT: "auto_detect",
ATTR_USE_TIME: "use_time",
ATTR_BUTTON_PRESSED: "button_pressed",
}
AVAILABLE_ATTRIBUTES_AIRHUMIDIFIER_COMMON = {
ATTR_TEMPERATURE: "temperature",
ATTR_HUMIDITY: "humidity",
ATTR_MODE: "mode",
ATTR_BUZZER: "buzzer",
ATTR_CHILD_LOCK: "child_lock",
ATTR_TARGET_HUMIDITY: "target_humidity",
ATTR_LED_BRIGHTNESS: "led_brightness",
ATTR_USE_TIME: "use_time",
ATTR_HARDWARE_VERSION: "hardware_version",
}
AVAILABLE_ATTRIBUTES_AIRHUMIDIFIER = {
**AVAILABLE_ATTRIBUTES_AIRHUMIDIFIER_COMMON,
ATTR_TRANS_LEVEL: "trans_level",
ATTR_BUTTON_PRESSED: "button_pressed",
}
AVAILABLE_ATTRIBUTES_AIRHUMIDIFIER_CA = {
**AVAILABLE_ATTRIBUTES_AIRHUMIDIFIER_COMMON,
ATTR_MOTOR_SPEED: "motor_speed",
ATTR_DEPTH: "depth",
ATTR_DRY: "dry",
}
AVAILABLE_ATTRIBUTES_AIRFRESH = {
ATTR_TEMPERATURE: "temperature",
ATTR_AIR_QUALITY_INDEX: "aqi",
ATTR_AVERAGE_AIR_QUALITY_INDEX: "average_aqi",
ATTR_CO2: "co2",
ATTR_HUMIDITY: "humidity",
ATTR_MODE: "mode",
ATTR_LED: "led",
ATTR_LED_BRIGHTNESS: "led_brightness",
ATTR_BUZZER: "buzzer",
ATTR_CHILD_LOCK: "child_lock",
ATTR_FILTER_LIFE: "filter_life_remaining",
ATTR_FILTER_HOURS_USED: "filter_hours_used",
ATTR_USE_TIME: "use_time",
ATTR_MOTOR_SPEED: "motor_speed",
ATTR_EXTRA_FEATURES: "extra_features",
}
OPERATION_MODES_AIRPURIFIER = ["Auto", "Silent", "Favorite", "Idle"]
OPERATION_MODES_AIRPURIFIER_PRO = ["Auto", "Silent", "Favorite"]
OPERATION_MODES_AIRPURIFIER_PRO_V7 = OPERATION_MODES_AIRPURIFIER_PRO
OPERATION_MODES_AIRPURIFIER_2S = ["Auto", "Silent", "Favorite"]
OPERATION_MODES_AIRPURIFIER_V3 = [
"Auto",
"Silent",
"Favorite",
"Idle",
"Medium",
"High",
"Strong",
]
OPERATION_MODES_AIRFRESH = ["Auto", "Silent", "Interval", "Low", "Middle", "Strong"]
SUCCESS = ["ok"]
FEATURE_SET_BUZZER = 1
FEATURE_SET_LED = 2
FEATURE_SET_CHILD_LOCK = 4
FEATURE_SET_LED_BRIGHTNESS = 8
FEATURE_SET_FAVORITE_LEVEL = 16
FEATURE_SET_AUTO_DETECT = 32
FEATURE_SET_LEARN_MODE = 64
FEATURE_SET_VOLUME = 128
FEATURE_RESET_FILTER = 256
FEATURE_SET_EXTRA_FEATURES = 512
FEATURE_SET_TARGET_HUMIDITY = 1024
FEATURE_SET_DRY = 2048
FEATURE_FLAGS_AIRPURIFIER = (
FEATURE_SET_BUZZER
| FEATURE_SET_CHILD_LOCK
| FEATURE_SET_LED
| FEATURE_SET_LED_BRIGHTNESS
| FEATURE_SET_FAVORITE_LEVEL
| FEATURE_SET_LEARN_MODE
| FEATURE_RESET_FILTER
| FEATURE_SET_EXTRA_FEATURES
)
FEATURE_FLAGS_AIRPURIFIER_PRO = (
FEATURE_SET_CHILD_LOCK
| FEATURE_SET_LED
| FEATURE_SET_FAVORITE_LEVEL
| FEATURE_SET_AUTO_DETECT
| FEATURE_SET_VOLUME
)
FEATURE_FLAGS_AIRPURIFIER_PRO_V7 = (
FEATURE_SET_CHILD_LOCK
| FEATURE_SET_LED
| FEATURE_SET_FAVORITE_LEVEL
| FEATURE_SET_VOLUME
)
FEATURE_FLAGS_AIRPURIFIER_2S = (
FEATURE_SET_BUZZER
| FEATURE_SET_CHILD_LOCK
| FEATURE_SET_LED
| FEATURE_SET_FAVORITE_LEVEL
)
FEATURE_FLAGS_AIRPURIFIER_V3 = (
FEATURE_SET_BUZZER | FEATURE_SET_CHILD_LOCK | FEATURE_SET_LED
)
FEATURE_FLAGS_AIRHUMIDIFIER = (
FEATURE_SET_BUZZER
| FEATURE_SET_CHILD_LOCK
| FEATURE_SET_LED
| FEATURE_SET_LED_BRIGHTNESS
| FEATURE_SET_TARGET_HUMIDITY
)
FEATURE_FLAGS_AIRHUMIDIFIER_CA = FEATURE_FLAGS_AIRHUMIDIFIER | FEATURE_SET_DRY
FEATURE_FLAGS_AIRFRESH = (
FEATURE_SET_BUZZER
| FEATURE_SET_CHILD_LOCK
| FEATURE_SET_LED
| FEATURE_SET_LED_BRIGHTNESS
| FEATURE_RESET_FILTER
| FEATURE_SET_EXTRA_FEATURES
)
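# Hedged illustration (added): how the FEATURE_* bitmasks above are combined
# and queried. The entity methods further down gate every optional command on
# exactly this kind of check.
def _example_supported_features(device_features=FEATURE_FLAGS_AIRPURIFIER_PRO):
    """Return a dict of which optional features a flag combination enables."""
    return {
        "buzzer": bool(device_features & FEATURE_SET_BUZZER),
        "led": bool(device_features & FEATURE_SET_LED),
        "favorite_level": bool(device_features & FEATURE_SET_FAVORITE_LEVEL),
        "auto_detect": bool(device_features & FEATURE_SET_AUTO_DETECT),
        "volume": bool(device_features & FEATURE_SET_VOLUME),
        "dry": bool(device_features & FEATURE_SET_DRY),
    }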
SERVICE_SET_BUZZER_ON = "xiaomi_miio_set_buzzer_on"
SERVICE_SET_BUZZER_OFF = "xiaomi_miio_set_buzzer_off"
SERVICE_SET_LED_ON = "xiaomi_miio_set_led_on"
SERVICE_SET_LED_OFF = "xiaomi_miio_set_led_off"
SERVICE_SET_CHILD_LOCK_ON = "xiaomi_miio_set_child_lock_on"
SERVICE_SET_CHILD_LOCK_OFF = "xiaomi_miio_set_child_lock_off"
SERVICE_SET_LED_BRIGHTNESS = "xiaomi_miio_set_led_brightness"
SERVICE_SET_FAVORITE_LEVEL = "xiaomi_miio_set_favorite_level"
SERVICE_SET_AUTO_DETECT_ON = "xiaomi_miio_set_auto_detect_on"
SERVICE_SET_AUTO_DETECT_OFF = "xiaomi_miio_set_auto_detect_off"
SERVICE_SET_LEARN_MODE_ON = "xiaomi_miio_set_learn_mode_on"
SERVICE_SET_LEARN_MODE_OFF = "xiaomi_miio_set_learn_mode_off"
SERVICE_SET_VOLUME = "xiaomi_miio_set_volume"
SERVICE_RESET_FILTER = "xiaomi_miio_reset_filter"
SERVICE_SET_EXTRA_FEATURES = "xiaomi_miio_set_extra_features"
SERVICE_SET_TARGET_HUMIDITY = "xiaomi_miio_set_target_humidity"
SERVICE_SET_DRY_ON = "xiaomi_miio_set_dry_on"
SERVICE_SET_DRY_OFF = "xiaomi_miio_set_dry_off"
AIRPURIFIER_SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids})
SERVICE_SCHEMA_LED_BRIGHTNESS = AIRPURIFIER_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_BRIGHTNESS): vol.All(vol.Coerce(int), vol.Clamp(min=0, max=2))}
)
SERVICE_SCHEMA_FAVORITE_LEVEL = AIRPURIFIER_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_LEVEL): vol.All(vol.Coerce(int), vol.Clamp(min=0, max=17))}
)
SERVICE_SCHEMA_VOLUME = AIRPURIFIER_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_VOLUME): vol.All(vol.Coerce(int), vol.Clamp(min=0, max=100))}
)
SERVICE_SCHEMA_EXTRA_FEATURES = AIRPURIFIER_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_FEATURES): vol.All(vol.Coerce(int), vol.Range(min=0))}
)
SERVICE_SCHEMA_TARGET_HUMIDITY = AIRPURIFIER_SERVICE_SCHEMA.extend(
{
vol.Required(ATTR_HUMIDITY): vol.All(
vol.Coerce(int), vol.In([30, 40, 50, 60, 70, 80])
)
}
)
SERVICE_TO_METHOD = {
SERVICE_SET_BUZZER_ON: {"method": "async_set_buzzer_on"},
SERVICE_SET_BUZZER_OFF: {"method": "async_set_buzzer_off"},
SERVICE_SET_LED_ON: {"method": "async_set_led_on"},
SERVICE_SET_LED_OFF: {"method": "async_set_led_off"},
SERVICE_SET_CHILD_LOCK_ON: {"method": "async_set_child_lock_on"},
SERVICE_SET_CHILD_LOCK_OFF: {"method": "async_set_child_lock_off"},
SERVICE_SET_AUTO_DETECT_ON: {"method": "async_set_auto_detect_on"},
SERVICE_SET_AUTO_DETECT_OFF: {"method": "async_set_auto_detect_off"},
SERVICE_SET_LEARN_MODE_ON: {"method": "async_set_learn_mode_on"},
SERVICE_SET_LEARN_MODE_OFF: {"method": "async_set_learn_mode_off"},
SERVICE_RESET_FILTER: {"method": "async_reset_filter"},
SERVICE_SET_LED_BRIGHTNESS: {
"method": "async_set_led_brightness",
"schema": SERVICE_SCHEMA_LED_BRIGHTNESS,
},
SERVICE_SET_FAVORITE_LEVEL: {
"method": "async_set_favorite_level",
"schema": SERVICE_SCHEMA_FAVORITE_LEVEL,
},
SERVICE_SET_VOLUME: {"method": "async_set_volume", "schema": SERVICE_SCHEMA_VOLUME},
SERVICE_SET_EXTRA_FEATURES: {
"method": "async_set_extra_features",
"schema": SERVICE_SCHEMA_EXTRA_FEATURES,
},
SERVICE_SET_TARGET_HUMIDITY: {
"method": "async_set_target_humidity",
"schema": SERVICE_SCHEMA_TARGET_HUMIDITY,
},
SERVICE_SET_DRY_ON: {"method": "async_set_dry_on"},
SERVICE_SET_DRY_OFF: {"method": "async_set_dry_off"},
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the miio fan device from config."""
from miio import Device, DeviceException
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
host = config.get(CONF_HOST)
name = config.get(CONF_NAME)
token = config.get(CONF_TOKEN)
model = config.get(CONF_MODEL)
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
unique_id = None
if model is None:
try:
miio_device = Device(host, token)
device_info = miio_device.info()
model = device_info.model
unique_id = "{}-{}".format(model, device_info.mac_address)
_LOGGER.info(
"%s %s %s detected",
model,
device_info.firmware_version,
device_info.hardware_version,
)
except DeviceException:
raise PlatformNotReady
if model.startswith("zhimi.airpurifier."):
from miio import AirPurifier
air_purifier = AirPurifier(host, token)
device = XiaomiAirPurifier(name, air_purifier, model, unique_id)
elif model.startswith("zhimi.humidifier."):
from miio import AirHumidifier
air_humidifier = AirHumidifier(host, token, model=model)
device = XiaomiAirHumidifier(name, air_humidifier, model, unique_id)
elif model.startswith("zhimi.airfresh."):
from miio import AirFresh
air_fresh = AirFresh(host, token)
device = XiaomiAirFresh(name, air_fresh, model, unique_id)
else:
_LOGGER.error(
"Unsupported device found! Please create an issue at "
"https://github.com/syssi/xiaomi_airpurifier/issues "
"and provide the following data: %s",
model,
)
return False
hass.data[DATA_KEY][host] = device
async_add_entities([device], update_before_add=True)
async def async_service_handler(service):
"""Map services to methods on XiaomiAirPurifier."""
method = SERVICE_TO_METHOD.get(service.service)
params = {
key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID
}
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
devices = [
device
for device in hass.data[DATA_KEY].values()
if device.entity_id in entity_ids
]
else:
devices = hass.data[DATA_KEY].values()
update_tasks = []
for device in devices:
if not hasattr(device, method["method"]):
continue
await getattr(device, method["method"])(**params)
update_tasks.append(device.async_update_ha_state(True))
if update_tasks:
await asyncio.wait(update_tasks)
for air_purifier_service in SERVICE_TO_METHOD:
schema = SERVICE_TO_METHOD[air_purifier_service].get(
"schema", AIRPURIFIER_SERVICE_SCHEMA
)
hass.services.async_register(
DOMAIN, air_purifier_service, async_service_handler, schema=schema
)
class XiaomiGenericDevice(FanEntity):
"""Representation of a generic Xiaomi device."""
def __init__(self, name, device, model, unique_id):
"""Initialize the generic Xiaomi device."""
self._name = name
self._device = device
self._model = model
self._unique_id = unique_id
self._available = False
self._state = None
self._state_attrs = {ATTR_MODEL: self._model}
self._device_features = FEATURE_SET_CHILD_LOCK
self._skip_update = False
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_SET_SPEED
@property
def should_poll(self):
"""Poll the device."""
return True
@property
def unique_id(self):
"""Return an unique ID."""
return self._unique_id
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._state_attrs
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@staticmethod
def _extract_value_from_attribute(state, attribute):
value = getattr(state, attribute)
if isinstance(value, Enum):
return value.value
return value
async def _try_command(self, mask_error, func, *args, **kwargs):
"""Call a miio device command handling error messages."""
from miio import DeviceException
try:
result = await self.hass.async_add_executor_job(
partial(func, *args, **kwargs)
)
_LOGGER.debug("Response received from miio device: %s", result)
return result == SUCCESS
except DeviceException as exc:
_LOGGER.error(mask_error, exc)
self._available = False
return False
async def async_turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn the device on."""
if speed:
            # Setting the operation mode also turns the device on, so no
            # explicit turn-on call is needed.
result = await self.async_set_speed(speed)
else:
result = await self._try_command(
"Turning the miio device on failed.", self._device.on
)
if result:
self._state = True
self._skip_update = True
async def async_turn_off(self, **kwargs) -> None:
"""Turn the device off."""
result = await self._try_command(
"Turning the miio device off failed.", self._device.off
)
if result:
self._state = False
self._skip_update = True
async def async_set_buzzer_on(self):
"""Turn the buzzer on."""
if self._device_features & FEATURE_SET_BUZZER == 0:
return
await self._try_command(
"Turning the buzzer of the miio device on failed.",
self._device.set_buzzer,
True,
)
async def async_set_buzzer_off(self):
"""Turn the buzzer off."""
if self._device_features & FEATURE_SET_BUZZER == 0:
return
await self._try_command(
"Turning the buzzer of the miio device off failed.",
self._device.set_buzzer,
False,
)
async def async_set_child_lock_on(self):
"""Turn the child lock on."""
if self._device_features & FEATURE_SET_CHILD_LOCK == 0:
return
await self._try_command(
"Turning the child lock of the miio device on failed.",
self._device.set_child_lock,
True,
)
async def async_set_child_lock_off(self):
"""Turn the child lock off."""
if self._device_features & FEATURE_SET_CHILD_LOCK == 0:
return
await self._try_command(
"Turning the child lock of the miio device off failed.",
self._device.set_child_lock,
False,
)
class XiaomiAirPurifier(XiaomiGenericDevice):
"""Representation of a Xiaomi Air Purifier."""
def __init__(self, name, device, model, unique_id):
"""Initialize the plug switch."""
super().__init__(name, device, model, unique_id)
if self._model == MODEL_AIRPURIFIER_PRO:
self._device_features = FEATURE_FLAGS_AIRPURIFIER_PRO
self._available_attributes = AVAILABLE_ATTRIBUTES_AIRPURIFIER_PRO
self._speed_list = OPERATION_MODES_AIRPURIFIER_PRO
elif self._model == MODEL_AIRPURIFIER_PRO_V7:
self._device_features = FEATURE_FLAGS_AIRPURIFIER_PRO_V7
self._available_attributes = AVAILABLE_ATTRIBUTES_AIRPURIFIER_PRO_V7
self._speed_list = OPERATION_MODES_AIRPURIFIER_PRO_V7
elif self._model == MODEL_AIRPURIFIER_2S:
self._device_features = FEATURE_FLAGS_AIRPURIFIER_2S
self._available_attributes = AVAILABLE_ATTRIBUTES_AIRPURIFIER_2S
self._speed_list = OPERATION_MODES_AIRPURIFIER_2S
elif self._model == MODEL_AIRPURIFIER_V3:
self._device_features = FEATURE_FLAGS_AIRPURIFIER_V3
self._available_attributes = AVAILABLE_ATTRIBUTES_AIRPURIFIER_V3
self._speed_list = OPERATION_MODES_AIRPURIFIER_V3
else:
self._device_features = FEATURE_FLAGS_AIRPURIFIER
self._available_attributes = AVAILABLE_ATTRIBUTES_AIRPURIFIER
self._speed_list = OPERATION_MODES_AIRPURIFIER
self._state_attrs.update(
{attribute: None for attribute in self._available_attributes}
)
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
# On state change the device doesn't provide the new state immediately.
if self._skip_update:
self._skip_update = False
return
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._state_attrs.update(
{
key: self._extract_value_from_attribute(state, value)
for key, value in self._available_attributes.items()
}
)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return self._speed_list
@property
def speed(self):
"""Return the current speed."""
if self._state:
from miio.airpurifier import OperationMode
return OperationMode(self._state_attrs[ATTR_MODE]).name
return None
async def async_set_speed(self, speed: str) -> None:
"""Set the speed of the fan."""
if self.supported_features & SUPPORT_SET_SPEED == 0:
return
from miio.airpurifier import OperationMode
_LOGGER.debug("Setting the operation mode to: %s", speed)
await self._try_command(
"Setting operation mode of the miio device failed.",
self._device.set_mode,
OperationMode[speed.title()],
)
async def async_set_led_on(self):
"""Turn the led on."""
if self._device_features & FEATURE_SET_LED == 0:
return
await self._try_command(
"Turning the led of the miio device off failed.", self._device.set_led, True
)
async def async_set_led_off(self):
"""Turn the led off."""
if self._device_features & FEATURE_SET_LED == 0:
return
await self._try_command(
"Turning the led of the miio device off failed.",
self._device.set_led,
False,
)
async def async_set_led_brightness(self, brightness: int = 2):
"""Set the led brightness."""
if self._device_features & FEATURE_SET_LED_BRIGHTNESS == 0:
return
from miio.airpurifier import LedBrightness
await self._try_command(
"Setting the led brightness of the miio device failed.",
self._device.set_led_brightness,
LedBrightness(brightness),
)
async def async_set_favorite_level(self, level: int = 1):
"""Set the favorite level."""
if self._device_features & FEATURE_SET_FAVORITE_LEVEL == 0:
return
await self._try_command(
"Setting the favorite level of the miio device failed.",
self._device.set_favorite_level,
level,
)
async def async_set_auto_detect_on(self):
"""Turn the auto detect on."""
if self._device_features & FEATURE_SET_AUTO_DETECT == 0:
return
await self._try_command(
"Turning the auto detect of the miio device on failed.",
self._device.set_auto_detect,
True,
)
async def async_set_auto_detect_off(self):
"""Turn the auto detect off."""
if self._device_features & FEATURE_SET_AUTO_DETECT == 0:
return
await self._try_command(
"Turning the auto detect of the miio device off failed.",
self._device.set_auto_detect,
False,
)
async def async_set_learn_mode_on(self):
"""Turn the learn mode on."""
if self._device_features & FEATURE_SET_LEARN_MODE == 0:
return
await self._try_command(
"Turning the learn mode of the miio device on failed.",
self._device.set_learn_mode,
True,
)
async def async_set_learn_mode_off(self):
"""Turn the learn mode off."""
if self._device_features & FEATURE_SET_LEARN_MODE == 0:
return
await self._try_command(
"Turning the learn mode of the miio device off failed.",
self._device.set_learn_mode,
False,
)
async def async_set_volume(self, volume: int = 50):
"""Set the sound volume."""
if self._device_features & FEATURE_SET_VOLUME == 0:
return
await self._try_command(
"Setting the sound volume of the miio device failed.",
self._device.set_volume,
volume,
)
async def async_set_extra_features(self, features: int = 1):
"""Set the extra features."""
if self._device_features & FEATURE_SET_EXTRA_FEATURES == 0:
return
await self._try_command(
"Setting the extra features of the miio device failed.",
self._device.set_extra_features,
features,
)
async def async_reset_filter(self):
"""Reset the filter lifetime and usage."""
if self._device_features & FEATURE_RESET_FILTER == 0:
return
await self._try_command(
"Resetting the filter lifetime of the miio device failed.",
self._device.reset_filter,
)
class XiaomiAirHumidifier(XiaomiGenericDevice):
"""Representation of a Xiaomi Air Humidifier."""
def __init__(self, name, device, model, unique_id):
"""Initialize the plug switch."""
from miio.airhumidifier import OperationMode
super().__init__(name, device, model, unique_id)
if self._model == MODEL_AIRHUMIDIFIER_CA:
self._device_features = FEATURE_FLAGS_AIRHUMIDIFIER_CA
self._available_attributes = AVAILABLE_ATTRIBUTES_AIRHUMIDIFIER_CA
self._speed_list = [
mode.name for mode in OperationMode if mode is not OperationMode.Strong
]
else:
self._device_features = FEATURE_FLAGS_AIRHUMIDIFIER
self._available_attributes = AVAILABLE_ATTRIBUTES_AIRHUMIDIFIER
self._speed_list = [
mode.name for mode in OperationMode if mode is not OperationMode.Auto
]
self._state_attrs.update(
{attribute: None for attribute in self._available_attributes}
)
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
# On state change the device doesn't provide the new state immediately.
if self._skip_update:
self._skip_update = False
return
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._state_attrs.update(
{
key: self._extract_value_from_attribute(state, value)
for key, value in self._available_attributes.items()
}
)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return self._speed_list
@property
def speed(self):
"""Return the current speed."""
if self._state:
from miio.airhumidifier import OperationMode
return OperationMode(self._state_attrs[ATTR_MODE]).name
return None
async def async_set_speed(self, speed: str) -> None:
"""Set the speed of the fan."""
if self.supported_features & SUPPORT_SET_SPEED == 0:
return
from miio.airhumidifier import OperationMode
_LOGGER.debug("Setting the operation mode to: %s", speed)
await self._try_command(
"Setting operation mode of the miio device failed.",
self._device.set_mode,
OperationMode[speed.title()],
)
async def async_set_led_brightness(self, brightness: int = 2):
"""Set the led brightness."""
if self._device_features & FEATURE_SET_LED_BRIGHTNESS == 0:
return
from miio.airhumidifier import LedBrightness
await self._try_command(
"Setting the led brightness of the miio device failed.",
self._device.set_led_brightness,
LedBrightness(brightness),
)
async def async_set_target_humidity(self, humidity: int = 40):
"""Set the target humidity."""
if self._device_features & FEATURE_SET_TARGET_HUMIDITY == 0:
return
await self._try_command(
"Setting the target humidity of the miio device failed.",
self._device.set_target_humidity,
humidity,
)
async def async_set_dry_on(self):
"""Turn the dry mode on."""
if self._device_features & FEATURE_SET_DRY == 0:
return
await self._try_command(
"Turning the dry mode of the miio device off failed.",
self._device.set_dry,
True,
)
async def async_set_dry_off(self):
"""Turn the dry mode off."""
if self._device_features & FEATURE_SET_DRY == 0:
return
await self._try_command(
"Turning the dry mode of the miio device off failed.",
self._device.set_dry,
False,
)
class XiaomiAirFresh(XiaomiGenericDevice):
"""Representation of a Xiaomi Air Fresh."""
def __init__(self, name, device, model, unique_id):
"""Initialize the miio device."""
super().__init__(name, device, model, unique_id)
self._device_features = FEATURE_FLAGS_AIRFRESH
self._available_attributes = AVAILABLE_ATTRIBUTES_AIRFRESH
self._speed_list = OPERATION_MODES_AIRFRESH
self._state_attrs.update(
{attribute: None for attribute in self._available_attributes}
)
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
# On state change the device doesn't provide the new state immediately.
if self._skip_update:
self._skip_update = False
return
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._state_attrs.update(
{
key: self._extract_value_from_attribute(state, value)
for key, value in self._available_attributes.items()
}
)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return self._speed_list
@property
def speed(self):
"""Return the current speed."""
if self._state:
from miio.airfresh import OperationMode
return OperationMode(self._state_attrs[ATTR_MODE]).name
return None
async def async_set_speed(self, speed: str) -> None:
"""Set the speed of the fan."""
if self.supported_features & SUPPORT_SET_SPEED == 0:
return
from miio.airfresh import OperationMode
_LOGGER.debug("Setting the operation mode to: %s", speed)
await self._try_command(
"Setting operation mode of the miio device failed.",
self._device.set_mode,
OperationMode[speed.title()],
)
async def async_set_led_on(self):
"""Turn the led on."""
if self._device_features & FEATURE_SET_LED == 0:
return
await self._try_command(
"Turning the led of the miio device off failed.", self._device.set_led, True
)
async def async_set_led_off(self):
"""Turn the led off."""
if self._device_features & FEATURE_SET_LED == 0:
return
await self._try_command(
"Turning the led of the miio device off failed.",
self._device.set_led,
False,
)
async def async_set_led_brightness(self, brightness: int = 2):
"""Set the led brightness."""
if self._device_features & FEATURE_SET_LED_BRIGHTNESS == 0:
return
from miio.airfresh import LedBrightness
await self._try_command(
"Setting the led brightness of the miio device failed.",
self._device.set_led_brightness,
LedBrightness(brightness),
)
async def async_set_extra_features(self, features: int = 1):
"""Set the extra features."""
if self._device_features & FEATURE_SET_EXTRA_FEATURES == 0:
return
await self._try_command(
"Setting the extra features of the miio device failed.",
self._device.set_extra_features,
features,
)
async def async_reset_filter(self):
"""Reset the filter lifetime and usage."""
if self._device_features & FEATURE_RESET_FILTER == 0:
return
await self._try_command(
"Resetting the filter lifetime of the miio device failed.",
self._device.reset_filter,
)
|
|
import os.path, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import json
import falcon
import urllib
import uuid
import settings
import requests
from geopy.geocoders import Nominatim
import geopy.distance
from geopy.distance import vincenty
import datetime
radius = []
radius_maps = []
#geoJSON template to create radius (polygon) on geojson.io
geoJSON_template = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {},
"geometry": {
"type": "Polygon",
"coordinates": [
]
}
}
]
}
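# The "coordinates" list above is filled in by on_post(): for every stored
# interest radius it appends one ring of [longitude, latitude] pairs tracing
# the ~1 km circle, with the first point repeated to close the polygon.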
class interest(object):
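    """Falcon resource that records user interest in a transport stop.

    POST requests are checked against the mytransport API for stops within
    1 km; when none are found, an interest radius around the requester is
    stored and exposed as a geojson.io link so nearby requests can be grouped.
    """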
global radius
interested = {}
    # radius = []
def proximity_to_others(self, my_coordinates):
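        """Return the distance in kilometers between my_coordinates and the
        center of the first stored interest radius, together with that center,
        or (0, []) when no interest radii exist yet."""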
if radius:
for x in radius:
radius_center = (x['center'][0],x['center'][1])
my_coordinates = (my_coordinates[0], my_coordinates[1])
distance = vincenty(radius_center, my_coordinates).kilometers
print("Proximity distance")
print(distance)
return distance, x["center"]
else:
return 0, []
def geojson_io_prox(self, resp, my_coordinates, user_name):
global radius
distance = 0
        nearest_center = []
        try:
            distance, nearest_center = self.proximity_to_others(my_coordinates)
except Exception as e:
print(e)
        if not distance or distance >= 1:
points = []
start = geopy.Point(my_coordinates[0], my_coordinates[1])
d = geopy.distance.VincentyDistance(kilometers = 1)
for x in range(0,360, 10):
points.append(d.destination(point=start, bearing=x))
print("\n\n POINTS")
print("\n\n")
radius_dict = {
'center': my_coordinates,
'radius': points,
'people': [user_name,],
'created_date': datetime.datetime.utcnow().strftime("%a %b %d %H:%M:%S %Z %Y")
}
radius.append(radius_dict)
print("\n\n RADIUS: ")
print(radius)
print("\n\n")
else:
for x in radius:
if x["center"] == radius:
x['people'].append(
{'name': user_name,
'coordinates':
my_coordinates}
)
def proximity(self,req, resp, my_coordinates, user_name):
# Works out user/client proximity to mytransport API stops
        # Uses a 1 km radius, assumed to be an average walking distance.
global radius_maps
google_map_url = "http://www.google.com/maps/place/"
query_params = {"point":"{},{}".format(my_coordinates[0], my_coordinates[1]),
"radius":"1000"}
endpoint ="api/stops"
headers = {"Authorization": "Bearer {}".format(settings.ACCESS_TOKEN)}
request = requests.get("{}/{}".format(settings.API_URL,endpoint),
params=query_params,
headers=headers)
print("Response from api/stops")
print(request.status_code)
response_data = request.json()
print(type(response_data))
if not response_data:
resp.status = falcon.HTTP_200
your_radius_map = ""
for x in radius_maps:
if x["center"] == my_coordinates:
your_radius_map = x["geoJSON_url"]
            message_dict = {'message':
                            "No stops in your area, adding you to interest area",
                            "maps": your_radius_map}
            resp.body = json.dumps(message_dict)
return False
else:
map_list = []
message_dict = {"message":"", "maps":[]}
for x in response_data:
print(x)
if 'geometry' in x:
coordinates = x["geometry"]["coordinates"]
map_list.append("{}{},{}".format(google_map_url,
coordinates[1],
coordinates[0]))
message_dict["maps"] = map_list
            if map_list:
                message_dict["message"] = """You have existing stops within 1km
                of your location"""
            else:
                message_dict["message"] = """You have no existing stops nearby,
                we will combine your interest in a stop with others in the area"""
resp.body = json.dumps(message_dict)
resp.status = falcon.HTTP_200
return True
#return True
def geopy_coordinates(self, address,resp):
try:
geolocator = Nominatim()
location = geolocator.geocode(address)
if location.latitude and location.longitude:
return [location.latitude, location.longitude]
except Exception as e:
print(e)
            resp.body = json.dumps({'message': 'Bad address, '
                                    'try being more specific and try again'})
resp.status = falcon.HTTP_400
def on_get(self, req, resp):
resp_dict = {"message":"Post request needed with GeoLocation data"}
resp.body = json.dumps(resp_dict)
resp.status = falcon.HTTP_200
def on_post(self, req, resp):
# Main API method, post the following
'''
POST Request
data type: JSON
Required: name, address or coordinates
data format : {
"name" : "Yourname",
"address" : "Your number and street address, province, etc"
"geometry" : { "coordinates" : ["x", "y"] }
'''
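        # Illustrative payload only (field names from the docstring above, the
        # values are made up): {"name": "Alice", "address": "1 Main Road, Cape
        # Town"} or, instead of an address, a "geometry" with coordinates in
        # the same [latitude, longitude] order that geopy_coordinates returns.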
global radius_maps
global radius
print(req.headers)
user_name = ""
post_data = json.load(req.stream)
print(post_data)
if "name" in post_data:
user_name = post_data["name"]
print("Username IF statement")
print(user_name)
if "geometry" in post_data:
if not self.proximity(req,resp, post_data["geometry"]["coordinates"],user_name):
self.geojson_io_prox(resp, post_data["geometry"]["coordinates"],user_name)
elif post_data["address"]:
if "address" in post_data:
my_coordinates = self.geopy_coordinates(post_data["address"],resp)
print("BASED ON ADDRESS")
proximity = self.proximity(req, resp, my_coordinates, user_name)
print("PROXIMITY")
print(proximity)
if proximity == False:
print("NO routes")
self.geojson_io_prox(resp,my_coordinates, user_name)
else:
            resp.status = falcon.HTTP_400
            resp_dict = {'message':
                         'Please supply an address or coordinates (long, lat)'}
            # json.dumps gives the message proper JSON formatting
resp.body = json.dumps(resp_dict)
print("Current Radius")
print(radius)
        radius_maps = []
        # Rebuild the polygon rings from scratch on every request so the shared
        # template doesn't accumulate duplicate coordinates.
        geoJSON_template['features'][0]['geometry']['coordinates'] = []
        for x in radius:
            radius_list = []
            for y in x['radius']:
                radius_list.append([y[1], y[0]])
            radius_list.append([x['radius'][0][1], x['radius'][0][0]])
            geoJSON_template['features'][0]['geometry']['coordinates'].append(radius_list)
radius_maps.append( {
'center': x['center'],
'geoJSON': geoJSON_template,
'geoJSON_url' : "http://geojson.io/#map=5/{}/{}&data=data:application/json,{}".format(
x['center'][1], x['center'][0], urllib.quote(json.dumps(geoJSON_template).encode()) )
}
)
#resp.body
print(radius_maps)
|
|
import sys
import weakref
import asyncio
from asyncio import CancelledError
import traceback
from functools import partial
import logging
logger = logging.getLogger("seamless")
def print_info(*args):
msg = " ".join([str(arg) for arg in args])
logger.info(msg)
def print_warning(*args):
msg = " ".join([str(arg) for arg in args])
logger.warning(msg)
def print_debug(*args):
msg = " ".join([str(arg) for arg in args])
logger.debug(msg)
def print_error(*args):
msg = " ".join([str(arg) for arg in args])
logger.error(msg)
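# Shallow dict comparison helper: a None "new" value is always treated as
# unequal, otherwise the two mappings must have the same length and the same
# value for every key of "old".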
def is_equal(old, new):
if new is None:
return False
if len(old) != len(new):
return False
for k in old:
if old[k] != new[k]:
return False
return True
_evaluation_locks = [None] * 20 # twenty evaluations in parallel
def set_parallel_evaluations(evaluations):
if len(_evaluation_locks) != evaluations:
if any(_evaluation_locks):
msg = "WARNING: Cannot change number of parallel evaluations from %d to %d since there are running evaluations"
print(msg % (len(_evaluation_locks), evaluations), file=sys.stderr)
else:
_evaluation_locks[:] = [None] * evaluations
async def acquire_evaluation_lock(task):
while 1:
for locknr, lock in enumerate(_evaluation_locks):
if lock is None:
_evaluation_locks[locknr] = task
return locknr
await asyncio.sleep(0.01)
def release_evaluation_lock(locknr):
assert _evaluation_locks[locknr] is not None
_evaluation_locks[locknr] = None
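# Minimal usage sketch (an assumption, not code taken from the rest of the
# codebase): a coroutine grabs one of the parallel evaluation slots, runs its
# work, and always gives the slot back in a finally block.
#
#   async def evaluate_guarded(task, work):
#       locknr = await acquire_evaluation_lock(task)
#       try:
#           return await work()
#       finally:
#           release_evaluation_lock(locknr)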
class Task:
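    """Base class for manager tasks.

    A Task wraps an asyncio future and adds reference counting via
    caller_count, optional deduplication through `refkey` (value-identical
    tasks share a single "realtask" and register themselves as refholders),
    and cooperative cancellation through the manager's taskmanager.
    """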
_realtask = None
_awaiting = False
_canceled = False
_started = False
_cleaned = False
_cached_root = None
_runner = None
future = None
caller_count = None
def __init__(self, manager, *args, **kwargs):
if isinstance(manager, weakref.ref):
manager = manager()
assert isinstance(manager, Manager)
self._dependencies = []
taskmanager = manager.taskmanager
reftask = None
if self.refkey is not None:
reftask = taskmanager.reftasks.get(self.refkey)
if reftask is not None:
self.set_realtask(reftask)
else:
taskmanager.reftasks[self.refkey] = self
taskmanager.rev_reftasks[self] = self.refkey
self.manager = weakref.ref(manager)
if reftask is None:
self.refholders = [self] # tasks that are value-identical to this one,
# of which this one is the realtask
taskmanager._task_id_counter += 1
self.taskid = taskmanager._task_id_counter
self.caller_count = 0
@property
def refkey(self):
return None
@property
def dependencies(self):
if self._realtask is not None:
return self._realtask.dependencies
else:
return self._dependencies
def _root(self):
if self._cached_root is not None:
return self._cached_root
root = None
for dep in self._dependencies:
deproot = dep._root()
if root is None:
root = deproot
elif deproot is not None:
assert root is deproot, (root, deproot) # tasks cannot depend on multiple toplevel contexts
self._cached_root = root
return root
def set_realtask(self, realtask):
self._realtask = realtask
realtask.refholders.append(self)
async def run(self):
realtask = self._realtask
if realtask is not None:
result = await realtask.run()
return result
already_launched = (self.future is not None)
if not already_launched:
self._launch()
assert self.future is not None
if self.future.done():
return self.future.result()
self._awaiting = True
try:
if self.caller_count != -999:
self.caller_count += 1
await asyncio.shield(self.future)
except CancelledError:
if self.caller_count != -999:
self.caller_count -= 1
if self.caller_count == 0:
print_debug("CANCELING", self.__class__.__name__, self.taskid)
self.cancel()
raise CancelledError from None
return self.future.result()
async def _run0(self, taskmanager):
taskmanager.launching_tasks.discard(self)
await asyncio.shield(taskmanager.await_active())
await asyncio.shield(communion_server.startup)
while len(taskmanager.synctasks):
await asyncio.sleep(0.001)
if not isinstance(self, (UponConnectionTask, EvaluateExpressionTask, GetBufferTask, BackgroundTask)):
await taskmanager.await_barrier(self.taskid)
if isinstance(self, StructuredCellAuthTask):
scell = self.dependencies[0]
# for efficiency, just wait a few msec, since we often get re-triggered
await asyncio.sleep(0.005)
if isinstance(self, StructuredCellJoinTask):
scell = self.dependencies[0]
# for joining, also wait a bit, but this is a kludge;
# for some reason, joins are more often launched than they should
# (see highlevel/tests/joincache.py)
# TODO: look more into it
await asyncio.sleep(0.001)
self._started = True
print_debug("RUN", self.__class__.__name__, self.taskid, self.dependencies)
self._runner = self._run()
return await self._runner
def _launch(self):
manager = self.manager()
if manager is None or manager._destroyed:
return
taskmanager = manager.taskmanager
if self.future is not None:
return taskmanager
if self._canceled:
taskmanager.declare_task_finished(self.taskid)
taskmanager.launching_tasks.discard(self)
return
awaitable = self._run0(taskmanager)
self.future = asyncio.ensure_future(awaitable)
taskmanager.add_task(self)
return taskmanager
def launch(self):
realtask = self._realtask
if realtask is not None:
return realtask.launch()
if self.future is not None:
return
manager = self.manager()
if manager is None or manager._destroyed:
return
taskmanager = manager.taskmanager
taskmanager.launching_tasks.add(self)
self._launch()
self.caller_count = -999
def launch_and_await(self):
assert not asyncio.get_event_loop().is_running()
realtask = self._realtask
if realtask is not None:
return realtask.launch_and_await()
# Blocking version of launch
taskmanager = self._launch()
self._awaiting = True
if taskmanager is None:
raise CancelledError
taskmanager.loop.run_until_complete(self.future)
return self.future.result()
def cancel_refholder(self, refholder):
assert self._realtask is None
self.refholders.remove(refholder)
if not len(self.refholders):
self.cancel()
def cancel(self):
if self._canceled:
return
manager = self.manager()
if manager is None or manager._destroyed:
return
taskmanager = manager.taskmanager
self._canceled = True
print_debug("CANCEL", self.__class__.__name__, self.taskid, self.dependencies)
realtask = self._realtask
if realtask is not None:
return realtask.cancel_refholder(self)
if self.future is not None:
if not self.future.cancelled():
self.future.cancel()
taskmanager.launching_tasks.discard(self)
taskmanager.cancel_task(self)
def __str__(self):
        return self.__class__.__name__ + " " + str(self.taskid)
class BackgroundTask(Task):
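    """Marker subclass: background tasks skip the task barrier in Task._run0."""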
pass
from .set_value import SetCellValueTask
from .set_buffer import SetCellBufferTask
from .serialize_buffer import SerializeToBufferTask
from .deserialize_buffer import DeserializeBufferTask
from .checksum import CellChecksumTask, CalculateChecksumTask
from .cell_update import CellUpdateTask
from .get_buffer import GetBufferTask
from .upon_connection import UponConnectionTask, UponBiLinkTask
from .structured_cell import StructuredCellAuthTask, StructuredCellJoinTask
from .evaluate_expression import EvaluateExpressionTask
from .get_buffer import GetBufferTask
from ..manager import Manager
from ....communion_server import communion_server
|
|
from __future__ import absolute_import
import unittest
from tinyquery import lexer
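# Expected tokens are written as (TOKEN_TYPE, value) pairs, matching the
# (tok.type, tok.value) tuples that assert_tokens extracts from the lexer.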
plus = ('PLUS', '+')
minus = ('MINUS', '-')
star = ('STAR', '*')
divided_by = ('DIVIDED_BY', '/')
mod = ('MOD', '%')
equals = ('EQUALS', '=')
doubleequals = ('EQUALS', '==')
not_equal = ('NOT_EQUAL', '!=')
greater_than = ('GREATER_THAN', '>')
less_than = ('LESS_THAN', '<')
greater_than_or_equal = ('GREATER_THAN_OR_EQUAL', '>=')
less_than_or_equal = ('LESS_THAN_OR_EQUAL', '<=')
not_tok = ('NOT', 'not')
is_tok = ('IS', 'is')
null_tok = ('NULL', 'null')
true_tok = ('TRUE', 'true')
false_tok = ('FALSE', 'false')
in_tok = ('IN', 'in')
select = ('SELECT', 'select')
as_tok = ('AS', 'as')
from_tok = ('FROM', 'from')
where = ('WHERE', 'where')
join = ('JOIN', 'join')
on = ('ON', 'on')
group = ('GROUP', 'group')
by = ('BY', 'by')
each = ('EACH', 'each')
left = ('LEFT', 'left')
outer = ('OUTER', 'outer')
cross = ('CROSS', 'cross')
order = ('ORDER', 'order')
asc = ('ASC', 'asc')
desc = ('DESC', 'desc')
limit = ('LIMIT', 'limit')
lparen = ('LPAREN', '(')
rparen = ('RPAREN', ')')
comma = ('COMMA', ',')
dot = ('DOT', '.')
case = ('CASE', 'case')
when = ('WHEN', 'when')
then = ('THEN', 'then')
else_ = ('ELSE', 'else')
end = ('END', 'end')
contains = ('CONTAINS', 'contains')
within = ('WITHIN', 'within')
record = ('RECORD', 'record')
count = ('COUNT', 'count')
def int_(n):
return 'INTEGER', n
def flt(f):
return 'FLOAT', f
def ident(name):
return 'ID', name
def string(s):
return 'STRING', s
class LexerTest(unittest.TestCase):
def assert_tokens(self, text, expected_tokens):
tokens = lexer.lex_text(text)
self.assertEqual(expected_tokens,
[(tok.type, tok.value) for tok in tokens])
def test_lex_simple_select(self):
self.assert_tokens('SELECT 0', [select, int_(0)])
def test_lex_addition(self):
self.assert_tokens('SELECT 1 + 2', [select, int_(1), plus, int_(2)])
def test_arithmetic_operators(self):
self.assert_tokens(
'SELECT 0 + 1 - 2 * 3 / 4 % 5',
[select, int_(0), plus, int_(1), minus, int_(2), star, int_(3),
divided_by, int_(4), mod, int_(5)])
def test_select_from_table(self):
self.assert_tokens(
'SELECT foo FROM bar',
[select, ident('foo'), from_tok, ident('bar')])
def test_comparisons(self):
self.assert_tokens(
'SELECT 1 > 2 <= 3 = 4 != 5 < 6 >= 7',
[select, int_(1), greater_than, int_(2), less_than_or_equal,
int_(3), equals, int_(4), not_equal, int_(5), less_than, int_(6),
greater_than_or_equal, int_(7)]
)
def test_parens(self):
self.assert_tokens(
'SELECT 2 * (3 + 4)',
[select, int_(2), star, lparen, int_(3), plus, int_(4), rparen]
)
def test_negative_numbers(self):
self.assert_tokens(
'SELECT -5',
[select, minus, int_(5)]
)
def test_floating_numbers(self):
self.assert_tokens(
'SELECT 5.3',
[select, flt(5.3)]
)
self.assert_tokens(
'SELECT 5.3E4',
[select, flt(53000.0)]
)
self.assert_tokens(
'SELECT 5.3e2',
[select, flt(530.0)]
)
def test_function_call(self):
self.assert_tokens(
'SELECT ABS(-5), POW(x, 3), NOW() FROM test_table',
[select, ident('ABS'), lparen, minus, int_(5), rparen, comma,
ident('POW'), lparen, ident('x'), comma, int_(3), rparen, comma,
ident('NOW'), lparen, rparen, from_tok, ident('test_table')]
)
def test_select_where(self):
self.assert_tokens(
'SELECT foo FROM bar WHERE foo > 3',
[select, ident('foo'), from_tok, ident('bar'), where, ident('foo'),
greater_than, int_(3)]
)
def test_multiple_select(self):
self.assert_tokens(
'SELECT a AS foo, b bar, a + 1 baz FROM test_table',
[select, ident('a'), as_tok, ident('foo'), comma, ident('b'),
ident('bar'), comma, ident('a'), plus, int_(1), ident('baz'),
from_tok, ident('test_table')])
def test_aggregates(self):
self.assert_tokens(
'SELECT MAX(foo) FROM bar',
[select, ident('MAX'), lparen, ident('foo'), rparen, from_tok,
ident('bar')]
)
def test_group_by(self):
self.assert_tokens(
'SELECT foo FROM bar GROUP BY baz',
[select, ident('foo'), from_tok, ident('bar'), group, by,
ident('baz')])
    def test_select_multiple_tables(self):
self.assert_tokens(
'SELECT foo FROM table1, table2',
[select, ident('foo'), from_tok, ident('table1'), comma,
ident('table2')])
def test_subquery(self):
self.assert_tokens(
'SELECT foo FROM (SELECT val + 1 AS foo FROM test_table)',
[select, ident('foo'), from_tok, lparen, select, ident('val'),
plus, int_(1), as_tok, ident('foo'), from_tok,
ident('test_table'), rparen]
)
def test_join(self):
self.assert_tokens(
'SELECT foo FROM table1 JOIN table2 ON table1.bar = table2.bar',
[select, ident('foo'), from_tok, ident('table1'), join,
ident('table2'), on, ident('table1'), dot, ident('bar'), equals,
ident('table2'), dot, ident('bar')]
)
def test_null_comparisons(self):
self.assert_tokens(
'SELECT foo IS NULL, bar IS NOT NULL FROM table1',
[select, ident('foo'), is_tok, null_tok, comma, ident('bar'),
is_tok, not_tok, null_tok, from_tok, ident('table1')]
)
def test_group_each_by(self):
self.assert_tokens(
'SELECT 0 FROM table GROUP EACH BY foo',
[select, int_(0), from_tok, ident('table'), group, each, by,
ident('foo')]
)
def test_string_literal(self):
self.assert_tokens(
'SELECT foo = "hello", bar == \'world\' FROM table',
[select, ident('foo'), equals, string('hello'), comma,
ident('bar'), doubleequals, string('world'), from_tok,
ident('table')]
)
self.assert_tokens(
'SELECT foo = r"hello", bar == r\'world\' FROM table',
[select, ident('foo'), equals, string('hello'), comma,
ident('bar'), doubleequals, string('world'), from_tok,
ident('table')]
)
def test_other_literals(self):
self.assert_tokens(
'SELECT true, false, null',
[select, true_tok, comma, false_tok, comma, null_tok])
def test_in(self):
self.assert_tokens(
'SELECT 1 IN (1, 2)',
[select, int_(1), in_tok, lparen, int_(1), comma, int_(2), rparen])
def test_comment(self):
self.assert_tokens(
"""
SELECT -- This selects things
foo -- The first field
FROM bar
""",
[select, ident('foo'), from_tok, ident('bar')])
self.assert_tokens(
"""
SELECT # This selects things
foo # The first field
FROM bar
""",
[select, ident('foo'), from_tok, ident('bar')])
self.assert_tokens(
"""
SELECT // This selects things
foo // The first field
FROM bar
""",
[select, ident('foo'), from_tok, ident('bar')])
def test_left_outer_join(self):
self.assert_tokens(
'SELECT foo FROM t1 LEFT OUTER JOIN EACH t2 ON t1.foo = t2.bar',
[select, ident('foo'), from_tok, ident('t1'), left, outer, join,
each, ident('t2'), on, ident('t1'), dot, ident('foo'), equals,
ident('t2'), dot, ident('bar')])
def test_cross_join(self):
self.assert_tokens(
'SELECT 0 FROM t1 CROSS JOIN t2',
[select, int_(0), from_tok, ident('t1'), cross, join, ident('t2')]
)
def test_limit(self):
self.assert_tokens(
'SELECT * FROM my_table LIMIT 10',
[select, star, from_tok, ident('my_table'), limit, int_(10)]
)
def test_order_by(self):
self.assert_tokens(
'SELECT * FROM my_table ORDER BY val DESC, val2 ASC,',
[select, star, from_tok, ident('my_table'), order, by,
ident('val'), desc, comma, ident('val2'), asc, comma]
)
def test_square_brackets(self):
# It looks like square brackets are handled at lex time. For example,
# putting a space between the square brackets and the table name isn't
# allowed.
self.assert_tokens(
'SELECT [max](val) FROM [2014.test_table]',
[select, ident('max'), lparen, ident('val'), rparen, from_tok,
ident('2014.test_table')]
)
def test_contains(self):
self.assert_tokens(
'SELECT a CONTAINS b',
[select, ident('a'), contains, ident('b')]
)
def test_within_record(self):
self.assert_tokens(
'SELECT COUNT(*) WITHIN RECORD AS something FROM bar',
[select, count, lparen, star, rparen, within, record,
as_tok, ident('something'), from_tok, ident('bar')]
)
def test_within_clause(self):
self.assert_tokens(
'SELECT COUNT(citiesLived.yearsLived) WITHIN citiesLived AS '
'numberOfTimesInEachCity FROM table',
[select, count, lparen, ident('citiesLived'), dot,
ident('yearsLived'), rparen, within, ident('citiesLived'), as_tok,
ident('numberOfTimesInEachCity'), from_tok, ident('table')]
)
def test_case(self):
self.assert_tokens(
'SELECT CASE WHEN x = 1 THEN 1 WHEN x = 2 THEN 4 ELSE 9 END',
[select, case, when, ident('x'), equals, int_(1), then, int_(1),
when, ident('x'), equals, int_(2), then, int_(4), else_, int_(9),
end]
)
|
|
import sys
from decimal import Decimal
import numpy as np
from numpy.testing import *
from numpy.testing.utils import WarningManager
import warnings
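# WarningManager is an older numpy.testing helper, equivalent to
# warnings.catch_warnings(); it is used below to silence ComplexWarning around
# the unsafe 'as f8' casting tests.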
class TestEinSum(TestCase):
def test_einsum_errors(self):
# Need enough arguments
assert_raises(ValueError, np.einsum)
assert_raises(ValueError, np.einsum, "")
# subscripts must be a string
assert_raises(TypeError, np.einsum, 0, 0)
# out parameter must be an array
assert_raises(TypeError, np.einsum, "", 0, out='test')
# order parameter must be a valid order
assert_raises(TypeError, np.einsum, "", 0, order='W')
# casting parameter must be a valid casting
assert_raises(ValueError, np.einsum, "", 0, casting='blah')
# dtype parameter must be a valid dtype
assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type')
# other keyword arguments are rejected
assert_raises(TypeError, np.einsum, "", 0, bad_arg=0)
# number of operands must match count in subscripts string
assert_raises(ValueError, np.einsum, "", 0, 0)
assert_raises(ValueError, np.einsum, ",", 0, [0], [0])
assert_raises(ValueError, np.einsum, ",", [0])
# can't have more subscripts than dimensions in the operand
assert_raises(ValueError, np.einsum, "i", 0)
assert_raises(ValueError, np.einsum, "ij", [0,0])
assert_raises(ValueError, np.einsum, "...i", 0)
assert_raises(ValueError, np.einsum, "i...j", [0,0])
assert_raises(ValueError, np.einsum, "i...", 0)
assert_raises(ValueError, np.einsum, "ij...", [0,0])
# invalid ellipsis
assert_raises(ValueError, np.einsum, "i..", [0,0])
assert_raises(ValueError, np.einsum, ".i...", [0,0])
assert_raises(ValueError, np.einsum, "j->..j", [0,0])
assert_raises(ValueError, np.einsum, "j->.j...", [0,0])
# invalid subscript character
assert_raises(ValueError, np.einsum, "i%...", [0,0])
assert_raises(ValueError, np.einsum, "...j$", [0,0])
assert_raises(ValueError, np.einsum, "i->&", [0,0])
# output subscripts must appear in input
assert_raises(ValueError, np.einsum, "i->ij", [0,0])
# output subscripts may only be specified once
assert_raises(ValueError, np.einsum, "ij->jij", [[0,0],[0,0]])
        # dimensions must match when being collapsed
assert_raises(ValueError, np.einsum, "ii", np.arange(6).reshape(2,3))
assert_raises(ValueError, np.einsum, "ii->i", np.arange(6).reshape(2,3))
# broadcasting to new dimensions must be enabled explicitly
assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2,3))
assert_raises(ValueError, np.einsum, "i->i", [[0,1],[0,1]],
out=np.arange(4).reshape(2,2))
def test_einsum_views(self):
# pass-through
a = np.arange(6)
a.shape = (2,3)
b = np.einsum("...", a)
assert_(b.base is a)
b = np.einsum(a, [Ellipsis])
assert_(b.base is a)
b = np.einsum("ij", a)
assert_(b.base is a)
assert_equal(b, a)
b = np.einsum(a, [0,1])
assert_(b.base is a)
assert_equal(b, a)
# transpose
a = np.arange(6)
a.shape = (2,3)
b = np.einsum("ji", a)
assert_(b.base is a)
assert_equal(b, a.T)
b = np.einsum(a, [1,0])
assert_(b.base is a)
assert_equal(b, a.T)
# diagonal
a = np.arange(9)
a.shape = (3,3)
b = np.einsum("ii->i", a)
assert_(b.base is a)
assert_equal(b, [a[i,i] for i in range(3)])
b = np.einsum(a, [0,0], [0])
assert_(b.base is a)
assert_equal(b, [a[i,i] for i in range(3)])
# diagonal with various ways of broadcasting an additional dimension
a = np.arange(27)
a.shape = (3,3,3)
b = np.einsum("...ii->...i", a)
assert_(b.base is a)
assert_equal(b, [[x[i,i] for i in range(3)] for x in a])
b = np.einsum(a, [Ellipsis,0,0], [Ellipsis,0])
assert_(b.base is a)
assert_equal(b, [[x[i,i] for i in range(3)] for x in a])
b = np.einsum("ii...->...i", a)
assert_(b.base is a)
assert_equal(b, [[x[i,i] for i in range(3)]
for x in a.transpose(2,0,1)])
b = np.einsum(a, [0,0,Ellipsis], [Ellipsis,0])
assert_(b.base is a)
assert_equal(b, [[x[i,i] for i in range(3)]
for x in a.transpose(2,0,1)])
b = np.einsum("...ii->i...", a)
assert_(b.base is a)
assert_equal(b, [a[:,i,i] for i in range(3)])
b = np.einsum(a, [Ellipsis,0,0], [0,Ellipsis])
assert_(b.base is a)
assert_equal(b, [a[:,i,i] for i in range(3)])
b = np.einsum("jii->ij", a)
assert_(b.base is a)
assert_equal(b, [a[:,i,i] for i in range(3)])
b = np.einsum(a, [1,0,0], [0,1])
assert_(b.base is a)
assert_equal(b, [a[:,i,i] for i in range(3)])
b = np.einsum("ii...->i...", a)
assert_(b.base is a)
assert_equal(b, [a.transpose(2,0,1)[:,i,i] for i in range(3)])
b = np.einsum(a, [0,0,Ellipsis], [0,Ellipsis])
assert_(b.base is a)
assert_equal(b, [a.transpose(2,0,1)[:,i,i] for i in range(3)])
b = np.einsum("i...i->i...", a)
assert_(b.base is a)
assert_equal(b, [a.transpose(1,0,2)[:,i,i] for i in range(3)])
b = np.einsum(a, [0,Ellipsis,0], [0,Ellipsis])
assert_(b.base is a)
assert_equal(b, [a.transpose(1,0,2)[:,i,i] for i in range(3)])
b = np.einsum("i...i->...i", a)
assert_(b.base is a)
assert_equal(b, [[x[i,i] for i in range(3)]
for x in a.transpose(1,0,2)])
b = np.einsum(a, [0,Ellipsis,0], [Ellipsis,0])
assert_(b.base is a)
assert_equal(b, [[x[i,i] for i in range(3)]
for x in a.transpose(1,0,2)])
# triple diagonal
a = np.arange(27)
a.shape = (3,3,3)
b = np.einsum("iii->i", a)
assert_(b.base is a)
assert_equal(b, [a[i,i,i] for i in range(3)])
b = np.einsum(a, [0,0,0], [0])
assert_(b.base is a)
assert_equal(b, [a[i,i,i] for i in range(3)])
# swap axes
a = np.arange(24)
a.shape = (2,3,4)
b = np.einsum("ijk->jik", a)
assert_(b.base is a)
assert_equal(b, a.swapaxes(0,1))
b = np.einsum(a, [0,1,2], [1,0,2])
assert_(b.base is a)
assert_equal(b, a.swapaxes(0,1))
def check_einsum_sums(self, dtype):
# Check various sums. Does many sizes to exercise unrolled loops.
# sum(a, axis=-1)
for n in range(1,17):
a = np.arange(n, dtype=dtype)
assert_equal(np.einsum("i->", a), np.sum(a, axis=-1).astype(dtype))
assert_equal(np.einsum(a, [0], []),
np.sum(a, axis=-1).astype(dtype))
for n in range(1,17):
a = np.arange(2*3*n, dtype=dtype).reshape(2,3,n)
assert_equal(np.einsum("...i->...", a),
np.sum(a, axis=-1).astype(dtype))
assert_equal(np.einsum(a, [Ellipsis,0], [Ellipsis]),
np.sum(a, axis=-1).astype(dtype))
# sum(a, axis=0)
for n in range(1,17):
a = np.arange(2*n, dtype=dtype).reshape(2,n)
assert_equal(np.einsum("i...->...", a),
np.sum(a, axis=0).astype(dtype))
assert_equal(np.einsum(a, [0,Ellipsis], [Ellipsis]),
np.sum(a, axis=0).astype(dtype))
for n in range(1,17):
a = np.arange(2*3*n, dtype=dtype).reshape(2,3,n)
assert_equal(np.einsum("i...->...", a),
np.sum(a, axis=0).astype(dtype))
assert_equal(np.einsum(a, [0,Ellipsis], [Ellipsis]),
np.sum(a, axis=0).astype(dtype))
# trace(a)
for n in range(1,17):
a = np.arange(n*n, dtype=dtype).reshape(n,n)
assert_equal(np.einsum("ii", a), np.trace(a).astype(dtype))
assert_equal(np.einsum(a, [0,0]), np.trace(a).astype(dtype))
# multiply(a, b)
for n in range(1,17):
a = np.arange(3*n, dtype=dtype).reshape(3,n)
b = np.arange(2*3*n, dtype=dtype).reshape(2,3,n)
assert_equal(np.einsum("..., ...", a, b), np.multiply(a, b))
assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis]),
np.multiply(a, b))
# inner(a,b)
for n in range(1,17):
a = np.arange(2*3*n, dtype=dtype).reshape(2,3,n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("...i, ...i", a, b), np.inner(a, b))
assert_equal(np.einsum(a, [Ellipsis,0], b, [Ellipsis,0]),
np.inner(a, b))
for n in range(1,11):
a = np.arange(n*3*2, dtype=dtype).reshape(n,3,2)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("i..., i...", a, b), np.inner(a.T, b.T).T)
assert_equal(np.einsum(a, [0,Ellipsis], b, [0,Ellipsis]),
np.inner(a.T, b.T).T)
# outer(a,b)
for n in range(1,17):
a = np.arange(3, dtype=dtype)+1
b = np.arange(n, dtype=dtype)+1
assert_equal(np.einsum("i,j", a, b), np.outer(a, b))
assert_equal(np.einsum(a, [0], b, [1]), np.outer(a, b))
# Suppress the complex warnings for the 'as f8' tests
ctx = WarningManager()
ctx.__enter__()
try:
warnings.simplefilter('ignore', np.ComplexWarning)
# matvec(a,b) / a.dot(b) where a is matrix, b is vector
for n in range(1,17):
a = np.arange(4*n, dtype=dtype).reshape(4,n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("ij, j", a, b), np.dot(a, b))
assert_equal(np.einsum(a, [0,1], b, [1]), np.dot(a, b))
c = np.arange(4, dtype=dtype)
np.einsum("ij,j", a, b, out=c,
dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a, [0,1], b, [1], out=c,
dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
for n in range(1,17):
a = np.arange(4*n, dtype=dtype).reshape(4,n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("ji,j", a.T, b.T), np.dot(b.T, a.T))
assert_equal(np.einsum(a.T, [1,0], b.T, [1]), np.dot(b.T, a.T))
c = np.arange(4, dtype=dtype)
np.einsum("ji,j", a.T, b.T, out=c, dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(b.T.astype('f8'),
a.T.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a.T, [1,0], b.T, [1], out=c,
dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(b.T.astype('f8'),
a.T.astype('f8')).astype(dtype))
# matmat(a,b) / a.dot(b) where a is matrix, b is matrix
for n in range(1,17):
if n < 8 or dtype != 'f2':
a = np.arange(4*n, dtype=dtype).reshape(4,n)
b = np.arange(n*6, dtype=dtype).reshape(n,6)
assert_equal(np.einsum("ij,jk", a, b), np.dot(a, b))
assert_equal(np.einsum(a, [0,1], b, [1,2]), np.dot(a, b))
for n in range(1,17):
a = np.arange(4*n, dtype=dtype).reshape(4,n)
b = np.arange(n*6, dtype=dtype).reshape(n,6)
c = np.arange(24, dtype=dtype).reshape(4,6)
np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a, [0,1], b, [1,2], out=c,
dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
# matrix triple product (note this is not currently an efficient
# way to multiply 3 matrices)
a = np.arange(12, dtype=dtype).reshape(3,4)
b = np.arange(20, dtype=dtype).reshape(4,5)
c = np.arange(30, dtype=dtype).reshape(5,6)
if dtype != 'f2':
assert_equal(np.einsum("ij,jk,kl", a, b, c),
a.dot(b).dot(c))
assert_equal(np.einsum(a, [0,1], b, [1,2], c, [2,3]),
a.dot(b).dot(c))
d = np.arange(18, dtype=dtype).reshape(3,6)
np.einsum("ij,jk,kl", a, b, c, out=d,
dtype='f8', casting='unsafe')
assert_equal(d, a.astype('f8').dot(b.astype('f8')
).dot(c.astype('f8')).astype(dtype))
d[...] = 0
np.einsum(a, [0,1], b, [1,2], c, [2,3], out=d,
dtype='f8', casting='unsafe')
assert_equal(d, a.astype('f8').dot(b.astype('f8')
).dot(c.astype('f8')).astype(dtype))
# tensordot(a, b)
if np.dtype(dtype) != np.dtype('f2'):
a = np.arange(60, dtype=dtype).reshape(3,4,5)
b = np.arange(24, dtype=dtype).reshape(4,3,2)
assert_equal(np.einsum("ijk, jil -> kl", a, b),
np.tensordot(a,b, axes=([1,0],[0,1])))
assert_equal(np.einsum(a, [0,1,2], b, [1,0,3], [2,3]),
np.tensordot(a,b, axes=([1,0],[0,1])))
c = np.arange(10, dtype=dtype).reshape(5,2)
np.einsum("ijk,jil->kl", a, b, out=c,
dtype='f8', casting='unsafe')
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
axes=([1,0],[0,1])).astype(dtype))
c[...] = 0
np.einsum(a, [0,1,2], b, [1,0,3], [2,3], out=c,
dtype='f8', casting='unsafe')
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
axes=([1,0],[0,1])).astype(dtype))
finally:
ctx.__exit__()
# logical_and(logical_and(a!=0, b!=0), c!=0)
a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype)
b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype)
c = np.array([True,True,False,True,True,False,True,True])
assert_equal(np.einsum("i,i,i->i", a, b, c,
dtype='?', casting='unsafe'),
np.logical_and(np.logical_and(a!=0, b!=0), c!=0))
assert_equal(np.einsum(a, [0], b, [0], c, [0], [0],
dtype='?', casting='unsafe'),
np.logical_and(np.logical_and(a!=0, b!=0), c!=0))
a = np.arange(9, dtype=dtype)
assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a))
assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a))
assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a))
assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a))
# Various stride0, contiguous, and SSE aligned variants
for n in range(1,25):
a = np.arange(n, dtype=dtype)
if np.dtype(dtype).itemsize > 1:
assert_equal(np.einsum("...,...",a,a), np.multiply(a,a))
assert_equal(np.einsum("i,i", a, a), np.dot(a,a))
assert_equal(np.einsum("i,->i", a, 2), 2*a)
assert_equal(np.einsum(",i->i", 2, a), 2*a)
assert_equal(np.einsum("i,->", a, 2), 2*np.sum(a))
assert_equal(np.einsum(",i->", 2, a), 2*np.sum(a))
assert_equal(np.einsum("...,...",a[1:],a[:-1]),
np.multiply(a[1:],a[:-1]))
assert_equal(np.einsum("i,i", a[1:], a[:-1]),
np.dot(a[1:],a[:-1]))
assert_equal(np.einsum("i,->i", a[1:], 2), 2*a[1:])
assert_equal(np.einsum(",i->i", 2, a[1:]), 2*a[1:])
assert_equal(np.einsum("i,->", a[1:], 2), 2*np.sum(a[1:]))
assert_equal(np.einsum(",i->", 2, a[1:]), 2*np.sum(a[1:]))
# An object array, summed as the data type
a = np.arange(9, dtype=object)
b = np.einsum("i->", a, dtype=dtype, casting='unsafe')
assert_equal(b, np.sum(a))
assert_equal(b.dtype, np.dtype(dtype))
b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe')
assert_equal(b, np.sum(a))
assert_equal(b.dtype, np.dtype(dtype))
# A case which was failing (ticket #1885)
p = np.arange(2) + 1
q = np.arange(4).reshape(2,2) + 3
r = np.arange(4).reshape(2,2) + 7
assert_equal(np.einsum('z,mz,zm->', p, q, r), 253)
    def test_einsum_sums_int8(self):
        self.check_einsum_sums('i1')
    def test_einsum_sums_uint8(self):
        self.check_einsum_sums('u1')
    def test_einsum_sums_int16(self):
        self.check_einsum_sums('i2')
    def test_einsum_sums_uint16(self):
        self.check_einsum_sums('u2')
    def test_einsum_sums_int32(self):
        self.check_einsum_sums('i4')
    def test_einsum_sums_uint32(self):
        self.check_einsum_sums('u4')
    def test_einsum_sums_int64(self):
        self.check_einsum_sums('i8')
    def test_einsum_sums_uint64(self):
        self.check_einsum_sums('u8')
    def test_einsum_sums_float16(self):
        self.check_einsum_sums('f2')
    def test_einsum_sums_float32(self):
        self.check_einsum_sums('f4')
    def test_einsum_sums_float64(self):
        self.check_einsum_sums('f8')
    def test_einsum_sums_longdouble(self):
        self.check_einsum_sums(np.longdouble)
    def test_einsum_sums_cfloat64(self):
        self.check_einsum_sums('c8')
    def test_einsum_sums_cfloat128(self):
        self.check_einsum_sums('c16')
    def test_einsum_sums_clongdouble(self):
        self.check_einsum_sums(np.clongdouble)
def test_einsum_misc(self):
# This call used to crash because of a bug in
# PyArray_AssignZero
a = np.ones((1,2))
b = np.ones((2,2,1))
assert_equal(np.einsum('ij...,j...->i...',a,b), [[[2],[2]]])
# The iterator had an issue with buffering this reduction
a = np.ones((5, 12, 4, 2, 3), np.int64)
b = np.ones((5, 12, 11), np.int64)
assert_equal(np.einsum('ijklm,ijn,ijn->',a,b,b),
np.einsum('ijklm,ijn->',a,b))
# Issue #2027, was a problem in the contiguous 3-argument
# inner loop implementation
a = np.arange(1, 3)
b = np.arange(1, 5).reshape(2, 2)
c = np.arange(1, 9).reshape(4, 2)
assert_equal(np.einsum('x,yx,zx->xzy', a, b, c),
[[[1, 3], [3, 9], [5, 15], [7, 21]],
[[8, 16], [16, 32], [24, 48], [32, 64]]])
if __name__ == "__main__":
run_module_suite()
|
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Main snapshotter module
Snapshotter creates an immutable scope around an object (e.g. Audit) where
snapshot object represent a join between parent object (Audit),
child object (e.g. Control, Regulation, ...) and a particular revision.
"""
from sqlalchemy.sql.expression import tuple_
from sqlalchemy.sql.expression import bindparam
from ggrc import db
from ggrc import models
from ggrc.login import get_current_user_id
from ggrc.utils import benchmark
from ggrc.snapshotter.datastructures import Attr
from ggrc.snapshotter.datastructures import Pair
from ggrc.snapshotter.datastructures import Stub
from ggrc.snapshotter.datastructures import OperationResponse
from ggrc.snapshotter.helpers import create_relationship_dict
from ggrc.snapshotter.helpers import create_relationship_revision_dict
from ggrc.snapshotter.helpers import create_snapshot_dict
from ggrc.snapshotter.helpers import create_snapshot_revision_dict
from ggrc.snapshotter.helpers import get_relationships
from ggrc.snapshotter.helpers import get_revisions
from ggrc.snapshotter.helpers import get_snapshots
from ggrc.snapshotter.indexer import reindex_pairs
from ggrc.snapshotter.rules import get_rules
class SnapshotGenerator(object):
"""Geneate snapshots per rules of all connected objects"""
def __init__(self, dry_run):
self.rules = get_rules()
self.parents = set()
self.children = set()
self.snapshots = dict()
self.context_cache = dict()
self.dry_run = dry_run
def add_parent(self, obj):
"""Add parent object and automatically scan neighborhood for snapshottable
objects."""
with benchmark("Snapshot.add_parent_object"):
key = Stub.from_object(obj)
if key not in self.parents:
with benchmark("Snapshot.add_parent_object.add object"):
objs = self._get_snapshottable_objects(obj)
self.parents.add(key)
self.context_cache[key] = obj.context_id
self.children = self.children | objs
self.snapshots[key] = objs
return self.parents
def add_family(self, parent, children):
"""Directly add parent object and children that should be snapshotted."""
_type, _id = parent
model = getattr(models, _type)
parent_object = db.session.query(model).filter(model.id == _id).one()
self.parents.add(parent)
self.snapshots[parent] = children
self.children = children
self.context_cache[parent] = parent_object.context_id
def _fetch_neighborhood(self, parent_object, objects):
with benchmark("Snapshot._fetch_object_neighborhood"):
query_pairs = set()
for obj in objects:
for snd_obj in self.rules.rules[parent_object.type]["snd"]:
query_pairs.add((obj.type, obj.id, snd_obj))
columns = db.session.query(
models.Relationship.source_type,
models.Relationship.source_id,
models.Relationship.destination_type,
models.Relationship.destination_id)
relationships = columns.filter(
tuple_(
models.Relationship.destination_type,
models.Relationship.destination_id,
models.Relationship.source_type,
).in_(query_pairs)).union(
columns.filter(tuple_(
models.Relationship.source_type,
models.Relationship.source_id,
models.Relationship.destination_type,
).in_(query_pairs)))
neighborhood = set()
for (stype, sid, dtype, did) in relationships:
source = Stub(stype, sid)
destination = Stub(dtype, did)
if source in objects:
neighborhood.add(destination)
else:
neighborhood.add(source)
return neighborhood
def _get_snapshottable_objects(self, obj):
"""Get snapshottable objects from parent object's neighborhood."""
with benchmark("Snapshot._get_snapshotable_objects"):
object_rules = self.rules.rules[obj.type]
with benchmark("Snapshot._get_snapshotable_objects.related_mappings"):
related_mappings = obj.related_objects({
rule for rule in object_rules["fst"]
if isinstance(rule, basestring)})
with benchmark("Snapshot._get_snapshotable_objects.direct mappings"):
direct_mappings = {getattr(obj, rule.name)
for rule in object_rules["fst"]
if isinstance(rule, Attr)}
related_objects = {Stub.from_object(obj)
for obj in related_mappings | direct_mappings}
with benchmark("Snapshot._get_snapshotable_objects.fetch neighborhood"):
return self._fetch_neighborhood(obj, related_objects)
def update(self, event, revisions, _filter=None):
"""Update parent object's snapshots."""
_, for_update = self.analyze()
result = self._update(for_update=for_update, event=event,
revisions=revisions, _filter=_filter)
updated = result.response
if not self.dry_run:
reindex_pairs(updated)
return result
def _update(self, for_update, event, revisions, _filter):
"""Update (or create) parent objects' snapshots and create revisions for
them.
Args:
event: A ggrc.models.Event instance
revisions: A set of tuples of pairs with revisions to which it should
either create or update a snapshot of that particular audit
_filter: Callable that should return True if it should be updated
Returns:
OperationResponse
"""
# pylint: disable=too-many-locals
with benchmark("Snapshot._update"):
user_id = get_current_user_id()
missed_keys = set()
snapshot_cache = dict()
modified_snapshot_keys = set()
data_payload_update = list()
revision_payload = list()
response_data = dict()
if self.dry_run and event is None:
event_id = 0
else:
event_id = event.id
with benchmark("Snapshot._update.filter"):
if _filter:
for_update = {elem for elem in for_update if _filter(elem)}
with benchmark("Snapshot._update.get existing snapshots"):
existing_snapshots = db.session.query(
models.Snapshot.id,
models.Snapshot.revision_id,
models.Snapshot.parent_type,
models.Snapshot.parent_id,
models.Snapshot.child_type,
models.Snapshot.child_id,
).filter(tuple_(
models.Snapshot.parent_type, models.Snapshot.parent_id,
models.Snapshot.child_type, models.Snapshot.child_id
).in_({pair.to_4tuple() for pair in for_update}))
for esnap in existing_snapshots:
sid, rev_id, pair_tuple = esnap[0], esnap[1], esnap[2:]
pair = Pair.from_4tuple(pair_tuple)
snapshot_cache[pair] = (sid, rev_id)
with benchmark("Snapshot._update.retrieve latest revisions"):
revision_id_cache = get_revisions(
for_update,
filters=[models.Revision.action.in_(["created", "modified"])],
revisions=revisions)
response_data["revisions"] = {
"old": {pair: values[1] for pair, values in snapshot_cache.items()},
"new": revision_id_cache
}
with benchmark("Snapshot._update.build snapshot payload"):
for key in for_update:
if key in revision_id_cache:
sid, rev_id = snapshot_cache[key]
latest_rev = revision_id_cache[key]
if rev_id != latest_rev:
modified_snapshot_keys.add(key)
data_payload_update += [{
"_id": sid,
"_revision_id": latest_rev,
"_modified_by_id": user_id
}]
else:
missed_keys.add(key)
if not modified_snapshot_keys:
return OperationResponse("update", True, set(), response_data)
with benchmark("Snapshot._update.write snapshots to database"):
update_sql = models.Snapshot.__table__.update().where(
models.Snapshot.id == bindparam("_id")).values(
revision_id=bindparam("_revision_id"),
modified_by_id=bindparam("_modified_by_id"))
self._execute(update_sql, data_payload_update)
with benchmark("Snapshot._update.retrieve inserted snapshots"):
snapshots = get_snapshots(modified_snapshot_keys)
with benchmark("Snapshot._update.create snapshots revision payload"):
for snapshot in snapshots:
parent = Stub.from_tuple(snapshot, 4, 5)
context_id = self.context_cache[parent]
data = create_snapshot_revision_dict("modified", event_id, snapshot,
user_id, context_id)
revision_payload += [data]
with benchmark("Insert Snapshot entries into Revision"):
self._execute(models.Revision.__table__.insert(), revision_payload)
return OperationResponse("update", True, for_update, response_data)
def analyze(self):
"""Analyze which snapshots need to be updated and which created"""
query = set(db.session.query(
models.Snapshot.parent_type,
models.Snapshot.parent_id,
models.Snapshot.child_type,
models.Snapshot.child_id,
).filter(tuple_(
models.Snapshot.parent_type, models.Snapshot.parent_id
).in_(self.parents)))
existing_scope = {Pair.from_4tuple(fields) for fields in query}
full_scope = {Pair(parent, child)
for parent, children in self.snapshots.items()
for child in children}
for_update = existing_scope
for_create = full_scope - existing_scope
return for_create, for_update
def upsert(self, event, revisions, _filter):
return self._upsert(event=event, revisions=revisions, _filter=_filter)
def _upsert(self, event, revisions, _filter):
"""Update and (if needed) create snapshots
Args:
event: A ggrc.models.Event instance
revisions: A set of tuples of pairs with revisions to which it should
either create or update a snapshot of that particular audit
_filter: Callable that should return True if it should be updated
Returns:
OperationResponse
"""
for_create, for_update = self.analyze()
create, update = None, None
created, updated = set(), set()
if for_update:
update = self._update(
for_update=for_update, event=event, revisions=revisions,
_filter=_filter)
updated = update.response
if for_create:
create = self._create(for_create=for_create, event=event,
revisions=revisions, _filter=_filter)
created = create.response
to_reindex = updated | created
if not self.dry_run:
reindex_pairs(to_reindex)
return OperationResponse("upsert", True, {
"create": create,
"update": update
}, {
"dry-run": self.dry_run
})
def _execute(self, operation, data):
"""Execute bulk operation on data if not in dry mode
Args:
operation: sqlalchemy operation
data: a list of dictionaries with keys representing column names and
values to insert with operation
Returns:
True if successful.
"""
if data and not self.dry_run:
engine = db.engine
engine.execute(operation, data)
db.session.commit()
def create(self, event, revisions, _filter=None):
"""Create snapshots of parent object's neighborhood per provided rules
    and split in chunks if there are too many snapshottable objects."""
for_create, _ = self.analyze()
result = self._create(
for_create=for_create, event=event,
revisions=revisions, _filter=_filter)
created = result.response
if not self.dry_run:
reindex_pairs(created)
return result
def _create(self, for_create, event, revisions, _filter):
"""Create snapshots of parent objects neighhood and create revisions for
snapshots.
Args:
event: A ggrc.models.Event instance
revisions: A set of tuples of pairs with revisions to which it should
either create or update a snapshot of that particular audit
_filter: Callable that should return True if it should be updated
Returns:
OperationResponse
"""
# pylint: disable=too-many-locals,too-many-statements
with benchmark("Snapshot._create"):
with benchmark("Snapshot._create init"):
user_id = get_current_user_id()
missed_keys = set()
data_payload = list()
revision_payload = list()
relationship_payload = list()
response_data = dict()
if self.dry_run and event is None:
event_id = 0
else:
event_id = event.id
with benchmark("Snapshot._create.filter"):
if _filter:
for_create = {elem for elem in for_create if _filter(elem)}
with benchmark("Snapshot._create._get_revisions"):
revision_id_cache = get_revisions(for_create, revisions)
response_data["revisions"] = revision_id_cache
with benchmark("Snapshot._create.create payload"):
for pair in for_create:
if pair in revision_id_cache:
revision_id = revision_id_cache[pair]
context_id = self.context_cache[pair.parent]
data = create_snapshot_dict(pair, revision_id, user_id, context_id)
data_payload += [data]
else:
missed_keys.add(pair)
with benchmark("Snapshot._create.write to database"):
self._execute(
models.Snapshot.__table__.insert(),
data_payload)
with benchmark("Snapshot._create.retrieve inserted snapshots"):
snapshots = get_snapshots(for_create)
with benchmark("Snapshot._create.create base object -> snapshot rels"):
for snapshot in snapshots:
base_object = Stub.from_tuple(snapshot, 6, 7)
snapshot_object = Stub("Snapshot", snapshot[0])
relationship = create_relationship_dict(base_object, snapshot_object,
user_id, snapshot[1])
relationship_payload += [relationship]
with benchmark("Snapshot._create.write relationships to database"):
self._execute(models.Relationship.__table__.insert(),
relationship_payload)
with benchmark("Snapshot._create.get created relationships"):
created_relationships = {
(rel["source_type"], rel["source_id"],
rel["destination_type"], rel["destination_id"])
for rel in relationship_payload}
relationships = get_relationships(created_relationships)
with benchmark("Snapshot._create.create revision payload"):
with benchmark("Snapshot._create.create snapshots revision payload"):
for snapshot in snapshots:
parent = Stub.from_tuple(snapshot, 4, 5)
context_id = self.context_cache[parent]
data = create_snapshot_revision_dict("created", event_id, snapshot,
user_id, context_id)
revision_payload += [data]
with benchmark("Snapshot._create.create rel revision payload"):
snapshot_parents = {pair.child: pair.parent for pair in for_create}
for relationship in relationships:
obj = Stub.from_tuple(relationship, 4, 5)
parent = snapshot_parents[obj]
context_id = self.context_cache[parent]
data = create_relationship_revision_dict(
"created", event_id, relationship, user_id, context_id)
revision_payload += [data]
with benchmark("Snapshot._create.write revisions to database"):
self._execute(models.Revision.__table__.insert(), revision_payload)
return OperationResponse("create", True, for_create, response_data)
def create_snapshots(objs, event, revisions=None, _filter=None, dry_run=False):
"""Create snapshots of parent objects."""
# pylint: disable=unused-argument
if not revisions:
revisions = set()
with benchmark("Snapshot.create_snapshots"):
with benchmark("Snapshot.create_snapshots.init"):
generator = SnapshotGenerator(dry_run)
if not isinstance(objs, set):
objs = {objs}
for obj in objs:
db.session.add(obj)
with benchmark("Snapshot.create_snapshots.add_parent_objects"):
generator.add_parent(obj)
with benchmark("Snapshot.create_snapshots.create"):
return generator.create(event=event,
revisions=revisions,
_filter=_filter)
def upsert_snapshots(objs, event, revisions=None, _filter=None, dry_run=False):
"""Update (and create if needed) snapshots of parent objects."""
# pylint: disable=unused-argument
if not revisions:
revisions = set()
with benchmark("Snapshot.update_snapshots"):
generator = SnapshotGenerator(dry_run)
if not isinstance(objs, set):
objs = {objs}
for obj in objs:
db.session.add(obj)
generator.add_parent(obj)
return generator.upsert(event=event, revisions=revisions, _filter=_filter)
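# Hedged usage sketch (added note, not part of the original module). It shows
# how the two helpers above are meant to be driven; `audit` and `event` are
# assumptions standing in for an already persisted parent object and a
# ggrc.models.Event instance, respectively.
#
#   result = create_snapshots(audit, event)    # snapshot the audit's neighborhood
#   print result.response                      # pairs the snapshots were created for
#   result = upsert_snapshots({audit}, event)  # later: update existing, create missing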
|
|
# -*- coding: utf-8 -*-
# Copyright 2012-2013 UNED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, date, timedelta
import logging
import sys
from bson import ObjectId
from bson.errors import InvalidId
from tastypie import fields
from tastypie import http
from tastypie.authorization import DjangoAuthorization
from tastypie.exceptions import NotFound, BadRequest
from tastypie.resources import ModelResource, Resource
from tastypie.utils.mime import build_content_type
from django.conf import settings
from django.conf.urls import url
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db.models import Q, Count
from django.db.models.fields.files import ImageFieldFile
from django.http import HttpResponse, HttpResponseNotFound
from django.utils import timezone
from django.utils.cache import patch_cache_control
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_exempt
from moocng.api.authentication import (DjangoAuthentication,
TeacherAuthentication,
ApiKeyAuthentication,
MultiAuthentication)
from moocng.api.authorization import (PublicReadTeachersModifyAuthorization,
TeacherAuthorization,
UserResourceAuthorization)
from moocng.api.mongodb import (MongoObj, MongoResource, MongoUserResource,
mongo_object_updated, mongo_object_created)
from moocng.api.tasks import (on_activity_created_task, on_answer_created_task,
on_answer_updated_task,
on_peerreviewsubmission_created_task, on_history_created_task)
from moocng.api.validation import (AnswerValidation, answer_validate_date,
PeerReviewSubmissionsResourceValidation)
from moocng.assets.models import Asset, Reservation, AssetAvailability
from moocng.assets.utils import get_occupation_for_month
from moocng.courses.models import (Unit, KnowledgeQuantum, Question, Option,
Attachment, Transcription, Course, Language)
from moocng.courses.marks import (normalize_kq_weight, calculate_course_mark, get_unit_mark,
get_kq_mark)
from moocng.media_contents import (media_content_get_iframe_template,
media_content_get_thumbnail_url)
from moocng.mongodb import get_db
from moocng.peerreview.models import PeerReviewAssignment, EvaluationCriterion
from moocng.peerreview.utils import (kq_get_peer_review_score,
get_peer_review_review_score)
from moocng.x_api.utils import (learnerSubmitsAResource)
from django.core.urlresolvers import reverse
STATS_QUEUE = 'stats'
class HandleErrorProvider(object):
def wrap_view(self, view):
"""
Wraps methods so they can be called in a more functional way as well
as handling exceptions better.
"""
@csrf_exempt
def wrapper(request, *args, **kwargs):
response_class_error = None
message_error = None
message_error_default = _('Sorry, this request could not be processed. Please try again later.')
try:
callback = getattr(self, view)
response = callback(request, *args, **kwargs)
if request.is_ajax():
# IE excessively caches XMLHttpRequests, so we're disabling
# the browser cache here.
# See http://www.enhanceie.com/ie/bugs.asp for details.
patch_cache_control(response, no_cache=True)
return response
except (BadRequest, fields.ApiFieldError), e:
response_class_error = http.HttpBadRequest
except ValidationError, e:
response_class_error = http.HttpBadRequest
message_error = ', '.join(e.messages)
except (NotFound, ObjectDoesNotExist), e:
response_class_error = HttpResponseNotFound
except Exception, e:
# This exception could be an error with sensitive information
message_error = message_error_default
if hasattr(e, 'response'):
return e.response
if message_error is None:
message_error = e.message
if not message_error:
message_error = message_error_default
if response_class_error is None:
response_class_error = http.HttpApplicationError
data = {
"error_message": message_error,
}
if response_class_error != HttpResponseNotFound:
#log = logging.getLogger('moocng.api.resources')
logging.error('Internal Server Error: %s' % request.path, exc_info=sys.exc_info(),
extra={'status_code': 500, 'request': request})
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return response_class_error(content=serialized, content_type=build_content_type(desired_format))
return wrapper
class BaseResource(Resource, HandleErrorProvider):
def wrap_view(self, view):
return HandleErrorProvider.wrap_view(self, view)
class BaseModelResource(ModelResource, HandleErrorProvider):
def wrap_view(self, view):
return HandleErrorProvider.wrap_view(self, view)
class BaseMongoResource(MongoResource, HandleErrorProvider):
def wrap_view(self, view):
return HandleErrorProvider.wrap_view(self, view)
class BaseMongoUserResource(MongoUserResource, HandleErrorProvider):
def wrap_view(self, view):
return HandleErrorProvider.wrap_view(self, view)
class CourseResource(BaseModelResource):
unit = fields.ToManyField('moocng.api.resources.UnitResource', 'unit_set')
class Meta:
queryset = Course.objects.all()
resource_name = 'course'
allowed_methods = ['get']
excludes = ['certification_banner']
authentication = MultiAuthentication(DjangoAuthentication(),
ApiKeyAuthentication())
authorization = DjangoAuthorization()
class UnitResource(BaseModelResource):
course = fields.ToOneField(CourseResource, 'course')
mark = fields.DecimalField(readonly=True)
class Meta:
queryset = Unit.objects.all()
resource_name = 'unit'
authentication = DjangoAuthentication()
authorization = PublicReadTeachersModifyAuthorization()
always_return_data = True
filtering = {
"course": ('exact'),
}
def dehydrate_mark(self, bundle):
        mark = 0
try:
mark = get_unit_mark(bundle.obj, bundle.request.user)
except:
pass
return mark
def alter_deserialized_detail_data(self, request, data):
if u'title' in data and data[u'title'] is not None:
data[u'title'] = data[u'title'].strip()
return data
class KnowledgeQuantumResource(BaseModelResource):
unit = fields.ToOneField(UnitResource, 'unit')
question = fields.ToManyField('moocng.api.resources.QuestionResource',
'question_set', related_name='kq',
readonly=True, null=True)
iframe_code = fields.CharField(readonly=True)
thumbnail_url = fields.CharField(readonly=True)
peer_review_assignment = fields.ToOneField(
'moocng.api.resources.PeerReviewAssignmentResource',
'peerreviewassignment',
related_name='peer_review_assignment',
readonly=True, null=True)
asset_availability = fields.ToOneField(
'moocng.api.resources.AssetAvailabilityResource',
'asset_availability',
related_name='asset_availability',
readonly=True, null=True)
peer_review_score = fields.IntegerField(readonly=True)
correct = fields.BooleanField(readonly=True)
completed = fields.BooleanField(readonly=True)
marked = fields.BooleanField(readonly=True)
mark = fields.DecimalField(readonly=True)
normalized_weight = fields.IntegerField(readonly=True)
transcriptions = fields.ToManyField('moocng.api.resources.TranscriptionResource',
'transcription_set', related_name='kq',
readonly=True, null=True)
class Meta:
queryset = KnowledgeQuantum.objects.all()
resource_name = 'kq'
allowed_methods = ['get']
authentication = DjangoAuthentication()
authorization = DjangoAuthorization()
filtering = {
"unit": ('exact'),
}
def get_object_list(self, request):
objects = super(KnowledgeQuantumResource, self).get_object_list(request)
return objects.filter(
Q(unit__unittype='n') |
Q(unit__start__isnull=True) |
Q(unit__start__isnull=False) #, unit__start__lte=datetime.now)
)
def dispatch(self, request_type, request, **kwargs):
db = get_db()
self.answers = db.get_collection('answers')
self.activity = db.get_collection('activity')
return super(KnowledgeQuantumResource, self).dispatch(request_type,
request,
**kwargs)
def dehydrate_normalized_weight(self, bundle):
return normalize_kq_weight(bundle.obj)
def dehydrate_question(self, bundle):
question = bundle.data['question']
if len(question) == 0:
return None
else:
return question[0]
def dehydrate_iframe_code(self, bundle):
return media_content_get_iframe_template(bundle.obj.media_content_type,
bundle.obj.media_content_id,
**{'transcriptions': bundle.obj.transcription_set.all()})
def dehydrate_thumbnail_url(self, bundle):
return media_content_get_thumbnail_url(bundle.obj.media_content_type,
bundle.obj.media_content_id)
def dehydrate_peer_review_score(self, bundle):
return kq_get_peer_review_score(bundle.obj, bundle.request.user)
def dehydrate_correct(self, bundle):
questions = bundle.obj.question_set.all()
if questions.count() == 0:
# no question: a kq is correct if it is completed
try:
return bundle.obj.is_completed(bundle.request.user)
except AttributeError:
return False
else:
question = questions[0] # there should be only one question
answer = self.answers.find_one({
"user_id": bundle.request.user.id,
"question_id": question.id
})
if not answer:
return False
return question.is_correct(answer)
def dehydrate_completed(self, bundle):
return bundle.obj.is_completed(bundle.request.user)
def dehydrate_marked(self, bundle):
current_mark = bundle.obj.unit.course.get_user_mark(bundle.request.user)
if current_mark is None:
return False
else:
return current_mark.id == bundle.obj.id
def dehydrate_mark(self, bundle):
return get_kq_mark(bundle.obj, bundle.request.user)
def dehydrate_course(self, bundle):
return bundle.obj.unit.course.id
def build_filters(self, filters=None):
if filters is None: #if you don't pass any filters at all
filters = {}
orm_filters = super(KnowledgeQuantumResource, self).build_filters(filters)
if('course' in filters):
query = filters['course']
qset = (
Q(unit__course__id=query)
)
orm_filters['course'] = qset
return orm_filters
def apply_filters(self, request, applicable_filters):
if 'course' in applicable_filters:
course = applicable_filters.pop('course')
else:
course = None
semi_filtered = super(KnowledgeQuantumResource, self).apply_filters(request, applicable_filters)
return semi_filtered.filter(course) if course else semi_filtered
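# Note on the custom 'course' filter above (added comment, not original code):
# 'course' is not a field on KnowledgeQuantum, so build_filters() turns it into
# a Q object on unit__course__id and apply_filters() pops it out of the ORM
# filter dict and applies it separately. For example, filtering the kq list
# with ?course=3 ends up as
#   KnowledgeQuantum.objects.filter(Q(unit__course__id=3))
# layered on top of the visibility filtering done in get_object_list().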
class PrivateKnowledgeQuantumResource(BaseModelResource):
unit = fields.ToOneField(UnitResource, 'unit')
question = fields.ToManyField('moocng.api.resources.QuestionResource',
'question_set', related_name='kq',
readonly=True, null=True)
iframe_code = fields.CharField(readonly=True)
thumbnail_url = fields.CharField(readonly=True)
peer_review_assignment = fields.ToOneField(
'moocng.api.resources.PeerReviewAssignmentResource',
'peerreviewassignment',
related_name='peer_review_assignment',
readonly=True, null=True)
asset_availability = fields.ToOneField(
'moocng.api.resources.AssetAvailabilityResource',
'asset_availability',
related_name='asset_availability',
readonly=True, null=True)
normalized_weight = fields.IntegerField()
transcriptions = fields.ToManyField('moocng.api.resources.TranscriptionResource',
'transcription_set', related_name='kq',
readonly=True, null=True)
class Meta:
queryset = KnowledgeQuantum.objects.all()
resource_name = 'privkq'
always_return_data = True
authentication = TeacherAuthentication()
authorization = TeacherAuthorization()
filtering = {
"unit": ('exact'),
}
def alter_deserialized_detail_data(self, request, data):
if u'title' in data and data[u'title'] is not None:
data[u'title'] = data[u'title'].strip()
return data
def dehydrate_normalized_weight(self, bundle):
return normalize_kq_weight(bundle.obj)
def dehydrate_question(self, bundle):
question = bundle.data['question']
if len(question) == 0:
return None
else:
return question[0]
def dehydrate_iframe_code(self, bundle):
return media_content_get_iframe_template(bundle.obj.media_content_type, bundle.obj.media_content_id)
def dehydrate_thumbnail_url(self, bundle):
return media_content_get_thumbnail_url(bundle.obj.media_content_type, bundle.obj.media_content_id)
class LanguageResource(BaseModelResource):
class Meta:
queryset = Language.objects.all()
resource_name = 'language'
authentication = DjangoAuthentication()
authorization = PublicReadTeachersModifyAuthorization()
filtering = {
"name": ('exact'),
}
class AttachmentResource(BaseModelResource):
kq = fields.ToOneField(KnowledgeQuantumResource, 'kq')
class Meta:
queryset = Attachment.objects.all()
resource_name = 'attachment'
authentication = DjangoAuthentication()
authorization = PublicReadTeachersModifyAuthorization()
filtering = {
"kq": ('exact'),
}
def dehydrate_attachment(self, bundle):
return bundle.obj.attachment.url
class TranscriptionResource(BaseModelResource):
kq = fields.ToOneField(KnowledgeQuantumResource, 'kq')
language = fields.ToOneField(LanguageResource, 'language')
class Meta:
queryset = Transcription.objects.all()
resource_name = 'transcription'
authentication = DjangoAuthentication()
authorization = PublicReadTeachersModifyAuthorization()
filtering = {
"kq": ('exact'),
}
def dehydrate_transcription(self, bundle):
return bundle.obj.transcription.url
class PeerReviewAssignmentResource(BaseModelResource):
kq = fields.ToOneField(KnowledgeQuantumResource, 'kq')
class Meta:
queryset = PeerReviewAssignment.objects.all()
resource_name = 'peer_review_assignment'
allowed_methods = ['get']
authentication = DjangoAuthentication()
authorization = DjangoAuthorization()
filtering = {
"kq": ('exact'),
"unit": ('exact'),
}
def get_object_list(self, request):
objects = super(PeerReviewAssignmentResource, self).get_object_list(request)
unit = request.GET.get('unit', None)
if unit is not None:
objects = objects.filter(kq__unit_id=unit)
return objects.filter(
Q(kq__unit__unittype='n') |
Q(kq__unit__start__isnull=True) |
Q(kq__unit__start__isnull=False, kq__unit__start__lte=datetime.now)
)
class PrivatePeerReviewAssignmentResource(BaseModelResource):
kq = fields.ToOneField(KnowledgeQuantumResource, 'kq')
class Meta:
queryset = PeerReviewAssignment.objects.all()
resource_name = 'privpeer_review_assignment'
authentication = TeacherAuthentication()
authorization = TeacherAuthorization()
filtering = {
"kq": ('exact'),
}
class EvaluationCriterionResource(BaseModelResource):
assignment = fields.ToOneField(PeerReviewAssignmentResource, 'assignment')
class Meta:
queryset = EvaluationCriterion.objects.all()
resource_name = 'evaluation_criterion'
allowed_methods = ['get']
authentication = DjangoAuthentication()
authorization = DjangoAuthorization()
filtering = {
"assignment": ('exact'),
"unit": ('exact'),
}
def obj_get_list(self, request=None, **kwargs):
assignment = request.GET.get('assignment', None)
unit = request.GET.get('unit', None)
if assignment is not None:
results = EvaluationCriterion.objects.filter(assignment_id=assignment)
elif unit is not None:
results = EvaluationCriterion.objects.filter(assignment__kq__unit_id=unit)
else:
results = EvaluationCriterion.objects.all()
return results.filter(
Q(assignment__kq__unit__unittype='n') |
Q(assignment__kq__unit__start__isnull=True) |
Q(assignment__kq__unit__start__isnull=False,
assignment__kq__unit__start__lte=datetime.now)
)
class PrivateEvaluationCriterionResource(BaseModelResource):
assignment = fields.ToOneField(PeerReviewAssignmentResource, 'assignment')
class Meta:
queryset = EvaluationCriterion.objects.all()
resource_name = 'privevaluation_criterion'
authentication = TeacherAuthentication()
authorization = TeacherAuthorization()
filtering = {
"assignment": ('exact'),
"unit": ('exact'),
}
def alter_deserialized_detail_data(self, request, data):
if u'title' in data and data[u'title'] is not None:
data[u'title'] = data[u'title'].strip()
if u'description' in data and data[u'description'] is not None:
data[u'description'] = data[u'description'].strip()
return data
def obj_get_list(self, request=None, **kwargs):
assignment = request.GET.get('assignment', None)
unit = request.GET.get('unit', None)
if assignment is not None:
results = EvaluationCriterion.objects.filter(assignment_id=assignment)
elif unit is not None:
results = EvaluationCriterion.objects.filter(assignment__kq__unit_id=unit)
else:
results = EvaluationCriterion.objects.all()
return results
class PeerReviewSubmissionsResource(BaseMongoResource):
class Meta:
resource_name = 'peer_review_submissions'
collection = 'peer_review_submissions'
datakey = 'peer_review_submissions'
object_class = MongoObj
authentication = DjangoAuthentication()
authorization = DjangoAuthorization()
validation = PeerReviewSubmissionsResourceValidation()
allowed_methods = ['get', 'post']
filtering = {
"kq": ('exact'),
"unit": ('exact'),
"course": ('exact'),
}
def obj_get_list(self, request=None, **kwargs):
mongo_query = {
"author": request.GET.get('author', request.user.id)
}
for key in self._meta.filtering.keys():
if key in request.GET:
try:
mongo_query[key] = int(request.GET.get(key))
except ValueError:
mongo_query[key] = request.GET.get(key)
query_results = self._collection.find(mongo_query)
results = []
for query_item in query_results:
obj = MongoObj(initial=query_item)
obj.uuid = query_item["_id"]
results.append(obj)
return results
def obj_get(self, request=None, **kwargs):
try:
query = dict(_id=ObjectId(kwargs['pk']))
except InvalidId:
raise BadRequest('Invalid ObjectId provided')
mongo_item = self._collection.find_one(query)
if mongo_item is None:
raise NotFound('Invalid resource lookup data provided')
obj = MongoObj(initial=mongo_item)
obj.uuid = kwargs['pk']
return obj
def obj_create(self, bundle, request=None, **kwargs):
bundle = self.full_hydrate(bundle)
if "bundle" not in bundle.data and "reviews" not in bundle.data:
kq = KnowledgeQuantum.objects.get(id=int(bundle.data["kq"]))
if "unit" not in bundle.data:
bundle.data["unit"] = kq.unit.id
if "course" not in bundle.data:
bundle.data["course"] = kq.unit.course.id
if "created" not in bundle.data:
bundle.data["created"] = datetime.utcnow()
bundle.data["reviews"] = 0
bundle.data["author_reviews"] = 0
bundle.data["author"] = request.user.id
bundle.data["language"] = request.user.get_profile().language
from moocng.peerreview.utils import insert_p2p_if_does_not_exists_or_raise
_id = insert_p2p_if_does_not_exists_or_raise(bundle.data, self._collection)
bundle.obj = MongoObj(bundle.data)
try:
geolocation = {
'lat': float(request.META['HTTP_GEO_LAT']),
'lon': float(request.META['HTTP_GEO_LON'])
}
except:
geolocation = {
'lat': 0.0,
'lon': 0.0
}
bundle.extra = {
'geolocation': geolocation
}
self.send_created_signal(request.user.id, bundle)
bundle.obj.uuid = str(_id)
bundle.uuid = bundle.obj.uuid
return bundle
class PeerReviewReviewsResource(BaseMongoResource):
score = fields.IntegerField(readonly=True)
class Meta:
resource_name = 'peer_review_reviews'
collection = 'peer_review_reviews'
datakey = 'peer_review_reviews'
object_class = MongoObj
authentication = DjangoAuthentication()
authorization = DjangoAuthorization()
allowed_methods = ['get']
filtering = {
"reviewer": ('exact'),
"kq": ('exact'),
"unit": ('exact'),
"course": ('exact'),
"submission_id": ('exact'),
}
def obj_get_list(self, request=None, **kwargs):
mongo_query = {
"author": request.GET.get('author', request.user.id)
}
for key in self._meta.filtering.keys():
if key in request.GET:
try:
mongo_query[key] = int(request.GET.get(key))
except ValueError:
mongo_query[key] = request.GET.get(key)
query_results = self._collection.find(mongo_query)
results = []
for query_item in query_results:
obj = MongoObj(initial=query_item)
obj.uuid = query_item["_id"]
results.append(obj)
return results
def obj_get(self, request=None, **kwargs):
try:
query = dict(_id=ObjectId(kwargs['pk']))
except InvalidId:
raise BadRequest('Invalid ObjectId provided')
mongo_item = self._collection.find_one(query)
if mongo_item is None:
raise NotFound('Invalid resource lookup data provided')
obj = MongoObj(initial=mongo_item)
obj.uuid = kwargs['pk']
return obj
def dehydrate_score(self, bundle):
return get_peer_review_review_score(bundle.obj.to_dict())
class QuestionResource(BaseModelResource):
kq = fields.ToOneField(KnowledgeQuantumResource, 'kq')
iframe_code = fields.CharField(readonly=True)
thumbnail_url = fields.CharField(readonly=True)
class Meta:
queryset = Question.objects.all()
resource_name = 'question'
allowed_methods = ['get']
authentication = DjangoAuthentication()
authorization = DjangoAuthorization()
filtering = {
"kq": ('exact'),
}
def get_object_list(self, request):
objects = super(QuestionResource, self).get_object_list(request)
return objects.filter(
Q(kq__unit__unittype='n') |
Q(kq__unit__start__isnull=True) |
Q(kq__unit__start__isnull=False, kq__unit__start__lte=datetime.now)
)
def dehydrate_solution_media_content_type(self, bundle):
# Only return solution if the deadline has been reached, or there is
# no deadline
unit = bundle.obj.kq.unit
if (unit.unittype != 'n' and
unit.deadline > datetime.now(unit.deadline.tzinfo)):
return None
return bundle.obj.solution_media_content_type
def dehydrate_solution_media_content_id(self, bundle):
# Only return solution if the deadline has been reached, or there is
# no deadline
unit = bundle.obj.kq.unit
if (unit.unittype != 'n' and
unit.deadline > datetime.now(unit.deadline.tzinfo)):
return None
return bundle.obj.solution_media_content_id
def dehydrate_solution_text(self, bundle):
# Only return solution if the deadline has been reached, or there is
# no deadline
unit = bundle.obj.kq.unit
if (unit.unittype != 'n' and
unit.deadline > datetime.now(unit.deadline.tzinfo)):
return None
return bundle.obj.solution_text
def dehydrate_last_frame(self, bundle):
try:
return bundle.obj.last_frame.url
except ValueError:
return "%simg/no-image.png" % settings.STATIC_URL
def dehydrate_iframe_code(self, bundle):
return media_content_get_iframe_template(
bundle.obj.solution_media_content_type,
bundle.obj.solution_media_content_id
)
def dehydrate_thumbnail_url(self, bundle):
return media_content_get_thumbnail_url(
bundle.obj.solution_media_content_type,
bundle.obj.solution_media_content_id
)
class PrivateQuestionResource(BaseModelResource):
kq = fields.ToOneField(PrivateKnowledgeQuantumResource, 'kq')
class Meta:
queryset = Question.objects.all()
resource_name = 'privquestion'
authentication = TeacherAuthentication()
authorization = TeacherAuthorization()
always_return_data = True
filtering = {
"kq": ('exact'),
}
def dehydrate_last_frame(self, bundle):
try:
return bundle.obj.last_frame.url
except ValueError:
return "%simg/no-image.png" % settings.STATIC_URL
def hydrate(self, bundle):
try:
bundle.obj.last_frame.file
except ValueError:
bundle.obj.last_frame = ImageFieldFile(
bundle.obj, Question._meta.get_field_by_name('last_frame')[0],
"")
return bundle
class OptionResource(BaseModelResource):
question = fields.ToOneField(QuestionResource, 'question')
class Meta:
queryset = Option.objects.all()
resource_name = 'option'
allowed_methods = ['get']
authentication = DjangoAuthentication()
authorization = DjangoAuthorization()
filtering = {
"question": ('exact'),
}
def get_object_list(self, request):
objects = super(OptionResource, self).get_object_list(request)
return objects.filter(
Q(question__kq__unit__unittype='n') |
Q(question__kq__unit__start__isnull=True) |
Q(question__kq__unit__start__isnull=False,
question__kq__unit__start__lte=datetime.now)
)
def dispatch(self, request_type, request, **kwargs):
# We need the request to dehydrate some fields
try:
question_id = int(request.GET.get("question", None))
except ValueError:
raise BadRequest("question filter isn't a integer value")
collection = get_db().get_collection('answers')
self.answer = collection.find_one({
"user_id": request.user.id,
"question_id": question_id
})
return super(OptionResource, self).dispatch(request_type, request,
**kwargs)
def dehydrate_solution(self, bundle):
# Only return the solution if the user has given an answer
# If there is a deadline, then only return the solution if the deadline
# has been reached too
if self.answer:
unit = bundle.obj.question.kq.unit
if (unit.unittype == 'n' or
not(unit.deadline and
datetime.now(unit.deadline.tzinfo) > unit.deadline)):
return bundle.obj.solution
def dehydrate_feedback(self, bundle):
# Only return the feedback if the user has given an answer
if self.answer:
return bundle.obj.feedback
class AnswerResource(BaseMongoUserResource):
course_id = fields.IntegerField(null=False)
unit_id = fields.IntegerField(null=False)
kq_id = fields.IntegerField(null=False)
question_id = fields.IntegerField(null=False)
date = fields.DateTimeField(readonly=True, default=datetime.now)
replyList = fields.ListField(null=False)
class Meta:
resource_name = 'answer'
collection = 'answers'
datakey = 'question_id'
object_class = MongoObj
authentication = DjangoAuthentication()
authorization = DjangoAuthorization()
allowed_methods = ['get', 'post', 'put']
filtering = {
"question_id": ('exact'),
"course_id": ('exact'),
"unit_id": ('exact'),
"kq_id": ('exact'),
}
validation = AnswerValidation()
input_schema = {
"course_id": 1,
"unit_id": 1,
"kq_id": 1,
"question_id": 1,
"replyList": 1,
"user_id": 0,
"date": 0,
}
def obj_create(self, bundle, request=None, **kwargs):
bundle.data["date"] = datetime.utcnow()
# xAPI
try:
geolocation = {
'lat': float(request.META['HTTP_GEO_LAT']),
'lon': float(request.META['HTTP_GEO_LON'])
}
except:
geolocation = {
'lat': 0.0,
'lon': 0.0
}
bundle.extra = {
'geolocation': geolocation
}
bundle = super(AnswerResource, self).obj_create(bundle, request)
bundle.uuid = bundle.obj.question_id
return bundle
def obj_update(self, bundle, request=None, **kwargs):
answer_validate_date(bundle, request)
print "Validando..."
question_id = int(kwargs.get("pk"))
if (len(bundle.data.get("replyList", [])) > 0):
newobj = self._collection.find_and_modify({
'user_id': request.user.id,
'question_id': question_id,
}, update={
"$set": {
'replyList': bundle.data.get("replyList"),
"date": datetime.utcnow(),
}
}, safe=True, new=True)
bundle.obj = newobj
# xAPI
try:
geolocation = {
'lat': float(request.META['HTTP_GEO_LAT']),
'lon': float(request.META['HTTP_GEO_LON'])
}
except:
geolocation = {
'lat': 0.0,
'lon': 0.0
}
bundle.extra = {
'geolocation': geolocation
}
self.send_updated_signal(request.user.id, bundle)
return bundle
class ActivityResource(BaseMongoUserResource):
course_id = fields.IntegerField(null=False)
unit_id = fields.IntegerField(null=False)
kq_id = fields.IntegerField(null=False)
timestamp = fields.IntegerField(null=False)
lat = fields.FloatField()
lon = fields.FloatField()
class Meta:
resource_name = 'activity'
collection = 'activity'
datakey = 'kq_id'
object_class = MongoObj
authentication = DjangoAuthentication()
authorization = DjangoAuthorization()
allowed_methods = ['get', 'post']
filtering = {
"course_id": ('exact'),
"unit_id": ('exact'),
"kq_id": ('exact'),
}
input_schema = {
"course_id": 1,
"unit_id": 1,
"kq_id": 1,
"user_id": 1,
"timestamp": 1,
"lat": 0.0,
"lon": 0.0
}
def _initial(self, request, **kwargs):
course_id = kwargs['pk']
return {
"course_id": course_id,
"unit_id": -1,
"kq_id": -1,
"user_id": -1,
"timestamp": -1,
"lat": 0.0,
"lon": 0.0
}
class HistoryResource(BaseMongoUserResource):
url = fields.CharField(null=False)
timestamp = fields.IntegerField(null=False)
lat = fields.FloatField()
lon = fields.FloatField()
dev_type = fields.CharField(null=False)
dev_os = fields.CharField(null=False)
dev_orientation = fields.CharField(null=False)
course_id = fields.CharField(null=False)
class Meta:
resource_name = 'history'
collection = 'history'
datakey = '_id'
object_class = MongoObj
authentication = DjangoAuthentication()
authorization = DjangoAuthorization()
allowed_methods = ['get', 'post']
filtering = {
"user_id": ('exact'),
"url": ('exact'),
"dev_type": ('exact'),
"dev_os": ('exact'),
"dev_orientation": ('exact'),
}
input_schema = {
"user_id": 1,
"url": "",
"timestamp": 1,
"lat": 1.0,
"lon": 1.0,
"dev_type": "",
"dev_os": "",
"dev_orientation": "",
"course_id": ""
}
def _initial(self, request, **kwargs):
print request
print kwargs
user_id = kwargs['pk']
return {
"user_id": -1,
"url": "",
"timestamp": -1,
"lat": 0.0,
"lon": 0.0,
"dev_type": "",
"dev_os": "",
"dev_orientation": "",
"course_id": "",
}
class UserResource(BaseModelResource):
class Meta:
resource_name = 'user'
queryset = User.objects.all()
allowed_methods = ['get']
authentication = MultiAuthentication(TeacherAuthentication(),
ApiKeyAuthentication())
authorization = UserResourceAuthorization()
fields = ['id', 'email', 'first_name', 'last_name']
filtering = {
'first_name': ['istartswith'],
'last_name': ['istartswith'],
'email': ('iexact')
}
def apply_filters(self, request, applicable_filters):
applicable_filters = applicable_filters.items()
if len(applicable_filters) > 0:
Qfilter = Q(applicable_filters[0])
for apfilter in applicable_filters[1:]:
Qfilter = Qfilter | Q(apfilter)
return self.get_object_list(request).filter(Qfilter)
else:
return self.get_object_list(request)
def override_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<pk>[^/]+)/allcourses/$" % self._meta.resource_name,
self.wrap_view('get_courses'), name="get_courses_as_student"),
url(r"^(?P<resource_name>%s)/(?P<pk>[^/]+)/passedcourses/$" % self._meta.resource_name,
self.wrap_view('get_passed_courses'),
name="get_passed_courses_as_student"),
]
def get_object(self, request, kwargs):
try:
if not kwargs['pk'].isdigit():
return User.objects.get(email__iexact=kwargs['pk'])
else:
return self.cached_obj_get(request=request,
**self.remove_api_resource_names(kwargs))
except self.Meta.object_class.DoesNotExist:
raise NotFound('User does not exist')
def alt_get_list(self, request, courses):
resource = CourseResource()
sorted_objects = resource.apply_sorting(courses,
options=request.GET)
paginator = resource._meta.paginator_class(
request.GET, sorted_objects,
resource_uri=resource.get_resource_list_uri(),
limit=resource._meta.limit)
to_be_serialized = paginator.page()
# Dehydrate the bundles in preparation for serialization.
bundles = [resource.build_bundle(obj=obj, request=request)
for obj in to_be_serialized['objects']]
to_be_serialized['objects'] = [resource.full_dehydrate(bundle)
for bundle in bundles]
to_be_serialized = resource.alter_list_data_to_serialize(
request, to_be_serialized)
return resource.create_response(request, to_be_serialized)
def get_courses(self, request, **kwargs):
self.is_authenticated(request)
self.is_authorized(request)
obj = self.get_object(request, kwargs)
if isinstance(obj, HttpResponse):
return obj
courses = obj.courses_as_student.all()
return self.alt_get_list(request, courses)
def get_passed_courses(self, request, **kwargs):
        # In tastypie, views added via override_urls don't go through
        # Authentication/Authorization automatically
self.is_authenticated(request)
self.is_authorized(request)
obj = self.get_object(request, kwargs)
if isinstance(obj, HttpResponse):
return obj
passed_courses = []
if 'courseid' in request.GET:
courseid = int(request.GET.get('courseid'))
courses = obj.courses_as_student.filter(id=courseid)
else:
courses = obj.courses_as_student.all()
for course in courses:
if course.threshold is not None:
total_mark, units_info = calculate_course_mark(course, obj)
if float(course.threshold) <= total_mark:
passed_courses.append(course)
return self.alt_get_list(request, passed_courses)
class AssetResource(BaseModelResource):
available_in = fields.ToManyField('moocng.api.resources.AssetAvailabilityResource', 'available_in')
class Meta:
queryset = Asset.objects.all()
resource_name = 'asset'
allowed_methods = ['get']
authentication = DjangoAuthentication()
authorization = DjangoAuthorization()
filtering = {
"available_in": ('exact'),
}
def obj_get_list(self, request=None, **kwargs):
availability = request.GET.get('availability', None)
kq = request.GET.get('kq', None)
exclude_kq = request.GET.get('exclude_kq', None)
course = request.GET.get('course', None)
if availability is not None:
results = Asset.objects.filter(available_in__id=availability)
elif kq is not None:
results = Asset.objects.filter(available_in__kq__id=kq)
elif exclude_kq is not None:
results = Asset.objects.exclude(available_in__kq__id=exclude_kq)
elif course is not None:
results = Asset.objects.filter(available_in__kq__unit__course__id=course)
else:
results = Asset.objects.all()
return results
class PrivateAssetResource(BaseModelResource):
available_in = fields.ToManyField('moocng.api.resources.AssetAvailabilityResource', 'available_in')
class Meta:
queryset = Asset.objects.all()
resource_name = 'privasset'
authentication = TeacherAuthentication()
authorization = TeacherAuthorization()
filtering = {
"available_in": ('exact'),
}
def obj_get_list(self, request=None, **kwargs):
availability = request.GET.get('availability', None)
kq = request.GET.get('kq', None)
exclude_kq = request.GET.get('exclude_kq', None)
course = request.GET.get('course', None)
if availability is not None:
results = Asset.objects.filter(available_in__id=availability)
elif kq is not None:
results = Asset.objects.filter(available_in__kq__id=kq)
elif exclude_kq is not None:
results = Asset.objects.exclude(available_in__kq__id=exclude_kq)
elif course is not None:
results = Asset.objects.filter(available_in__kq__unit__course__id=course)
else:
results = Asset.objects.all()
return results
class AssetAvailabilityResource(BaseModelResource):
kq = fields.ToOneField(KnowledgeQuantumResource, 'kq')
assets = fields.ToManyField(AssetResource, 'assets')
can_be_booked = fields.BooleanField(readonly=True)
max_reservations_pending = fields.IntegerField(readonly=True)
max_reservations_total = fields.IntegerField(readonly=True)
class Meta:
queryset = AssetAvailability.objects.all()
resource_name = 'asset_availability'
allowed_methods = ['get']
authentication = DjangoAuthentication()
authorization = DjangoAuthorization()
filtering = {
"kq": ('exact'),
"assets": ('exact'),
}
def dehydrate_max_reservations_pending(self, bundle):
return bundle.obj.kq.unit.course.max_reservations_pending
def dehydrate_max_reservations_total(self, bundle):
return bundle.obj.kq.unit.course.max_reservations_total
def dehydrate_can_be_booked(self, bundle):
if bundle.obj.available_to < date.today():
return False
else:
return True
def obj_get_list(self, request=None, **kwargs):
kq = request.GET.get('kq', None)
asset = request.GET.get('asset', None)
if kq is not None and asset is not None:
results = AssetAvailability.objects.filter(Q(kq__id=kq) & Q(assets__available_in__id=asset))
elif kq is not None:
results = AssetAvailability.objects.filter(kq__id=kq)
elif asset is not None:
results = AssetAvailability.objects.filter(assets__available_in__id=asset)
else:
results = AssetAvailability.objects.all()
return results
class PrivateAssetAvailabilityResource(BaseModelResource):
kq = fields.ToOneField(KnowledgeQuantumResource, 'kq')
assets = fields.ToManyField(AssetResource, 'assets')
can_be_booked = fields.BooleanField(readonly=True)
max_reservations_pending = fields.IntegerField(readonly=True)
max_reservations_total = fields.IntegerField(readonly=True)
class Meta:
queryset = AssetAvailability.objects.all()
resource_name = 'privasset_availability'
always_return_data = True
authentication = TeacherAuthentication()
authorization = TeacherAuthorization()
filtering = {
"kq": ('exact'),
"assets": ('exact'),
}
def dehydrate_max_reservations_pending(self, bundle):
return bundle.obj.kq.unit.course.max_reservations_pending
def dehydrate_max_reservations_total(self, bundle):
return bundle.obj.kq.unit.course.max_reservations_total
def dehydrate_can_be_booked(self, bundle):
if isinstance(bundle.obj.available_to, datetime):
bundle.obj.available_to = bundle.obj.available_to.date()
if bundle.obj.available_to < date.today():
return False
else:
return True
def obj_get_list(self, request=None, **kwargs):
kq = request.GET.get('kq', None)
asset = request.GET.get('asset', None)
if kq is not None and asset is not None:
results = AssetAvailability.objects.filter(Q(kq__id=kq) & Q(assets__available_in__id=asset))
elif kq is not None:
results = AssetAvailability.objects.filter(kq__id=kq)
elif asset is not None:
results = AssetAvailability.objects.filter(assets__available_in__id=asset)
else:
results = AssetAvailability.objects.all()
return results
class ReservationResource(BaseModelResource):
user = fields.ToOneField(UserResource, 'user')
asset = fields.ToOneField(AssetResource, 'asset')
reserved_from = fields.ToOneField(AssetAvailabilityResource, 'reserved_from')
remaining_time = fields.IntegerField(readonly=True)
active_in = fields.IntegerField(readonly=True)
class Meta:
queryset = Reservation.objects.all()
resource_name = 'reservation'
allowed_methods = ['get']
authentication = MultiAuthentication(DjangoAuthentication(),
ApiKeyAuthentication())
authorization = DjangoAuthorization()
filtering = {
"asset": ('exact'),
"user": ('exact'),
"reserved_from": ('exact'),
}
def dehydrate_active_in(self, bundle):
reservation_begins = bundle.obj.reservation_begins.replace(tzinfo=None)
dif = (reservation_begins - datetime.utcnow())
return dif.seconds + dif.days * 86400
def dehydrate_remaining_time(self, bundle):
reservation_begins = bundle.obj.reservation_begins.replace(tzinfo=None)
reservation_ends = bundle.obj.reservation_ends.replace(tzinfo=None)
now = max(datetime.utcnow(), reservation_begins)
dif = (reservation_ends - now)
return dif.seconds + dif.days * 86400
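    # Descriptive note (added, not in the original source): dif.seconds + dif.days * 86400
    # converts the timedelta to whole seconds, dropping microseconds; e.g. a
    # difference of 2 days and 90 seconds yields 2 * 86400 + 90 = 172890, which for
    # non-negative differences equals int(dif.total_seconds()) on Python >= 2.7.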
def obj_get_list(self, request=None, **kwargs):
asset = request.GET.get('asset', None)
user = request.GET.get('user', None)
kq = request.GET.get('kq', None)
results = Reservation.objects.all()
if asset is not None:
results = results.filter(asset__id=asset)
if user is not None:
results = results.filter(user__id=user)
if kq is not None:
results = results.filter(reserved_from__kq=kq)
return results
class ReservationCount(BaseResource):
count = fields.IntegerField(readonly=True)
reservation_begins = fields.DateTimeField(readonly=True)
class Meta:
resource_name = 'reservation_count'
        allowed_methods = ['get']
def dehydrate_count(self, bundle):
return bundle.obj['reservation_begins__count']
def dehydrate_reservation_begins(self, bundle):
base_date = bundle.obj['reservation_begins']
return timezone.localtime(base_date, timezone.get_default_timezone())
def obj_get(self, request, **kwargs):
        # Information can only be obtained by asking for a list
return {'reservation_begins': '', 'reservation_begins__count': ''}
def obj_get_list(self, request, **kwargs):
asset_id = request.GET.get('asset', None)
ret = Reservation.objects.filter(asset__id=asset_id)
try:
date = datetime.strptime(request.GET.get('date', None), '%Y-%m-%d')
ret = ret.filter(Q(reservation_begins__gte=date) &
Q(reservation_begins__lt=(date + timedelta(1))))
except:
return []
ret = ret.values('reservation_begins').order_by('reservation_begins')
ret = ret.annotate(Count('reservation_begins'))
return ret
class OccupationInformation(BaseResource):
day = fields.IntegerField(readonly=True)
occupation = fields.DecimalField(readonly=True)
class Meta:
resource_name = 'occupation_information'
        allowed_methods = ['get']
def dehydrate_day(self, bundle):
return int(bundle.obj[0])
def dehydrate_occupation(self, bundle):
return bundle.obj[1]
def obj_get(self, request, **kwargs):
        # Information can only be obtained by asking for a list
return {'day': '', 'occupation': ''}
def obj_get_list(self, request, **kwargs):
try:
asset_id = int(request.GET.get('asset', ''))
month = int(request.GET.get('month', ''))
year = int(request.GET.get('year', ''))
except ValueError:
return []
ret = get_occupation_for_month(asset_id, month, year)
return ret
api_task_logger = logging.getLogger("api_tasks")
def on_activity_created(sender, user_id, mongo_object, **kwargs):
api_task_logger.debug("activity created")
data = mongo_object.obj.to_dict()
activity = get_db().get_collection('activity')
unit_activity = activity.find({
'user_id': data['user_id'],
'unit_id': data['unit_id'],
}).count()
course_activity = activity.find({
'user_id': data['user_id'],
'course_id': data['course_id']
}).count()
on_activity_created_task.apply_async(
args=[data, unit_activity, course_activity],
queue=STATS_QUEUE,
)
def on_answer_created(sender, user_id, mongo_object, **kwargs):
api_task_logger.debug("answer created")
on_answer_created_task.apply_async(
args=[mongo_object.obj.to_dict(), mongo_object.extra],
queue=STATS_QUEUE,
)
def on_answer_updated(sender, user_id, mongo_object, **kwargs):
api_task_logger.debug("answer updated")
on_answer_updated_task.apply_async(
args=[mongo_object.obj, mongo_object.extra], # mongo_object.obj is already a dict
queue=STATS_QUEUE,
)
def on_peerreviewsubmission_created(sender, user_id, mongo_object, **kwargs):
api_task_logger.debug("peer review submission created")
on_peerreviewsubmission_created_task.apply_async(
args=[mongo_object.obj.to_dict(), mongo_object.extra],
queue=STATS_QUEUE,
)
def on_history_created(sender, user_id, mongo_object, **kwargs):
api_task_logger.debug("history created")
on_history_created_task.apply_async(
args=[mongo_object.obj.to_dict()],
queue=STATS_QUEUE,
)
mongo_object_created.connect(on_activity_created, sender=ActivityResource,
dispatch_uid="activity_created")
mongo_object_created.connect(on_answer_created, sender=AnswerResource,
dispatch_uid="answer_created")
mongo_object_updated.connect(on_answer_updated, sender=AnswerResource,
dispatch_uid="answer_updated")
mongo_object_created.connect(on_peerreviewsubmission_created,
sender=PeerReviewSubmissionsResource,
dispatch_uid="peerreviewsubmission_created")
mongo_object_created.connect(on_history_created, sender=HistoryResource,
dispatch_uid="history_created")
|
|
#from pysqlite2 import dbapi2 as sqlite
import sqlite3 as sqlite
import re
import math
def getwords(doc):
splitter=re.compile('\\W*')
#print doc
# Split the words by non-alpha characters
words=[s.lower() for s in splitter.split(doc)
if len(s)>2 and len(s)<20]
# Return the unique set of words only
return dict([(w,1) for w in words])
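# Illustrative note (added, not part of the original listing): getwords()
# tokenizes on non-word characters, lowercases, keeps tokens of length 3..19
# and collapses duplicates, e.g.
#   getwords('The quick Brown fox jumps')
#   == {'the':1,'quick':1,'brown':1,'fox':1,'jumps':1}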
class classifier:
def __init__(self,getfeatures,filename=None):
# Counts of feature/category combinations
self.fc={}
# Counts of documents in each category
self.cc={}
self.getfeatures=getfeatures
def setdb(self,dbfile):
self.con=sqlite.connect(dbfile)
self.con.execute('create table if not exists fc(feature,category,count)')
self.con.execute('create table if not exists cc(category,count)')
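  # Added note (not in the original listing): the statements below build SQL by
  # string interpolation, which breaks on values containing quotes and is open
  # to SQL injection. sqlite3 also accepts parameterized queries, e.g. the
  # insert in incf() could equivalently be written as
  #   self.con.execute('insert into fc values (?,?,1)', (f, cat))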
def incf(self,f,cat):
count=self.fcount(f,cat)
if count==0:
self.con.execute("insert into fc values ('%s','%s',1)"
% (f,cat))
else:
self.con.execute(
"update fc set count=%d where feature='%s' and category='%s'"
% (count+1,f,cat))
def fcount(self,f,cat):
res=self.con.execute(
'select count from fc where feature="%s" and category="%s"'
%(f,cat)).fetchone()
if res==None: return 0
else: return float(res[0])
def incc(self,cat):
count=self.catcount(cat)
if count==0:
self.con.execute("insert into cc values ('%s',1)" % (cat))
else:
self.con.execute("update cc set count=%d where category='%s'"
% (count+1,cat))
def catcount(self,cat):
res=self.con.execute('select count from cc where category="%s"'
%(cat)).fetchone()
if res==None: return 0
else: return float(res[0])
def categories(self):
    cur=self.con.execute('select category from cc')
return [d[0] for d in cur]
def totalcount(self):
    res=self.con.execute('select sum(count) from cc').fetchone()
if res==None: return 0
return res[0]
def train(self,item,cat):
features=self.getfeatures(item)
# Increment the count for every feature with this category
for f in features:
self.incf(f,cat)
# Increment the count for this category
self.incc(cat)
self.con.commit()
def fprob(self,f,cat):
if self.catcount(cat)==0: return 0
# The total number of times this feature appeared in this
# category divided by the total number of items in this category
return self.fcount(f,cat)/self.catcount(cat)
def weightedprob(self,f,cat,prf,weight=1.0,ap=0.5):
# Calculate current probability
basicprob=prf(f,cat)
# Count the number of times this feature has appeared in
# all categories
totals=sum([self.fcount(f,c) for c in self.categories()])
# Calculate the weighted average
bp=((weight*ap)+(totals*basicprob))/(weight+totals)
return bp
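  # Worked example (added note, not in the original listing): with the default
  # weight=1.0 and assumed probability ap=0.5, a feature seen 3 times across all
  # categories (totals=3) with basicprob=0.25 in this category gives
  #   bp = (1.0*0.5 + 3*0.25) / (1.0 + 3) = 1.25 / 4 = 0.3125
  # so rarely seen features are pulled toward ap and common ones toward prf(f,cat).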
class naivebayes(classifier):
def __init__(self,getfeatures):
classifier.__init__(self,getfeatures)
self.thresholds={}
def docprob(self,item,cat):
features=self.getfeatures(item)
# Multiply the probabilities of all the features together
p=1
for f in features: p*=self.weightedprob(f,cat,self.fprob)
return p
def prob(self,item,cat):
catprob=self.catcount(cat)/self.totalcount()
docprob=self.docprob(item,cat)
return docprob*catprob
def setthreshold(self,cat,t):
self.thresholds[cat]=t
def getthreshold(self,cat):
if cat not in self.thresholds: return 1.0
return self.thresholds[cat]
def classify(self,item,default=None):
probs={}
# Find the category with the highest probability
max=0.0
for cat in self.categories():
probs[cat]=self.prob(item,cat)
if probs[cat]>max:
max=probs[cat]
best=cat
# Make sure the probability exceeds threshold*next best
for cat in probs:
if cat==best: continue
if probs[cat]*self.getthreshold(best)>probs[best]: return default
return best
class fisherclassifier(classifier):
def cprob(self,f,cat):
# The frequency of this feature in this category
clf=self.fprob(f,cat)
if clf==0: return 0
# The frequency of this feature in all the categories
freqsum=sum([self.fprob(f,c) for c in self.categories()])
# The probability is the frequency in this category divided by
# the overall frequency
p=clf/(freqsum)
return p
def fisherprob(self,item,cat):
# Multiply all the probabilities together
p=1
features=self.getfeatures(item)
for f in features:
p*=(self.weightedprob(f,cat,self.cprob))
# Take the natural log and multiply by -2
fscore=-2*math.log(p)
# Use the inverse chi2 function to get a probability
return self.invchi2(fscore,len(features)*2)
def invchi2(self,chi, df):
m = chi / 2.0
sum = term = math.exp(-m)
for i in range(1, df//2):
term *= m / i
sum += term
return min(sum, 1.0)
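  # Added note (not in the original listing): invchi2() is the upper-tail
  # probability of a chi-squared distribution with an even number of degrees of
  # freedom, computed from the series exp(-m) * sum(m**i / i!) for i = 0..df/2-1
  # with m = chi/2. fisherprob() feeds it -2*log(product of cprob values) with
  # df = 2*len(features), following Fisher's method for combining probabilities.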
def __init__(self,getfeatures):
classifier.__init__(self,getfeatures)
self.setdb('testdb.db')
self.minimums={}
def setminimum(self,cat,min):
self.minimums[cat]=min
def getminimum(self,cat):
if cat not in self.minimums: return 0
return self.minimums[cat]
def classify(self,item,default=None):
# Loop through looking for the best result
best=default
max=0.0
for c in self.categories():
p=self.fisherprob(item,c)
# Make sure it exceeds its minimum
if p>self.getminimum(c) and p>max:
best=c
max=p
return best
# def sampletrain(cl):
# cl.train('Nobody owns the water.','good')
# cl.train('the quick rabbit jumps fences','good')
# cl.train('buy pharmaceuticals now','bad')
# cl.train('make quick money at the online casino','bad')
# cl.train('the quick brown fox jumps','good')
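# Hedged usage sketch in the same spirit as sampletrain() above (added note;
# sampletrain() would need to be uncommented first, and the database file name
# and expected labels are assumptions based on the training sentences):
# cl=naivebayes(getwords)
# cl.setdb('testdb.db')
# sampletrain(cl)
# print cl.classify('quick rabbit',default='unknown')   # expected: 'good'
# print cl.classify('quick money',default='unknown')    # expected: 'bad'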
|